/*	$NetBSD: vfs_cache.c,v 1.74 2008/04/12 17:34:26 ad Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.74 2008/04/12 17:34:26 ad Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>

/* Make reverse-cache entries for all vnodes, not only directories. */
#define NAMECACHE_ENTER_REVERSE
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (dvp, name), where dvp refers to the directory
 * containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 * The entry is also dropped when it was not possible to lock
 * the cached vnode, either because vget() failed or because the
 * generation number changed while waiting for the lock.
 */
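
/*
 * Locking, in brief (descriptive only; the routines below are the
 * authoritative reference): forward lookups take just the calling
 * CPU's cache lock (ci_data.cpu_cachelock), all modifications take
 * the global namecache_lock, and operations that must exclude every
 * lookup, such as hash resizing and garbage collection, take all of
 * the per-CPU locks via cache_lock_cpus().  Each entry additionally
 * carries its own nc_lock protecting its fields.
 */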

/*
 * Structures associated with name caching.
 */
LIST_HEAD(nchashhead, namecache) *nchashtbl;
u_long	nchash;				/* size of hash table - 1 */
#define	NCHASH(cnp, dvp)	\
	(((cnp)->cn_hash ^ ((uintptr_t)(dvp) >> 3)) & nchash)
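
/*
 * Illustrative use of NCHASH() (a sketch, not a new interface): the
 * name hash computed by namei (cnp->cn_hash) is mixed with the
 * directory's vnode address, so the same name under two different
 * directories lands on two different chains:
 *
 *	struct nchashhead *ncpp = &nchashtbl[NCHASH(cnp, dvp)];
 */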

LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl;
u_long	ncvhash;			/* size of hash table - 1 */
#define	NCVHASH(vp)		(((uintptr_t)(vp) >> 3) & ncvhash)

long	numcache;			/* number of cache entries allocated */
static u_int	cache_gcpend;		/* number of entries pending GC */
static void	*cache_gcqueue;		/* garbage collection queue */

TAILQ_HEAD(, namecache) nclruhead =		/* LRU chain */
	TAILQ_HEAD_INITIALIZER(nclruhead);
#define	COUNT(x)	nchstats.x++	/* unserialized, stats only */
struct	nchstats nchstats;		/* cache effectiveness statistics */

static pool_cache_t namecache_cache;

MALLOC_DEFINE(M_CACHE, "namecache", "Dynamically allocated cache entries");

int cache_lowat = 95;			/* reclaim target, % of desiredvnodes */
int cache_hiwat = 98;			/* reclaim trigger, % of desiredvnodes */
int cache_hottime = 5;			/* number of seconds */
int doingcache = 1;			/* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

/* A single lock to serialize modifications. */
static kmutex_t *namecache_lock;

static void cache_invalidate(struct namecache *);
static inline struct namecache *cache_lookup_entry(
    const struct vnode *, const struct componentname *);
static void cache_thread(void *);
static void cache_disassociate(struct namecache *);
static void cache_reclaim(void);
static int cache_ctor(void *, void *, int);
static void cache_dtor(void *, void *);

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 */
static void
cache_invalidate(struct namecache *ncp)
{
	void *head;

	KASSERT(mutex_owned(&ncp->nc_lock));

	if (ncp->nc_dvp != NULL) {
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
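		/*
		 * Push the entry onto the lock-free GC queue with a
		 * compare-and-swap loop: retry if another thread
		 * updated cache_gcqueue between our read and the CAS.
		 */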
		do {
			head = cache_gcqueue;
			ncp->nc_gcqueue = head;
		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
		atomic_inc_uint(&cache_gcpend);
	}
}

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove it from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

	KASSERT(mutex_owned(namecache_lock));
	KASSERT(ncp->nc_dvp == NULL);

	if (ncp->nc_lru.tqe_prev != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		ncp->nc_lru.tqe_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".
 */
static void
cache_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		mutex_enter(ci->ci_data.cpu_cachelock);
	}
}

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		mutex_exit(ci->ci_data.cpu_cachelock);
	}
}

/*
 * Find a single cache entry and return it locked.  'namecache_lock' or
 * at least one of the per-CPU locks must be held.
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const struct componentname *cnp)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;

	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if (ncp->nc_dvp != dvp ||
		    ncp->nc_nlen != cnp->cn_namelen ||
		    memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
			continue;
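		/*
		 * A candidate match was found without holding the
		 * entry's lock.  Take nc_lock and re-check nc_dvp to
		 * be sure the entry was not invalidated in between.
		 */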
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp == dvp) {
			ncp->nc_hittime = hardclock_ticks;
			return ncp;
		}
		/* Raced: entry has been nullified. */
		mutex_exit(&ncp->nc_lock);
	}

	return NULL;
}

/*
 * Look for the name in the cache.  We don't do this
 * if the segment name is long, simply so the cache can avoid
 * holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp->cn_nameptr pointing to the name of the entry being sought,
 * cnp->cn_namelen giving the length of the name, and cnp->cn_hash
 * containing a hash of the name.  If the lookup succeeds, the vnode
 * is locked, stored in *vpp, and a status of zero is returned.  If
 * the locking fails for whatever reason, the vnode is unlocked and
 * the error is returned to the caller.  If the lookup determines that
 * the name does not exist (negative caching), a status of ENOENT is
 * returned.  If the lookup fails, a status of -1 is returned.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	kmutex_t *cpulock;
	int error;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return (-1);
	}

	if (cnp->cn_namelen > NCHNAMLEN) {
		/* Unlocked, but only for stats. */
		COUNT(ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		goto fail;
	}
	cpulock = curcpu()->ci_data.cpu_cachelock;
	mutex_enter(cpulock);
	ncp = cache_lookup_entry(dvp, cnp);
	if (ncp == NULL) {
		COUNT(ncs_miss);
		goto fail_wlock;
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		COUNT(ncs_badhits);
		goto remove;
	} else if (ncp->nc_vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		cnp->cn_flags |= ncp->nc_flags;
		if (cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0) {
			COUNT(ncs_neghits);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(cpulock);
			return (ENOENT);
		} else {
			COUNT(ncs_badhits);
			goto remove;
		}
	}

	vp = ncp->nc_vp;
	mutex_enter(&vp->v_interlock);
	mutex_exit(&ncp->nc_lock);
	mutex_exit(cpulock);
	error = vget(vp, LK_NOWAIT | LK_INTERLOCK);

#ifdef DEBUG
	/*
	 * Since we released ncp->nc_lock,
	 * we can't use this pointer any more.
	 */
	ncp = NULL;
#endif /* DEBUG */

	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 */
		COUNT(ncs_falsehits); /* XXX badhits? */
		goto fail;
	}

	if (vp == dvp) {	/* lookup on "." */
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		VOP_UNLOCK(dvp, 0);
		error = vn_lock(vp, LK_EXCLUSIVE);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = vn_lock(vp, LK_EXCLUSIVE);
	}

	/*
	 * Check that the lock succeeded.
	 */
	if (error) {
		/* Unlocked, but only for stats. */
		COUNT(ncs_badhits);
		*vpp = NULL;
		return (-1);
	}

	/* Unlocked, but only for stats. */
	COUNT(ncs_goodhits);
	*vpp = vp;
	return (0);

remove:
	/*
	 * This is the last component and we are renaming or deleting,
	 * the cache entry is invalid, or we otherwise don't
	 * want the cache entry to exist.
	 */
	cache_invalidate(ncp);
	mutex_exit(&ncp->nc_lock);
fail_wlock:
	mutex_exit(cpulock);
fail:
	*vpp = NULL;
	return (-1);
}

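/*
 * Same as cache_lookup(), but the returned vnode is only referenced
 * (via vget()); no attempt is made to lock it with vn_lock().
 */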
int
cache_lookup_raw(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	kmutex_t *cpulock;
	int error;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return (-1);
	}

	if (cnp->cn_namelen > NCHNAMLEN) {
		/* Unlocked, but only for stats. */
		COUNT(ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		goto fail;
	}
	cpulock = curcpu()->ci_data.cpu_cachelock;
	mutex_enter(cpulock);
	ncp = cache_lookup_entry(dvp, cnp);
	if (ncp == NULL) {
		COUNT(ncs_miss);
		goto fail_wlock;
	}
	vp = ncp->nc_vp;
	if (vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		cnp->cn_flags |= ncp->nc_flags;
		COUNT(ncs_neghits);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(cpulock);
		return (ENOENT);
	}
	mutex_enter(&vp->v_interlock);
	mutex_exit(&ncp->nc_lock);
	mutex_exit(cpulock);
	error = vget(vp, LK_NOWAIT | LK_INTERLOCK);

	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 */
		COUNT(ncs_falsehits); /* XXX badhits? */
		goto fail;
	}

	*vpp = vp;

	return 0;

fail_wlock:
	mutex_exit(cpulock);
fail:
	*vpp = NULL;
	return -1;
}

/*
 * Scan the cache looking for the name of a directory entry pointing
 * at vp, and fill in dvpp with the parent directory's vnode.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
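/*
 * Buffer use, illustrated with the name "foo" (a sketch, not part of
 * the interface): the name is copied to the ncp->nc_nlen bytes just
 * before *bpp, and *bpp is moved back to point at its first byte:
 *
 *	bufp            new *bpp            old *bpp
 *	|               |                   |
 *	v               v                   v
 *	[ <free space>  f o o <components copied earlier...> ]
 */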
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	char *bp;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	mutex_enter(namecache_lock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) { 		/* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			COUNT(ncs_revhits);

			if (bufp) {
				bp = *bpp;
				bp -= ncp->nc_nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					mutex_exit(&ncp->nc_lock);
					mutex_exit(namecache_lock);
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, ncp->nc_nlen);
				*bpp = bp;
			}

			/* XXX MP: how do we know dvp won't evaporate? */
			*dvpp = dvp;
			mutex_exit(&ncp->nc_lock);
			mutex_exit(namecache_lock);
			return (0);
		}
		mutex_exit(&ncp->nc_lock);
	}
	COUNT(ncs_revmiss);
	mutex_exit(namecache_lock);
 out:
	*dvpp = NULL;
	return (-1);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct namecache *oncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;

#ifdef DIAGNOSTIC
	if (cnp->cn_namelen > NCHNAMLEN)
		panic("cache_enter: name too long");
#endif
	if (!doingcache)
		return;

	if (numcache > desiredvnodes) {
		mutex_enter(namecache_lock);
		cache_ev_forced.ev_count++;
		cache_reclaim();
		mutex_exit(namecache_lock);
	}

	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
	mutex_enter(namecache_lock);
	numcache++;

	/*
	 * Concurrent lookups in the same directory may race for a
	 * cache entry.  If there's a duplicated entry, free it.
	 */
	oncp = cache_lookup_entry(dvp, cnp);
	if (oncp) {
		cache_invalidate(oncp);
		mutex_exit(&oncp->nc_lock);
	}

	/* Grab the vnode we just found. */
	mutex_enter(&ncp->nc_lock);
	ncp->nc_vp = vp;
	ncp->nc_flags = 0;
	ncp->nc_hittime = 0;
	ncp->nc_gcqueue = NULL;
	if (vp == NULL) {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_flags = cnp->cn_flags & ISWHITEOUT;
	}
	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
	if (vp)
		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
	else {
		ncp->nc_vlist.le_prev = NULL;
		ncp->nc_vlist.le_next = NULL;
	}
	ncp->nc_nlen = cnp->cn_namelen;
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	memcpy(ncp->nc_name, cnp->cn_nameptr, (unsigned)ncp->nc_nlen);
	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	/*
	 * Flush updates before making the entry visible in the table.
	 * No need for a memory barrier on the other side: to see
	 * modifications the list must be followed, meaning a dependent
	 * pointer load.  The below is LIST_INSERT_HEAD() inlined, with
	 * the memory barrier included in the correct place.
	 */
	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
	ncp->nc_hash.le_prev = &ncpp->lh_first;
	membar_producer();
	ncpp->lh_first = ncp;

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used in getcwd, and by the
	 * Linux procfs exe node) for directories, or for all vnodes
	 * when NAMECACHE_ENTER_REVERSE is defined; "." and ".."
	 * entries are skipped.
	 */
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
	mutex_exit(&ncp->nc_lock);
	mutex_exit(namecache_lock);
}

/*
 * Name cache initialization, from vfs_init() when we are booting.
 */
void
nchinit(void)
{
	int error;

	namecache_cache = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
	    cache_dtor, NULL);
	KASSERT(namecache_cache != NULL);

	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	nchashtbl =
	    hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, M_CACHE, M_WAITOK, &ncvhash);
#endif

	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
	    NULL, NULL, "cachegc");
	if (error != 0)
		panic("nchinit %d", error);

	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries scanned");
	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries collected");
	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
	   "namecache", "over scan target");
	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
	   "namecache", "under scan target");
	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
	   "namecache", "forced reclaims");
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as it is attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{

	ci->ci_data.cpu_cachelock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes
 * increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, M_CACHE, M_WAITOK, &mask2);
#endif
	mutex_enter(namecache_lock);
	cache_lock_cpus();
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
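	/*
	 * Note that existing entries are not rehashed into the new
	 * tables: they are unhashed below and remain reachable only
	 * via the LRU and per-vnode lists until reclaimed.
	 */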
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	cache_unlock_cpus();
	mutex_exit(namecache_lock);
	hashdone(oldhash1, M_CACHE);
	hashdone(oldhash2, M_CACHE);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const struct componentname *cnp, int flags)
{
	struct namecache *ncp, *ncnext;

	mutex_enter(namecache_lock);
	if (flags & PURGE_PARENTS) {
		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_vlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (flags & PURGE_CHILDREN) {
		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_dvlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (cnp != NULL) {
		ncp = cache_lookup_entry(vp, cnp);
		if (ncp) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			mutex_exit(&ncp->nc_lock);
		}
	}
	mutex_exit(namecache_lock);
}

/*
 * Cache flush, a whole filesystem; called when a filesystem is
 * unmounted to remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	mutex_enter(namecache_lock);
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
			/* Free the resources we had. */
			cache_invalidate(ncp);
			cache_disassociate(ncp);
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_reclaim();
	mutex_exit(namecache_lock);
}

/*
 * Scan the global list invalidating entries until we meet a preset
 * target.  Prefer to invalidate entries that have not scored a hit
 * within cache_hottime seconds.  We sort the LRU list only for this
 * routine's benefit.
 */
static void
cache_prune(int incache, int target)
{
	struct namecache *ncp, *nxtcp, *sentinel;
	int items, recent, tryharder;

	KASSERT(mutex_owned(namecache_lock));

	items = 0;
	tryharder = 0;
	recent = hardclock_ticks - hz * cache_hottime;
	sentinel = NULL;
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (incache <= target)
			break;
		items++;
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp->nc_dvp == NULL)
			continue;
		if (ncp == sentinel) {
			/*
			 * If we looped back on ourself, then ignore
			 * recent entries and purge whatever we find.
			 */
			tryharder = 1;
		}
		if (!tryharder && ncp->nc_hittime > recent) {
			if (sentinel == NULL)
				sentinel = ncp;
			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			continue;
		}
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			incache--;
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp, *next;
	int items;

	KASSERT(mutex_owned(namecache_lock));

	/*
	 * If the number of extant entries not awaiting garbage collection
	 * exceeds the high water mark, then reclaim stale entries until we
	 * reach our low water mark.
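	 * For example, with desiredvnodes == 1000 and the defaults
	 * cache_hiwat == 98 and cache_lowat == 95, pruning starts once
	 * more than 980 such entries exist and stops at 950.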
	 */
	items = numcache - cache_gcpend;
	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
		cache_prune(items, (int)((uint64_t)desiredvnodes *
		    cache_lowat / 100));
		cache_ev_over.ev_count++;
	} else
		cache_ev_under.ev_count++;

	/*
	 * Stop forward lookup activity on all CPUs and garbage collect dead
	 * entries.
	 */
	cache_lock_cpus();
	ncp = cache_gcqueue;
	cache_gcqueue = NULL;
	items = cache_gcpend;
	cache_gcpend = 0;
	while (ncp != NULL) {
		next = ncp->nc_gcqueue;
		cache_disassociate(ncp);
		KASSERT(ncp->nc_dvp == NULL);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		pool_cache_put(namecache_cache, ncp);
		ncp = next;
	}
	cache_unlock_cpus();
	numcache -= items;
	cache_ev_gc.ev_count += items;
}

/*
 * Cache maintenance thread, awakening once per second to:
 *
 * => keep number of entries below the high water mark
 * => sort pseudo-LRU list
 * => garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

	mutex_enter(namecache_lock);
	for (;;) {
		cache_reclaim();
		kpause("cachegc", false, hz, namecache_lock);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif
955