1 /*	$NetBSD: vfs_cache.c,v 1.147 2020/06/04 03:08:33 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2008, 2019, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright (c) 1989, 1993
34  *	The Regents of the University of California.  All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
61  */
62 
63 /*
64  * Name caching:
65  *
66  *	Names found by directory scans are retained in a cache for future
67  *	reference.  It is managed LRU, so frequently used names will hang
68  *	around.  The cache is indexed by hash value obtained from the name.
69  *
70  *	The name cache is the brainchild of Robert Elz and was introduced in
71  *	4.3BSD.  See "Using gprof to Tune the 4.2BSD Kernel", Marshall Kirk
72  *	McKusick, May 21 1984.
73  *
74  * Data structures:
75  *
76  *	Most Unix namecaches very sensibly use a global hash table to index
77  *	names.  The global hash table works well, but can cause concurrency
78  *	headaches for the kernel hacker.  In the NetBSD 10.0 implementation
79  *	we are not sensible, and use a per-directory data structure to index
80  *	names, but the cache otherwise functions the same.
81  *
82  *	The index is a red-black tree.  There are no special concurrency
83  *	requirements placed on it, because it's per-directory and protected
84  *	by the namecache's per-directory locks.  It should therefore not be
85  *	difficult to experiment with other types of index.
86  *
87  *	Each cached name is stored in a struct namecache, along with a
88  *	pointer to the associated vnode (nc_vp).  Names longer than a
89  *	maximum length of NCHNAMLEN are allocated with kmem_alloc(); they
90  *	occur infrequently, and names of this length or shorter are stored
91  *	directly in struct namecache.  If it is a "negative" entry (i.e. for
92  *	a name that is known NOT to exist) the vnode pointer will be NULL.
93  *
94  *	For a directory with 3 cached names for 3 distinct vnodes, the
95  *	various vnodes and namecache structs would be connected like this
96  *	(the root is at the bottom of the diagram):
97  *
98  *          ...
99  *           ^
100  *           |- vi_nc_tree
101  *           |
102  *      +----o----+               +---------+               +---------+
103  *      |  VDIR   |               |  VCHR   |               |  VREG   |
104  *      |  vnode  o-----+         |  vnode  o-----+         |  vnode  o------+
105  *      +---------+     |         +---------+     |         +---------+      |
106  *           ^          |              ^          |              ^           |
107  *           |- nc_vp   |- vi_nc_list  |- nc_vp   |- vi_nc_list  |- nc_vp    |
108  *           |          |              |          |              |           |
109  *      +----o----+     |         +----o----+     |         +----o----+      |
110  *  +---onamecache|<----+     +---onamecache|<----+     +---onamecache|<-----+
111  *  |   +---------+           |   +---------+           |   +---------+
112  *  |        ^                |        ^                |        ^
113  *  |        |                |        |                |        |
114  *  |        |  +----------------------+                |        |
115  *  |-nc_dvp | +-------------------------------------------------+
116  *  |        |/- vi_nc_tree   |                         |
117  *  |        |                |- nc_dvp                 |- nc_dvp
118  *  |   +----o----+           |                         |
119  *  +-->|  VDIR   |<----------+                         |
120  *      |  vnode  |<------------------------------------+
121  *      +---------+
122  *
125  * Replacement:
126  *
127  *	As the cache becomes full, old and unused entries are purged as new
128  *	entries are added.  The synchronization overhead in maintaining a
129  *	strict ordering would be prohibitive, so the VM system's "clock" or
130  *	"second chance" page replacement algorithm is aped here.  New
131  *	entries go to the tail of the active list.  After they age out and
132  *	reach the head of the list, they are moved to the tail of the
133  *	inactive list.  Any use of the deactivated cache entry reactivates
134  *	it, saving it from impending doom; if not reactivated, the entry
135  *	eventually reaches the head of the inactive list and is purged.
136  *
137  * Concurrency:
138  *
139  *	From a performance perspective, cache_lookup(nameiop == LOOKUP) is
140  *	what really matters; insertion of new entries with cache_enter() is
141  *	comparatively infrequent, and overshadowed by the cost of expensive
142  *	file system metadata operations (which may involve disk I/O).  We
143  *	therefore want to keep the lookup path as simple as possible.
144  *
145  *	struct namecache is mostly stable except for list and tree related
146  *	entries, changes to which don't affect the cached name or vnode.
147  *	For changes to name+vnode, entries are purged in preference to
148  *	modifying them.
149  *
150  *	Read access to namecache entries is made via tree, list, or LRU
151  *	list.  A lock corresponding to the direction of access should be
152  *	held.  See definition of "struct namecache" in src/sys/namei.src,
153  *	and the definition of "struct vnode" for the particulars.
154  *
155  *	Per-CPU statistics and LRU list totals are read unlocked, since
156  *	an approximate value is OK.  We maintain 32-bit sized per-CPU
157  *	counters and 64-bit global counters under the theory that 32-bit
158  *	sized counters are less likely to be hosed by nonatomic increment
159  *	(on 32-bit platforms).
160  *
161  *	The lock order is:
162  *
163  *	1) vi->vi_nc_lock	(tree or parent -> child direction,
164  *				 used during forward lookup)
165  *
166  *	2) vi->vi_nc_listlock	(list or child -> parent direction,
167  *				 used during reverse lookup)
168  *
169  *	3) cache_lru_lock	(LRU list direction, used during reclaim)
170  *
171  *	4) vp->v_interlock	(what the cache entry points to)
172  */
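
/*
 * As an illustrative sketch (not compiled), the lock order above plays
 * out in a forward lookup as follows: the directory's vi_nc_lock is
 * taken first, and the vnode the entry points to is referenced last,
 * with v_interlock (#4) ordered after all of the namecache locks.  This
 * mirrors what cache_lookup() below actually does:
 *
 *	rw_enter(&dvi->vi_nc_lock, RW_READER);
 *	ncp = cache_lookup_entry(dvp, name, namelen, key);
 *	error = vcache_tryvget(ncp->nc_vp);
 *	rw_exit(&dvi->vi_nc_lock);
 */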
173 
174 #include <sys/cdefs.h>
175 __KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.147 2020/06/04 03:08:33 riastradh Exp $");
176 
177 #define __NAMECACHE_PRIVATE
178 #ifdef _KERNEL_OPT
179 #include "opt_ddb.h"
180 #include "opt_dtrace.h"
181 #endif
182 
183 #include <sys/types.h>
184 #include <sys/atomic.h>
185 #include <sys/callout.h>
186 #include <sys/cpu.h>
187 #include <sys/errno.h>
188 #include <sys/evcnt.h>
189 #include <sys/hash.h>
190 #include <sys/kernel.h>
#include <sys/kmem.h>
191 #include <sys/mount.h>
192 #include <sys/mutex.h>
193 #include <sys/namei.h>
194 #include <sys/param.h>
195 #include <sys/pool.h>
196 #include <sys/sdt.h>
197 #include <sys/sysctl.h>
198 #include <sys/systm.h>
199 #include <sys/time.h>
200 #include <sys/vnode_impl.h>
201 
202 #include <miscfs/genfs/genfs.h>
203 
204 static void	cache_activate(struct namecache *);
205 static void	cache_update_stats(void *);
206 static int	cache_compare_nodes(void *, const void *, const void *);
207 static void	cache_deactivate(void);
208 static void	cache_reclaim(void);
209 static int	cache_stat_sysctl(SYSCTLFN_ARGS);
210 
211 /*
212  * Global pool cache.
213  */
214 static pool_cache_t cache_pool __read_mostly;
215 
216 /*
217  * LRU replacement.
218  */
219 enum cache_lru_id {
220 	LRU_ACTIVE,
221 	LRU_INACTIVE,
222 	LRU_COUNT
223 };
224 
225 static struct {
226 	TAILQ_HEAD(, namecache)	list[LRU_COUNT];
227 	u_int			count[LRU_COUNT];
228 } cache_lru __cacheline_aligned;
229 
230 static kmutex_t cache_lru_lock __cacheline_aligned;
231 
232 /*
233  * Cache effectiveness statistics.  nchstats holds system-wide total.
234  */
235 struct nchstats	nchstats;
236 struct nchstats_percpu _NAMEI_CACHE_STATS(uint32_t);
237 struct nchcpu {
238 	struct nchstats_percpu cur;
239 	struct nchstats_percpu last;
240 };
241 static callout_t cache_stat_callout;
242 static kmutex_t cache_stat_lock __cacheline_aligned;
243 
244 #define	COUNT(f) do { \
245 	lwp_t *l = curlwp; \
246 	KPREEMPT_DISABLE(l); \
247 	((struct nchstats_percpu *)curcpu()->ci_data.cpu_nch)->f++; \
248 	KPREEMPT_ENABLE(l); \
249 } while (/* CONSTCOND */ 0)
250 
251 #define	UPDATE(nchcpu, f) do { \
252 	uint32_t cur = atomic_load_relaxed(&nchcpu->cur.f); \
253 	nchstats.f += (uint32_t)(cur - nchcpu->last.f); \
254 	nchcpu->last.f = cur; \
255 } while (/* CONSTCOND */ 0)
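
/*
 * For example (illustrative), a miss in cache_lookup() is counted with:
 *
 *	COUNT(ncs_miss);
 *
 * which bumps the current CPU's 32-bit counter with preemption disabled.
 * cache_update_stats() later folds the per-CPU delta into the 64-bit
 * global with UPDATE(nchcpu, ncs_miss); the unsigned (cur - last)
 * subtraction there keeps the sums correct across 32-bit rollover.
 */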
256 
257 /*
258  * Tunables.  cache_maxlen replaces the historical doingcache:
259  * set it to zero to disable caching for debugging purposes.
260  */
261 int cache_lru_maxdeact __read_mostly = 2;	/* max # to deactivate */
262 int cache_lru_maxscan __read_mostly = 64;	/* max # to scan/reclaim */
263 int cache_maxlen __read_mostly = USHRT_MAX;	/* max name length to cache */
264 int cache_stat_interval __read_mostly = 300;	/* in seconds */
265 
266 /*
267  * sysctl stuff.
268  */
269 static struct	sysctllog *cache_sysctllog;
270 
271 /*
272  * This is a dummy name that cannot usually occur anywhere in the cache or
273  * file system.  It's used when caching the root vnode of mounted file
274  * systems.  The name is attached to the directory that the file system is
275  * mounted on.
276  */
277 static const char cache_mp_name[] = "";
278 static const int cache_mp_nlen = sizeof(cache_mp_name) - 1;
279 
280 /*
281  * Red-black tree stuff.
282  */
283 static const rb_tree_ops_t cache_rbtree_ops = {
284 	.rbto_compare_nodes = cache_compare_nodes,
285 	.rbto_compare_key = cache_compare_nodes,
286 	.rbto_node_offset = offsetof(struct namecache, nc_tree),
287 	.rbto_context = NULL
288 };
289 
290 /*
291  * dtrace probes.
292  */
293 SDT_PROVIDER_DEFINE(vfs);
294 
295 SDT_PROBE_DEFINE1(vfs, namecache, invalidate, done, "struct vnode *");
296 SDT_PROBE_DEFINE1(vfs, namecache, purge, parents, "struct vnode *");
297 SDT_PROBE_DEFINE1(vfs, namecache, purge, children, "struct vnode *");
298 SDT_PROBE_DEFINE2(vfs, namecache, purge, name, "char *", "size_t");
299 SDT_PROBE_DEFINE1(vfs, namecache, purge, vfs, "struct mount *");
300 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *",
301     "char *", "size_t");
302 SDT_PROBE_DEFINE3(vfs, namecache, lookup, miss, "struct vnode *",
303     "char *", "size_t");
304 SDT_PROBE_DEFINE3(vfs, namecache, lookup, toolong, "struct vnode *",
305     "char *", "size_t");
306 SDT_PROBE_DEFINE2(vfs, namecache, revlookup, success, "struct vnode *",
307      "struct vnode *");
308 SDT_PROBE_DEFINE2(vfs, namecache, revlookup, fail, "struct vnode *",
309      "int");
310 SDT_PROBE_DEFINE2(vfs, namecache, prune, done, "int", "int");
311 SDT_PROBE_DEFINE3(vfs, namecache, enter, toolong, "struct vnode *",
312     "char *", "size_t");
313 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *",
314     "char *", "size_t");
315 
316 /*
317  * rbtree: compare two nodes.
318  */
319 static int
320 cache_compare_nodes(void *context, const void *n1, const void *n2)
321 {
322 	const struct namecache *nc1 = n1;
323 	const struct namecache *nc2 = n2;
324 
325 	if (nc1->nc_key < nc2->nc_key) {
326 		return -1;
327 	}
328 	if (nc1->nc_key > nc2->nc_key) {
329 		return 1;
330 	}
331 	KASSERT(nc1->nc_nlen == nc2->nc_nlen);
332 	return memcmp(nc1->nc_name, nc2->nc_name, nc1->nc_nlen);
333 }
334 
335 /*
336  * Compute a key value for the given name.  The name length is encoded in
337  * the key value to try to improve uniqueness, and so that length doesn't
338  * need to be compared separately for string comparisons.
339  */
340 static inline uint64_t
341 cache_key(const char *name, size_t nlen)
342 {
343 	uint64_t key;
344 
345 	KASSERT(nlen <= USHRT_MAX);
346 
347 	key = hash32_buf(name, nlen, HASH32_STR_INIT);
348 	return (key << 32) | nlen;
349 }
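
/*
 * Example (illustrative): for the name "bin", nlen is 3 and the 32-bit
 * hash is some value H, so the key is ((uint64_t)H << 32) | 3.  Packing
 * the length into the low bits means two names of different length can
 * never share a key, which is why equal keys let cache_compare_nodes()
 * assert equal lengths before calling memcmp().
 */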
350 
351 /*
352  * Remove an entry from the cache.  vi_nc_lock must be held, and if dir2node
353  * is true, then we're locking in the conventional direction and the list
354  * lock will be acquired when removing the entry from the vnode list.
355  */
356 static void
357 cache_remove(struct namecache *ncp, const bool dir2node)
358 {
359 	struct vnode *vp, *dvp = ncp->nc_dvp;
360 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
361 
362 	KASSERT(rw_write_held(&dvi->vi_nc_lock));
363 	KASSERT(cache_key(ncp->nc_name, ncp->nc_nlen) == ncp->nc_key);
364 	KASSERT(rb_tree_find_node(&dvi->vi_nc_tree, ncp) == ncp);
365 
366 	SDT_PROBE(vfs, namecache, invalidate, done, ncp,
367 	    0, 0, 0, 0);
368 
369 	/*
370 	 * Remove from the vnode's list.  This excludes cache_revlookup(),
371 	 * and then it's safe to remove from the LRU lists.
372 	 */
373 	if ((vp = ncp->nc_vp) != NULL) {
374 		vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
375 		if (__predict_true(dir2node)) {
376 			rw_enter(&vi->vi_nc_listlock, RW_WRITER);
377 			TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
378 			rw_exit(&vi->vi_nc_listlock);
379 		} else {
380 			TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
381 		}
382 	}
383 
384 	/* Remove from the directory's rbtree. */
385 	rb_tree_remove_node(&dvi->vi_nc_tree, ncp);
386 
387 	/* Remove from the LRU lists. */
388 	mutex_enter(&cache_lru_lock);
389 	TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
390 	cache_lru.count[ncp->nc_lrulist]--;
391 	mutex_exit(&cache_lru_lock);
392 
393 	/* Finally, free it. */
394 	if (ncp->nc_nlen > NCHNAMLEN) {
395 		size_t sz = offsetof(struct namecache, nc_name[ncp->nc_nlen]);
396 		kmem_free(ncp, sz);
397 	} else {
398 		pool_cache_put(cache_pool, ncp);
399 	}
400 }
401 
402 /*
403  * Find a single cache entry and return it.  vi_nc_lock must be held.
404  */
405 static struct namecache * __noinline
406 cache_lookup_entry(struct vnode *dvp, const char *name, size_t namelen,
407     uint64_t key)
408 {
409 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
410 	struct rb_node *node = dvi->vi_nc_tree.rbt_root;
411 	struct namecache *ncp;
412 	int lrulist, diff;
413 
414 	KASSERT(rw_lock_held(&dvi->vi_nc_lock));
415 
416 	/*
417 	 * Search the RB tree for the key.  This is an inlined lookup
418 	 * tailored for exactly what's needed here (64-bit key and so on)
419 	 * that is quite a bit faster than using rb_tree_find_node().
420 	 *
421 	 * For a matching key memcmp() needs to be called once to confirm
422 	 * that the correct name has been found.  Very rarely there will be
423 	 * a key value collision and the search will continue.
424 	 */
425 	for (;;) {
426 		if (__predict_false(RB_SENTINEL_P(node))) {
427 			return NULL;
428 		}
429 		ncp = (struct namecache *)node;
430 		KASSERT((void *)&ncp->nc_tree == (void *)ncp);
431 		KASSERT(ncp->nc_dvp == dvp);
432 		if (ncp->nc_key == key) {
433 			KASSERT(ncp->nc_nlen == namelen);
434 			diff = memcmp(ncp->nc_name, name, namelen);
435 			if (__predict_true(diff == 0)) {
436 				break;
437 			}
438 			node = node->rb_nodes[diff < 0];
439 		} else {
440 			node = node->rb_nodes[ncp->nc_key < key];
441 		}
442 	}
443 
444 	/*
445 	 * If the entry is on the wrong LRU list, requeue it.  This is an
446 	 * unlocked check, but it will rarely be wrong and even then there
447 	 * will be no harm caused.
448 	 */
449 	lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
450 	if (__predict_false(lrulist != LRU_ACTIVE)) {
451 		cache_activate(ncp);
452 	}
453 	return ncp;
454 }
455 
456 /*
457  * Look for the name in the cache. We don't do this
458  * if the segment name is long, simply so the cache can avoid
459  * holding long names (which would either waste space, or
460  * add greatly to the complexity).
461  *
462  * Lookup is called with DVP pointing to the directory to search,
463  * and CNP providing the name of the entry being sought: cn_nameptr
464  * is the name, cn_namelen is its length, and cn_flags is the flags
465  * word from the namei operation.
466  *
467  * DVP must be locked.
468  *
469  * There are three possible non-error return states:
470  *    1. Nothing was found in the cache. Nothing is known about
471  *       the requested name.
472  *    2. A negative entry was found in the cache, meaning that the
473  *       requested name definitely does not exist.
474  *    3. A positive entry was found in the cache, meaning that the
475  *       requested name does exist and that we are providing the
476  *       vnode.
477  * In these cases the results are:
478  *    1. 0 returned; VN is set to NULL.
479  *    2. 1 returned; VN is set to NULL.
480  *    3. 1 returned; VN is set to the vnode found.
481  *
482  * The additional result argument ISWHT_RET is set to zero, unless a
483  * negative entry is found that was entered as a whiteout, in which
484  * case it is set to one.
485  *
486  * The ISWHT_RET argument pointer may be null. In this case an
487  * assertion is made that the whiteout flag is not set. File systems
488  * that do not support whiteouts can/should do this.
489  *
490  * Filesystems that do support whiteouts should add ISWHITEOUT to
491  * cnp->cn_flags if ISWHT_RET comes back nonzero.
492  *
493  * When a vnode is returned, it is referenced; the caller locks it
494  * as required, per the vnode lookup locking protocol.
495  *
496  * There is no way for this function to fail, in the sense of
497  * generating an error that requires aborting the namei operation.
498  *
499  * (Prior to October 2012, this function returned an integer status,
500  * and a vnode, and mucked with the flags word in CNP for whiteouts.
501  * The integer status was -1 for "nothing found", ENOENT for "a
502  * negative entry found", 0 for "a positive entry found", and possibly
503  * other errors, and the value of VN might or might not have been set
504  * depending on what error occurred.)
505  */
506 bool
507 cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
508 	     uint32_t nameiop, uint32_t cnflags,
509 	     int *iswht_ret, struct vnode **vn_ret)
510 {
511 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
512 	struct namecache *ncp;
513 	struct vnode *vp;
514 	uint64_t key;
515 	int error;
516 	bool hit;
517 	krw_t op;
518 
519 	KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);
520 
521 	/* Establish default result values */
522 	if (iswht_ret != NULL) {
523 		*iswht_ret = 0;
524 	}
525 	*vn_ret = NULL;
526 
527 	if (__predict_false(namelen > cache_maxlen)) {
528 		SDT_PROBE(vfs, namecache, lookup, toolong, dvp,
529 		    name, namelen, 0, 0);
530 		COUNT(ncs_long);
531 		return false;
532 	}
533 
534 	/* Compute the key up front - don't need the lock. */
535 	key = cache_key(name, namelen);
536 
537 	/* Could the entry be purged below? */
538 	if ((cnflags & ISLASTCN) != 0 &&
539 	    ((cnflags & MAKEENTRY) == 0 || nameiop == CREATE)) {
540 	    	op = RW_WRITER;
541 	} else {
542 		op = RW_READER;
543 	}
544 
545 	/* Now look for the name. */
546 	rw_enter(&dvi->vi_nc_lock, op);
547 	ncp = cache_lookup_entry(dvp, name, namelen, key);
548 	if (__predict_false(ncp == NULL)) {
549 		rw_exit(&dvi->vi_nc_lock);
550 		COUNT(ncs_miss);
551 		SDT_PROBE(vfs, namecache, lookup, miss, dvp,
552 		    name, namelen, 0, 0);
553 		return false;
554 	}
555 	if (__predict_false((cnflags & MAKEENTRY) == 0)) {
556 		/*
557 		 * Last component, and we are renaming or deleting:
558 		 * the cache entry is invalid, or we otherwise don't
559 		 * want the cache entry to exist.
560 		 */
561 		KASSERT((cnflags & ISLASTCN) != 0);
562 		cache_remove(ncp, true);
563 		rw_exit(&dvi->vi_nc_lock);
564 		COUNT(ncs_badhits);
565 		return false;
566 	}
567 	if (ncp->nc_vp == NULL) {
568 		if (iswht_ret != NULL) {
569 			/*
570 			 * Restore the ISWHITEOUT flag saved earlier.
571 			 */
572 			*iswht_ret = ncp->nc_whiteout;
573 		} else {
574 			KASSERT(!ncp->nc_whiteout);
575 		}
576 		if (nameiop == CREATE && (cnflags & ISLASTCN) != 0) {
577 			/*
578 			 * Last component and we are preparing to create
579 			 * the named object, so flush the negative cache
580 			 * entry.
581 			 */
582 			COUNT(ncs_badhits);
583 			cache_remove(ncp, true);
584 			hit = false;
585 		} else {
586 			COUNT(ncs_neghits);
587 			SDT_PROBE(vfs, namecache, lookup, hit, dvp, name,
588 			    namelen, 0, 0);
589 			/* found neg entry; vn is already null from above */
590 			hit = true;
591 		}
592 		rw_exit(&dvi->vi_nc_lock);
593 		return hit;
594 	}
595 	vp = ncp->nc_vp;
596 	error = vcache_tryvget(vp);
597 	rw_exit(&dvi->vi_nc_lock);
598 	if (error) {
599 		KASSERT(error == EBUSY);
600 		/*
601 		 * This vnode is being cleaned out.
602 		 * XXX badhits?
603 		 */
604 		COUNT(ncs_falsehits);
605 		return false;
606 	}
607 
608 	COUNT(ncs_goodhits);
609 	SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);
610 	/* found it */
611 	*vn_ret = vp;
612 	return true;
613 }
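
/*
 * Hypothetical caller sketch (illustrative only), mapping the three
 * result states documented above onto file system lookup code:
 *
 *	struct vnode *vp;
 *	if (!cache_lookup(dvp, name, namelen, nameiop, cnflags, NULL, &vp)) {
 *		// 1. nothing known: scan the directory metadata
 *	} else if (vp == NULL) {
 *		// 2. negative entry: fail with ENOENT
 *	} else {
 *		// 3. positive entry: use the referenced vnode in vp
 *	}
 */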
614 
615 /*
616  * Version of the above without the nameiop argument, for NFS.
617  */
618 bool
619 cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
620 		 uint32_t cnflags,
621 		 int *iswht_ret, struct vnode **vn_ret)
622 {
623 
624 	return cache_lookup(dvp, name, namelen, LOOKUP, cnflags | MAKEENTRY,
625 	    iswht_ret, vn_ret);
626 }
627 
628 /*
629  * Used by namei() to walk down a path, component by component by looking up
630  * names in the cache.  The node locks are chained along the way: a parent's
631  * lock is not dropped until the child's is acquired.
632  */
633 bool
634 cache_lookup_linked(struct vnode *dvp, const char *name, size_t namelen,
635 		    struct vnode **vn_ret, krwlock_t **plock,
636 		    kauth_cred_t cred)
637 {
638 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
639 	struct namecache *ncp;
640 	krwlock_t *oldlock, *newlock;
641 	uint64_t key;
642 	int error;
643 
644 	KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);
645 
646 	/* If disabled, or file system doesn't support this, bail out. */
647 	if (__predict_false((dvp->v_mount->mnt_iflag & IMNT_NCLOOKUP) == 0)) {
648 		return false;
649 	}
650 
651 	if (__predict_false(namelen > cache_maxlen)) {
652 		COUNT(ncs_long);
653 		return false;
654 	}
655 
656 	/* Compute the key up front - don't need the lock. */
657 	key = cache_key(name, namelen);
658 
659 	/*
660 	 * Acquire the directory lock.  Once we have that, we can drop the
661 	 * previous one (if any).
662 	 *
663 	 * The two lock holds mean that the directory can't go away while
664 	 * here: the directory must be purged with cache_purge() before
665 	 * being freed, and both parent & child's vi_nc_lock must be taken
666 	 * before that point is passed.
667 	 *
668 	 * However if there's no previous lock, like at the root of the
669 	 * chain, then "dvp" must be referenced to prevent dvp going away
670 	 * before we get its lock.
671 	 *
672 	 * Note that the two locks can be the same if looking up a dot, for
673 	 * example: /usr/bin/.  If looking up the parent (..) we can't wait
674 	 * on the lock as child -> parent is the wrong direction.
675 	 */
676 	if (*plock != &dvi->vi_nc_lock) {
677 		oldlock = *plock;
678 		newlock = &dvi->vi_nc_lock;
679 		if (!rw_tryenter(&dvi->vi_nc_lock, RW_READER)) {
680 			return false;
681 		}
682 	} else {
683 		oldlock = NULL;
684 		newlock = NULL;
685 	}
686 	if (*plock == NULL) {
687 		KASSERT(vrefcnt(dvp) > 0);
688 	}
689 
690 	/*
691 	 * First up check if the user is allowed to look up files in this
692 	 * directory.
693 	 */
694 	if (cred != FSCRED) {
695 		if (dvi->vi_nc_mode == VNOVAL) {
696 			if (newlock != NULL) {
697 				rw_exit(newlock);
698 			}
699 			return false;
700 		}
701 		KASSERT(dvi->vi_nc_uid != VNOVAL && dvi->vi_nc_gid != VNOVAL);
702 		error = kauth_authorize_vnode(cred, KAUTH_ACCESS_ACTION(VEXEC,
703 		    dvp->v_type, dvi->vi_nc_mode & ALLPERMS), dvp, NULL,
704 		    genfs_can_access(dvp, cred, dvi->vi_nc_uid, dvi->vi_nc_gid,
705 		    dvi->vi_nc_mode & ALLPERMS, NULL, VEXEC));
706 		if (error != 0) {
707 			if (newlock != NULL) {
708 				rw_exit(newlock);
709 			}
710 			COUNT(ncs_denied);
711 			return false;
712 		}
713 	}
714 
715 	/*
716 	 * Now look for a matching cache entry.
717 	 */
718 	ncp = cache_lookup_entry(dvp, name, namelen, key);
719 	if (__predict_false(ncp == NULL)) {
720 		if (newlock != NULL) {
721 			rw_exit(newlock);
722 		}
723 		COUNT(ncs_miss);
724 		SDT_PROBE(vfs, namecache, lookup, miss, dvp,
725 		    name, namelen, 0, 0);
726 		return false;
727 	}
728 	if (ncp->nc_vp == NULL) {
729 		/* found negative entry; vn is already null from above */
730 		KASSERT(namelen != cache_mp_nlen && name != cache_mp_name);
731 		COUNT(ncs_neghits);
732 	} else {
733 		COUNT(ncs_goodhits); /* XXX can be "badhits" */
734 	}
735 	SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);
736 
737 	/*
738 	 * Return with the directory lock still held.  It will either be
739 	 * returned to us with another call to cache_lookup_linked() when
740 	 * looking up the next component, or the caller will release it
741 	 * manually when finished.
742 	 */
743 	if (oldlock) {
744 		rw_exit(oldlock);
745 	}
746 	if (newlock) {
747 		*plock = newlock;
748 	}
749 	*vn_ret = ncp->nc_vp;
750 	return true;
751 }
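
/*
 * Hypothetical namei-style caller (illustrative only), showing how the
 * lock is chained along a path one component at a time and released by
 * the caller at the end:
 *
 *	krwlock_t *plock = NULL;
 *	while (more components remain) {
 *		if (!cache_lookup_linked(dvp, name, namelen, &vp, &plock,
 *		    cred)) {
 *			break;		// fall back to VOP_LOOKUP()
 *		}
 *		dvp = vp;	// parent lock dropped, child lock now held
 *	}
 *	if (plock != NULL)
 *		rw_exit(plock);
 */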
752 
753 /*
754  * Scan cache looking for name of directory entry pointing at vp.
755  * Will not search for "." or "..".
756  *
757  * If the lookup succeeds the vnode is referenced and stored in dvpp.
758  *
759  * If bufp is non-NULL, also place the name in the buffer which starts
760  * at bufp, immediately before *bpp, and move bpp backwards to point
761  * at the start of it.  (Yes, this is a little baroque, but it's done
762  * this way to cater to the whims of getcwd).
763  *
764  * Returns 0 on success, -1 on cache miss, positive errno on failure.
765  */
766 int
767 cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp,
768     bool checkaccess, accmode_t accmode)
769 {
770 	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
771 	struct namecache *ncp;
772 	struct vnode *dvp;
773 	int error, nlen, lrulist;
774 	char *bp;
775 
776 	KASSERT(vp != NULL);
777 
778 	if (cache_maxlen == 0)
779 		goto out;
780 
781 	rw_enter(&vi->vi_nc_listlock, RW_READER);
782 	if (checkaccess) {
783 		/*
784 		 * Check if the user is allowed to see.  NOTE: this is
785 		 * checking for access on the "wrong" directory.  getcwd()
786 		 * wants to see that there is access on every component
787 		 * along the way, not that there is access to any individual
788 		 * component.  Don't use this to check you can look in vp.
789 		 *
790 		 * I don't like it, I didn't come up with it, don't blame me!
791 		 */
792 		if (vi->vi_nc_mode == VNOVAL) {
793 			rw_exit(&vi->vi_nc_listlock);
794 			return -1;
795 		}
796 		KASSERT(vi->vi_nc_uid != VNOVAL && vi->vi_nc_gid != VNOVAL);
797 		error = kauth_authorize_vnode(curlwp->l_cred,
798 		    KAUTH_ACCESS_ACTION(VEXEC, vp->v_type, vi->vi_nc_mode &
799 		    ALLPERMS), vp, NULL, genfs_can_access(vp, curlwp->l_cred,
800 		    vi->vi_nc_uid, vi->vi_nc_gid, vi->vi_nc_mode & ALLPERMS,
801 		    NULL, accmode));
802 		if (error != 0) {
803 			rw_exit(&vi->vi_nc_listlock);
804 			COUNT(ncs_denied);
805 			return EACCES;
806 		}
807 	}
808 	TAILQ_FOREACH(ncp, &vi->vi_nc_list, nc_list) {
809 		KASSERT(ncp->nc_vp == vp);
810 		KASSERT(ncp->nc_dvp != NULL);
811 		nlen = ncp->nc_nlen;
812 
813 		/*
814 		 * Ignore mountpoint entries.
815 		 */
816 		if (ncp->nc_nlen == cache_mp_nlen) {
817 			continue;
818 		}
819 
820 		/*
821 		 * The queue is partially sorted.  Once we hit dots, nothing
822 		 * else remains but dots and dotdots, so bail out.
823 		 */
824 		if (ncp->nc_name[0] == '.') {
825 			if (nlen == 1 ||
826 			    (nlen == 2 && ncp->nc_name[1] == '.')) {
827 			    	break;
828 			}
829 		}
830 
831 		/*
832 		 * Record a hit on the entry.  This is an unlocked read but
833 		 * even if wrong it doesn't matter too much.
834 		 */
835 		lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
836 		if (lrulist != LRU_ACTIVE) {
837 			cache_activate(ncp);
838 		}
839 
840 		if (bufp) {
841 			bp = *bpp;
842 			bp -= nlen;
843 			if (bp <= bufp) {
844 				*dvpp = NULL;
845 				rw_exit(&vi->vi_nc_listlock);
846 				SDT_PROBE(vfs, namecache, revlookup,
847 				    fail, vp, ERANGE, 0, 0, 0);
848 				return (ERANGE);
849 			}
850 			memcpy(bp, ncp->nc_name, nlen);
851 			*bpp = bp;
852 		}
853 
854 		dvp = ncp->nc_dvp;
855 		error = vcache_tryvget(dvp);
856 		rw_exit(&vi->vi_nc_listlock);
857 		if (error) {
858 			KASSERT(error == EBUSY);
859 			if (bufp)
860 				(*bpp) += nlen;
861 			*dvpp = NULL;
862 			SDT_PROBE(vfs, namecache, revlookup, fail, vp,
863 			    error, 0, 0, 0);
864 			return -1;
865 		}
866 		*dvpp = dvp;
867 		SDT_PROBE(vfs, namecache, revlookup, success, vp, dvp,
868 		    0, 0, 0);
869 		COUNT(ncs_revhits);
870 		return (0);
871 	}
872 	rw_exit(&vi->vi_nc_listlock);
873 	COUNT(ncs_revmiss);
874  out:
875 	*dvpp = NULL;
876 	return (-1);
877 }
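
/*
 * Illustrative sketch of the buffer protocol (hypothetical getcwd()-style
 * caller; error handling omitted): each hit copies a name in immediately
 * before *bpp and walks bpp backwards, so the path assembles
 * right-to-left in buf:
 *
 *	char buf[MAXPATHLEN], *bp = buf + MAXPATHLEN;
 *	while (vp != rootdir) {
 *		if (cache_revlookup(vp, &dvp, &bp, buf, false, 0) != 0)
 *			break;		// miss: scan parent directory instead
 *		*(--bp) = '/';		// hypothetical separator handling
 *		vp = dvp;
 *	}
 */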
878 
879 /*
880  * Add an entry to the cache.
881  */
882 void
883 cache_enter(struct vnode *dvp, struct vnode *vp,
884 	    const char *name, size_t namelen, uint32_t cnflags)
885 {
886 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
887 	struct namecache *ncp, *oncp;
888 	int total;
889 
890 	KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);
891 
892 	/* First, check whether we can/should add a cache entry. */
893 	if ((cnflags & MAKEENTRY) == 0 ||
894 	    __predict_false(namelen > cache_maxlen)) {
895 		SDT_PROBE(vfs, namecache, enter, toolong, vp, name, namelen,
896 		    0, 0);
897 		return;
898 	}
899 
900 	SDT_PROBE(vfs, namecache, enter, done, vp, name, namelen, 0, 0);
901 
902 	/*
903 	 * Reclaim some entries if over budget.  This is an unlocked check,
904 	 * but it doesn't matter.  Just need to catch up with things
905 	 * eventually: it doesn't matter if we go over temporarily.
906 	 */
907 	total = atomic_load_relaxed(&cache_lru.count[LRU_ACTIVE]);
908 	total += atomic_load_relaxed(&cache_lru.count[LRU_INACTIVE]);
909 	if (__predict_false(total > desiredvnodes)) {
910 		cache_reclaim();
911 	}
912 
913 	/* Now allocate a fresh entry. */
914 	if (__predict_true(namelen <= NCHNAMLEN)) {
915 		ncp = pool_cache_get(cache_pool, PR_WAITOK);
916 	} else {
917 		size_t sz = offsetof(struct namecache, nc_name[namelen]);
918 		ncp = kmem_alloc(sz, KM_SLEEP);
919 	}
920 
921 	/*
922 	 * Fill in cache info.  For negative hits, save the ISWHITEOUT flag
923 	 * so we can restore it later when the cache entry is used again.
924 	 */
925 	ncp->nc_vp = vp;
926 	ncp->nc_dvp = dvp;
927 	ncp->nc_key = cache_key(name, namelen);
928 	ncp->nc_nlen = namelen;
929 	ncp->nc_whiteout = ((cnflags & ISWHITEOUT) != 0);
930 	memcpy(ncp->nc_name, name, namelen);
931 
932 	/*
933 	 * Insert to the directory.  Concurrent lookups may race for a cache
934  * entry.  If there's an entry there already, purge it.
935 	 */
936 	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
937 	oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
938 	if (oncp != ncp) {
939 		KASSERT(oncp->nc_key == ncp->nc_key);
940 		KASSERT(oncp->nc_nlen == ncp->nc_nlen);
941 		KASSERT(memcmp(oncp->nc_name, name, namelen) == 0);
942 		cache_remove(oncp, true);
943 		oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
944 		KASSERT(oncp == ncp);
945 	}
946 
947 	/*
948 	 * With the directory lock still held, insert to the tail of the
949 	 * ACTIVE LRU list (new) and take the opportunity to incrementally
950 	 * balance the lists.
951 	 */
952 	mutex_enter(&cache_lru_lock);
953 	ncp->nc_lrulist = LRU_ACTIVE;
954 	cache_lru.count[LRU_ACTIVE]++;
955 	TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
956 	cache_deactivate();
957 	mutex_exit(&cache_lru_lock);
958 
959 	/*
960 	 * Finally, insert to the vnode and unlock.  With everything set up
961 	 * it's safe to let cache_revlookup() see the entry.  Partially sort
962 	 * the per-vnode list: dots go to back so cache_revlookup() doesn't
963 	 * have to consider them.
964 	 */
965 	if (vp != NULL) {
966 		vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
967 		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
968 		if ((namelen == 1 && name[0] == '.') ||
969 		    (namelen == 2 && name[0] == '.' && name[1] == '.')) {
970 			TAILQ_INSERT_TAIL(&vi->vi_nc_list, ncp, nc_list);
971 		} else {
972 			TAILQ_INSERT_HEAD(&vi->vi_nc_list, ncp, nc_list);
973 		}
974 		rw_exit(&vi->vi_nc_listlock);
975 	}
976 	rw_exit(&dvi->vi_nc_lock);
977 }
978 
979 /*
980  * Set identity info in cache for a vnode.  We only care about directories
981  * so ignore other updates.  The cached info may be marked invalid if the
982  * inode has an ACL.
983  */
984 void
985 cache_enter_id(struct vnode *vp, mode_t mode, uid_t uid, gid_t gid, bool valid)
986 {
987 	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
988 
989 	if (vp->v_type == VDIR) {
990 		/* Grab both locks, for forward & reverse lookup. */
991 		rw_enter(&vi->vi_nc_lock, RW_WRITER);
992 		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
993 		if (valid) {
994 			vi->vi_nc_mode = mode;
995 			vi->vi_nc_uid = uid;
996 			vi->vi_nc_gid = gid;
997 		} else {
998 			vi->vi_nc_mode = VNOVAL;
999 			vi->vi_nc_uid = VNOVAL;
1000 			vi->vi_nc_gid = VNOVAL;
1001 		}
1002 		rw_exit(&vi->vi_nc_listlock);
1003 		rw_exit(&vi->vi_nc_lock);
1004 	}
1005 }
1006 
1007 /*
1008  * Return true if we have identity for the given vnode, and use as an
1009  * opportunity to confirm that everything squares up.
1010  *
1011  * Because of shared code, some file systems could provide partial
1012  * information, missing some updates, so check the mount flag too.
1013  */
1014 bool
1015 cache_have_id(struct vnode *vp)
1016 {
1017 
1018 	if (vp->v_type == VDIR &&
1019 	    (vp->v_mount->mnt_iflag & IMNT_NCLOOKUP) != 0 &&
1020 	    atomic_load_relaxed(&VNODE_TO_VIMPL(vp)->vi_nc_mode) != VNOVAL) {
1021 		return true;
1022 	} else {
1023 		return false;
1024 	}
1025 }
1026 
1027 /*
1028  * Enter a mount point.  cvp is the covered vnode, and rvp is the root of
1029  * the mounted file system.
1030  */
1031 void
1032 cache_enter_mount(struct vnode *cvp, struct vnode *rvp)
1033 {
1034 
1035 	KASSERT(vrefcnt(cvp) > 0);
1036 	KASSERT(vrefcnt(rvp) > 0);
1037 	KASSERT(cvp->v_type == VDIR);
1038 	KASSERT((rvp->v_vflag & VV_ROOT) != 0);
1039 
1040 	if (rvp->v_type == VDIR) {
1041 		cache_enter(cvp, rvp, cache_mp_name, cache_mp_nlen, MAKEENTRY);
1042 	}
1043 }
1044 
1045 /*
1046  * Look up a cached mount point.  Used in the strongly locked path.
1047  */
1048 bool
1049 cache_lookup_mount(struct vnode *dvp, struct vnode **vn_ret)
1050 {
1051 	bool ret;
1052 
1053 	ret = cache_lookup(dvp, cache_mp_name, cache_mp_nlen, LOOKUP,
1054 	    MAKEENTRY, NULL, vn_ret);
1055 	KASSERT((*vn_ret != NULL) == ret);
1056 	return ret;
1057 }
1058 
1059 /*
1060  * Try to cross a mount point.  For use with cache_lookup_linked().
1061  */
1062 bool
1063 cache_cross_mount(struct vnode **dvp, krwlock_t **plock)
1064 {
1065 
1066 	return cache_lookup_linked(*dvp, cache_mp_name, cache_mp_nlen,
1067 	   dvp, plock, FSCRED);
1068 }
1069 
1070 /*
1071  * Name cache initialization, from vfs_init() when the system is booting.
1072  */
1073 void
1074 nchinit(void)
1075 {
1076 
1077 	cache_pool = pool_cache_init(sizeof(struct namecache),
1078 	    coherency_unit, 0, 0, "namecache", NULL, IPL_NONE, NULL,
1079 	    NULL, NULL);
1080 	KASSERT(cache_pool != NULL);
1081 
1082 	mutex_init(&cache_lru_lock, MUTEX_DEFAULT, IPL_NONE);
1083 	TAILQ_INIT(&cache_lru.list[LRU_ACTIVE]);
1084 	TAILQ_INIT(&cache_lru.list[LRU_INACTIVE]);
1085 
1086 	mutex_init(&cache_stat_lock, MUTEX_DEFAULT, IPL_NONE);
1087 	callout_init(&cache_stat_callout, CALLOUT_MPSAFE);
1088 	callout_setfunc(&cache_stat_callout, cache_update_stats, NULL);
1089 	callout_schedule(&cache_stat_callout, cache_stat_interval * hz);
1090 
1091 	KASSERT(cache_sysctllog == NULL);
1092 	sysctl_createv(&cache_sysctllog, 0, NULL, NULL,
1093 		       CTLFLAG_PERMANENT,
1094 		       CTLTYPE_STRUCT, "namecache_stats",
1095 		       SYSCTL_DESCR("namecache statistics"),
1096 		       cache_stat_sysctl, 0, NULL, 0,
1097 		       CTL_VFS, CTL_CREATE, CTL_EOL);
1098 }
1099 
1100 /*
1101  * Called once for each CPU in the system as attached.
1102  */
1103 void
1104 cache_cpu_init(struct cpu_info *ci)
1105 {
1106 	void *p;
1107 	size_t sz;
1108 
1109 	sz = roundup2(sizeof(struct nchstats_percpu), coherency_unit) +
1110 	    coherency_unit;
1111 	p = kmem_zalloc(sz, KM_SLEEP);
1112 	ci->ci_data.cpu_nch = (void *)roundup2((uintptr_t)p, coherency_unit);
1113 }
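
/*
 * Worked example (illustrative): struct nchstats_percpu is 11 uint32_t
 * counters, i.e. 44 bytes.  With coherency_unit = 64 this allocates
 * roundup2(44, 64) + 64 = 128 bytes, and the pointer is then rounded up
 * to the next 64 byte boundary, so each CPU's counters never share a
 * cache line with another CPU's data.
 */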
1114 
1115 /*
1116  * A vnode is being allocated: set up cache structures.
1117  */
1118 void
1119 cache_vnode_init(struct vnode *vp)
1120 {
1121 	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
1122 
1123 	rw_init(&vi->vi_nc_lock);
1124 	rw_init(&vi->vi_nc_listlock);
1125 	rb_tree_init(&vi->vi_nc_tree, &cache_rbtree_ops);
1126 	TAILQ_INIT(&vi->vi_nc_list);
1127 	vi->vi_nc_mode = VNOVAL;
1128 	vi->vi_nc_uid = VNOVAL;
1129 	vi->vi_nc_gid = VNOVAL;
1130 }
1131 
1132 /*
1133  * A vnode is being freed: finish cache structures.
1134  */
1135 void
1136 cache_vnode_fini(struct vnode *vp)
1137 {
1138 	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
1139 
1140 	KASSERT(RB_TREE_MIN(&vi->vi_nc_tree) == NULL);
1141 	KASSERT(TAILQ_EMPTY(&vi->vi_nc_list));
1142 	rw_destroy(&vi->vi_nc_lock);
1143 	rw_destroy(&vi->vi_nc_listlock);
1144 }
1145 
1146 /*
1147  * Helper for cache_purge1(): purge cache entries for the given vnode from
1148  * all directories that the vnode is cached in.
1149  */
1150 static void
1151 cache_purge_parents(struct vnode *vp)
1152 {
1153 	vnode_impl_t *dvi, *vi = VNODE_TO_VIMPL(vp);
1154 	struct vnode *dvp, *blocked;
1155 	struct namecache *ncp;
1156 
1157 	SDT_PROBE(vfs, namecache, purge, parents, vp, 0, 0, 0, 0);
1158 
1159 	blocked = NULL;
1160 
1161 	rw_enter(&vi->vi_nc_listlock, RW_WRITER);
1162 	while ((ncp = TAILQ_FIRST(&vi->vi_nc_list)) != NULL) {
1163 		/*
1164 		 * Locking in the wrong direction.  Try for a hold on the
1165 		 * directory node's lock, and if we get it then all good,
1166 		 * nuke the entry and move on to the next.
1167 		 */
1168 		dvp = ncp->nc_dvp;
1169 		dvi = VNODE_TO_VIMPL(dvp);
1170 		if (rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
1171 			cache_remove(ncp, false);
1172 			rw_exit(&dvi->vi_nc_lock);
1173 			blocked = NULL;
1174 			continue;
1175 		}
1176 
1177 		/*
1178 		 * We can't wait on the directory node's lock with our list
1179 		 * lock held or the system could deadlock.
1180 		 *
1181 		 * Take a hold on the directory vnode to prevent it from
1182 		 * being freed (taking the vnode & lock with it).  Then
1183 		 * wait for the lock to become available with no other locks
1184 		 * held, and retry.
1185 		 *
1186 		 * If this happens twice in a row, give the other side a
1187 		 * breather; we can do nothing until it lets go.
1188 		 */
1189 		vhold(dvp);
1190 		rw_exit(&vi->vi_nc_listlock);
1191 		rw_enter(&dvi->vi_nc_lock, RW_WRITER);
1192 		/* Do nothing. */
1193 		rw_exit(&dvi->vi_nc_lock);
1194 		holdrele(dvp);
1195 		if (blocked == dvp) {
1196 			kpause("ncpurge", false, 1, NULL);
1197 		}
1198 		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
1199 		blocked = dvp;
1200 	}
1201 	rw_exit(&vi->vi_nc_listlock);
1202 }
1203 
1204 /*
1205  * Helper for cache_purge1(): purge all cache entries hanging off the given
1206  * directory vnode.
1207  */
1208 static void
1209 cache_purge_children(struct vnode *dvp)
1210 {
1211 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
1212 	struct namecache *ncp;
1213 
1214 	SDT_PROBE(vfs, namecache, purge, children, dvp, 0, 0, 0, 0);
1215 
1216 	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
1217 	while ((ncp = RB_TREE_MIN(&dvi->vi_nc_tree)) != NULL) {
1218 		cache_remove(ncp, true);
1219 	}
1220 	rw_exit(&dvi->vi_nc_lock);
1221 }
1222 
1223 /*
1224  * Helper for cache_purge1(): purge cache entry from the given vnode,
1225  * finding it by name.
1226  */
1227 static void
1228 cache_purge_name(struct vnode *dvp, const char *name, size_t namelen)
1229 {
1230 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
1231 	struct namecache *ncp;
1232 	uint64_t key;
1233 
1234 	SDT_PROBE(vfs, namecache, purge, name, name, namelen, 0, 0, 0);
1235 
1236 	key = cache_key(name, namelen);
1237 	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
1238 	ncp = cache_lookup_entry(dvp, name, namelen, key);
1239 	if (ncp) {
1240 		cache_remove(ncp, true);
1241 	}
1242 	rw_exit(&dvi->vi_nc_lock);
1243 }
1244 
1245 /*
1246  * Cache flush, a particular vnode; called when a vnode is renamed to
1247  * hide entries that would now be invalid.
1248  */
1249 void
1250 cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
1251 {
1252 
1253 	if (flags & PURGE_PARENTS) {
1254 		cache_purge_parents(vp);
1255 	}
1256 	if (flags & PURGE_CHILDREN) {
1257 		cache_purge_children(vp);
1258 	}
1259 	if (name != NULL) {
1260 		cache_purge_name(vp, name, namelen);
1261 	}
1262 }
1263 
1264 /*
1265  * vnode filter for cache_purgevfs().
1266  */
1267 static bool
1268 cache_vdir_filter(void *cookie, vnode_t *vp)
1269 {
1270 
1271 	return vp->v_type == VDIR;
1272 }
1273 
1274 /*
1275  * Cache flush, a whole filesystem; called when filesys is umounted to
1276  * remove entries that would now be invalid.
1277  */
1278 void
1279 cache_purgevfs(struct mount *mp)
1280 {
1281 	struct vnode_iterator *iter;
1282 	vnode_t *dvp;
1283 
1284 	vfs_vnode_iterator_init(mp, &iter);
1285 	for (;;) {
1286 		dvp = vfs_vnode_iterator_next(iter, cache_vdir_filter, NULL);
1287 		if (dvp == NULL) {
1288 			break;
1289 		}
1290 		cache_purge_children(dvp);
1291 		vrele(dvp);
1292 	}
1293 	vfs_vnode_iterator_destroy(iter);
1294 }
1295 
1296 /*
1297  * Re-queue an entry onto the tail of the active LRU list, after it has
1298  * scored a hit.
1299  */
1300 static void
1301 cache_activate(struct namecache *ncp)
1302 {
1303 
1304 	mutex_enter(&cache_lru_lock);
1305 	TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
1306 	TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
1307 	cache_lru.count[ncp->nc_lrulist]--;
1308 	cache_lru.count[LRU_ACTIVE]++;
1309 	ncp->nc_lrulist = LRU_ACTIVE;
1310 	mutex_exit(&cache_lru_lock);
1311 }
1312 
1313 /*
1314  * Try to balance the LRU lists.  Pick some victim entries, and re-queue
1315  * them from the head of the active list to the tail of the inactive list.
1316  */
1317 static void
1318 cache_deactivate(void)
1319 {
1320 	struct namecache *ncp;
1321 	int total, i;
1322 
1323 	KASSERT(mutex_owned(&cache_lru_lock));
1324 
1325 	/* If we're nowhere near budget yet, don't bother. */
1326 	total = cache_lru.count[LRU_ACTIVE] + cache_lru.count[LRU_INACTIVE];
1327 	if (total < (desiredvnodes >> 1)) {
1328 	    	return;
1329 	}
1330 
1331 	/*
1332 	 * Aim for a 1:1 ratio of active to inactive.  This is to allow each
1333 	 * potential victim a reasonable amount of time to cycle through the
1334 	 * inactive list in order to score a hit and be reactivated, while
1335 	 * trying not to cause reactivations too frequently.
1336 	 */
1337 	if (cache_lru.count[LRU_ACTIVE] < cache_lru.count[LRU_INACTIVE]) {
1338 		return;
1339 	}
1340 
1341 	/* Move only a few at a time; will catch up eventually. */
1342 	for (i = 0; i < cache_lru_maxdeact; i++) {
1343 		ncp = TAILQ_FIRST(&cache_lru.list[LRU_ACTIVE]);
1344 		if (ncp == NULL) {
1345 			break;
1346 		}
1347 		KASSERT(ncp->nc_lrulist == LRU_ACTIVE);
1348 		ncp->nc_lrulist = LRU_INACTIVE;
1349 		TAILQ_REMOVE(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
1350 		TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE], ncp, nc_lru);
1351 		cache_lru.count[LRU_ACTIVE]--;
1352 		cache_lru.count[LRU_INACTIVE]++;
1353 	}
1354 }
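
/*
 * Worked example (illustrative): with desiredvnodes = 100000, nothing is
 * deactivated until the two lists together hold at least 50000 entries.
 * Past that point each call moves at most cache_lru_maxdeact (2) entries
 * from the head of the active list to the tail of the inactive list, and
 * only while active >= inactive, holding the ratio near 1:1.
 */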
1355 
1356 /*
1357  * Free some entries from the cache, when we have gone over budget.
1358  *
1359  * We don't want to cause too much work for any individual caller, and it
1360  * doesn't matter if we temporarily go over budget.  This is also "just a
1361  * cache" so it's not a big deal if we screw up and throw out something we
1362  * shouldn't.  So we take a relaxed attitude to this process to reduce its
1363  * impact.
1364  */
1365 static void
1366 cache_reclaim(void)
1367 {
1368 	struct namecache *ncp;
1369 	vnode_impl_t *dvi;
1370 	int toscan;
1371 
1372 	/*
1373 	 * Scan up to a preset maximum number of entries, but no more than
1374 	 * 0.8% of the total at once (to allow for very small systems).
1375 	 *
1376 	 * On bigger systems, do a larger chunk of work to reduce the number
1377 	 * of times that cache_lru_lock is held for any length of time.
1378 	 */
1379 	mutex_enter(&cache_lru_lock);
1380 	toscan = MIN(cache_lru_maxscan, desiredvnodes >> 7);
1381 	toscan = MAX(toscan, 1);
1382 	SDT_PROBE(vfs, namecache, prune, done, cache_lru.count[LRU_ACTIVE] +
1383 	    cache_lru.count[LRU_INACTIVE], toscan, 0, 0, 0);
1384 	while (toscan-- != 0) {
1385 		/* First try to balance the lists. */
1386 		cache_deactivate();
1387 
1388 		/* Now look for a victim on head of inactive list (old). */
1389 		ncp = TAILQ_FIRST(&cache_lru.list[LRU_INACTIVE]);
1390 		if (ncp == NULL) {
1391 			break;
1392 		}
1393 		dvi = VNODE_TO_VIMPL(ncp->nc_dvp);
1394 		KASSERT(ncp->nc_lrulist == LRU_INACTIVE);
1395 		KASSERT(dvi != NULL);
1396 
1397 		/*
1398 		 * Locking in the wrong direction.  If we can't get the
1399 		 * lock, the directory is actively busy, and it could also
1400 		 * cause problems for the next guy in here, so send the
1401 		 * entry to the back of the list.
1402 		 */
1403 		if (!rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
1404 			TAILQ_REMOVE(&cache_lru.list[LRU_INACTIVE],
1405 			    ncp, nc_lru);
1406 			TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE],
1407 			    ncp, nc_lru);
1408 			continue;
1409 		}
1410 
1411 		/*
1412 		 * Now have the victim entry locked.  Drop the LRU list
1413 		 * lock, purge the entry, and start over.  The hold on
1414 		 * vi_nc_lock will prevent the vnode from vanishing until
1415 		 * finished (cache_purge() will be called on dvp before it
1416 		 * disappears, and that will wait on vi_nc_lock).
1417 		 */
1418 		mutex_exit(&cache_lru_lock);
1419 		cache_remove(ncp, true);
1420 		rw_exit(&dvi->vi_nc_lock);
1421 		mutex_enter(&cache_lru_lock);
1422 	}
1423 	mutex_exit(&cache_lru_lock);
1424 }
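
/*
 * Example (illustrative): with desiredvnodes = 100000 the scan cap is
 * MIN(cache_lru_maxscan, 100000 >> 7) = MIN(64, 781) = 64 entries per
 * call; on a very small system with desiredvnodes = 100 it becomes
 * MAX(MIN(64, 0), 1) = 1, so progress is always made.
 */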
1425 
1426 /*
1427  * For file system code: count a lookup that required a full re-scan of
1428  * directory metadata.
1429  */
1430 void
1431 namecache_count_pass2(void)
1432 {
1433 
1434 	COUNT(ncs_pass2);
1435 }
1436 
1437 /*
1438  * For file system code: count a lookup that scored a hit in the directory
1439  * metadata near the location of the last lookup.
1440  */
1441 void
1442 namecache_count_2passes(void)
1443 {
1444 
1445 	COUNT(ncs_2passes);
1446 }
1447 
1448 /*
1449  * Sum the stats from all CPUs into nchstats.  This needs to run at least
1450  * once within every window where a 32-bit counter could roll over.  It's
1451  * called regularly by timer to ensure this.
1452  */
1453 static void
1454 cache_update_stats(void *cookie)
1455 {
1456 	CPU_INFO_ITERATOR cii;
1457 	struct cpu_info *ci;
1458 
1459 	mutex_enter(&cache_stat_lock);
1460 	for (CPU_INFO_FOREACH(cii, ci)) {
1461 		struct nchcpu *nchcpu = ci->ci_data.cpu_nch;
1462 		UPDATE(nchcpu, ncs_goodhits);
1463 		UPDATE(nchcpu, ncs_neghits);
1464 		UPDATE(nchcpu, ncs_badhits);
1465 		UPDATE(nchcpu, ncs_falsehits);
1466 		UPDATE(nchcpu, ncs_miss);
1467 		UPDATE(nchcpu, ncs_long);
1468 		UPDATE(nchcpu, ncs_pass2);
1469 		UPDATE(nchcpu, ncs_2passes);
1470 		UPDATE(nchcpu, ncs_revhits);
1471 		UPDATE(nchcpu, ncs_revmiss);
1472 		UPDATE(nchcpu, ncs_denied);
1473 	}
1474 	if (cookie != NULL) {
1475 		memcpy(cookie, &nchstats, sizeof(nchstats));
1476 	}
1477 	/* Reset the timer; arrive back here in N minutes at latest. */
1478 	callout_schedule(&cache_stat_callout, cache_stat_interval * hz);
1479 	mutex_exit(&cache_stat_lock);
1480 }
1481 
1482 /*
1483  * Fetch the current values of the stats for sysctl.
1484  */
1485 static int
1486 cache_stat_sysctl(SYSCTLFN_ARGS)
1487 {
1488 	struct nchstats stats;
1489 
1490 	if (oldp == NULL) {
1491 		*oldlenp = sizeof(nchstats);
1492 		return 0;
1493 	}
1494 
1495 	if (*oldlenp == 0) {
1496 		*oldlenp = 0;
1497 		return 0;
1498 	}
1499 
1500 	/* Refresh the global stats. */
1501 	sysctl_unlock();
1502 	cache_update_stats(&stats);
1503 	sysctl_relock();
1504 
1505 	*oldlenp = MIN(sizeof(stats), *oldlenp);
1506 	return sysctl_copyout(l, &stats, oldp, *oldlenp);
1507 }
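
/*
 * Userland sketch (illustrative; assumes the node created in nchinit()
 * appears as "vfs.namecache_stats", with includes and error handling
 * omitted):
 *
 *	struct nchstats ns;
 *	size_t len = sizeof(ns);
 *	if (sysctlbyname("vfs.namecache_stats", &ns, &len, NULL, 0) == 0)
 *		printf("%" PRIu64 " good hits\n", ns.ncs_goodhits);
 */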
1508 
1509 /*
1510  * For the debugger, given the address of a vnode, print all associated
1511  * names in the cache.
1512  */
1513 #ifdef DDB
1514 void
1515 namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
1516 {
1517 	struct vnode *dvp = NULL;
1518 	struct namecache *ncp;
1519 	enum cache_lru_id id;
1520 
1521 	for (id = 0; id < LRU_COUNT; id++) {
1522 		TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
1523 			if (ncp->nc_vp == vp) {
1524 				(*pr)("name %.*s\n", ncp->nc_nlen,
1525 				    ncp->nc_name);
1526 				dvp = ncp->nc_dvp;
1527 			}
1528 		}
1529 	}
1530 	if (dvp == NULL) {
1531 		(*pr)("name not found\n");
1532 		return;
1533 	}
1534 	for (id = 0; id < LRU_COUNT; id++) {
1535 		TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
1536 			if (ncp->nc_vp == dvp) {
1537 				(*pr)("parent %.*s\n", ncp->nc_nlen,
1538 				    ncp->nc_name);
1539 			}
1540 		}
1541 	}
1542 }
1543 #endif
1544