xref: /dflybsd-src/sys/kern/vfs_cache.c (revision 37cbab4e1d236766bff1f9fd79c7ae9ca6d69ba9)
1 /*
2  * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * Copyright (c) 1989, 1993, 1995
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * This code is derived from software contributed to Berkeley by
38  * Poul-Henning Kamp of the FreeBSD Project.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *	This product includes software developed by the University of
51  *	California, Berkeley and its contributors.
52  * 4. Neither the name of the University nor the names of its contributors
53  *    may be used to endorse or promote products derived from this software
54  *    without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66  * SUCH DAMAGE.
67  */
68 
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/sysctl.h>
73 #include <sys/mount.h>
74 #include <sys/vnode.h>
75 #include <sys/malloc.h>
76 #include <sys/sysproto.h>
77 #include <sys/spinlock.h>
78 #include <sys/proc.h>
79 #include <sys/namei.h>
80 #include <sys/nlookup.h>
81 #include <sys/filedesc.h>
82 #include <sys/fnv_hash.h>
83 #include <sys/globaldata.h>
84 #include <sys/kern_syscall.h>
85 #include <sys/dirent.h>
86 #include <ddb/ddb.h>
87 
88 #include <sys/sysref2.h>
89 #include <sys/spinlock2.h>
90 #include <sys/mplock2.h>
91 
92 #define MAX_RECURSION_DEPTH	64
93 
94 /*
95  * Random lookups in the cache are accomplished with a hash table using
96  * a hash key of (nc_src_vp, name).  Each hash chain has its own spin lock.
97  *
98  * Negative entries may exist and correspond to resolved namecache
99  * structures where nc_vp is NULL.  In a negative entry, NCF_WHITEOUT
100  * will be set if the entry corresponds to a whited-out directory entry
101  * (versus simply not finding the entry at all).   ncneglist is locked
102  * with a global spinlock (ncspin).
103  *
104  * MPSAFE RULES:
105  *
106  * (1) A ncp must be referenced before it can be locked.
107  *
108  * (2) A ncp must be locked in order to modify it.
109  *
110  * (3) ncp locks are always ordered child -> parent.  That may seem
111  *     backwards but forward scans use the hash table and thus can hold
112  *     the parent unlocked when traversing downward.
113  *
114  *     This allows insert/rename/delete/dot-dot and other operations
115  *     to use ncp->nc_parent links.
116  *
117  *     This also prevents a locked-up node (e.g. NFS) from creating a
118  *     chain reaction all the way back to the root vnode / namecache.
119  *
120  * (4) parent linkages require both the parent and child to be locked.
121  */
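/*
 * Illustrative sketch of rules (1), (3) and (4) above: with a child ncp
 * already referenced and locked, its parent may safely be locked afterwards.
 * The helper name is hypothetical; _cache_unlink_parent() below uses this
 * same pattern with the real locking primitives defined in this file.
 */
#if 0
static void
example_lock_parent_of(struct namecache *ncp)
{
	struct namecache *par;

	/* ncp is assumed referenced and locked by the caller */
	if ((par = ncp->nc_parent) != NULL) {
		_cache_hold(par);	/* rule (1): ref before lock */
		_cache_lock(par);	/* rule (3): child -> parent order */
		/* rule (4): both ends locked, parent linkage may change */
		_cache_unlock(par);
		_cache_drop(par);
	}
}
#endif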
122 
123 /*
124  * Structures associated with name cacheing.
125  */
126 #define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
127 #define MINNEG		1024
128 #define MINPOS		1024
129 
130 MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
131 
132 LIST_HEAD(nchash_list, namecache);
133 
134 struct nchash_head {
135        struct nchash_list list;
136        struct spinlock	spin;
137 };
138 
139 static struct nchash_head	*nchashtbl;
140 static struct namecache_list	ncneglist;
141 static struct spinlock		ncspin;
142 
143 /*
144  * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
145  * to create the namecache infrastructure leading to a dangling vnode.
146  *
147  * 0	Only errors are reported
148  * 1	Successes are reported
149  * 2	Successes + the whole directory scan is reported
150  * 3	Force the directory scan code to run as if the parent vnode did not
151  *	have a namecache record, even if it does have one.
152  */
153 static int	ncvp_debug;
154 SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
155     "Namecache debug level (0-3)");
156 
157 static u_long	nchash;			/* size of hash table - 1 (mask) */
158 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
159     "Size of namecache hash table");
160 
161 static int	ncnegfactor = 16;	/* ratio of negative entries */
162 SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
163     "Ratio of namecache negative entries");
164 
165 static int	nclockwarn;		/* ticks to wait before warning */
166 SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
167     "Ticks to wait before warning about a blocked namecache lock");
168 
169 static int	numdefered;		/* number of deferred zaps */
170 SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
171     "Number of deferred zaps");
172 
173 static int	ncposlimit;		/* limit on positive entries */
174 SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
175     "Limit on the number of positive namecache entries");
176 
177 SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
178     "sizeof(struct vnode)");
179 SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
180     "sizeof(struct namecache)");
181 
182 static int cache_resolve_mp(struct mount *mp);
183 static struct vnode *cache_dvpref(struct namecache *ncp);
184 static void _cache_lock(struct namecache *ncp);
185 static void _cache_setunresolved(struct namecache *ncp);
186 static void _cache_cleanneg(int count);
187 static void _cache_cleanpos(int count);
188 static void _cache_cleandefered(void);
189 static void _cache_unlink(struct namecache *ncp);
190 
191 /*
192  * The new name cache statistics
193  */
194 SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
195 static int numneg;
196 SYSCTL_INT(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
197     "Number of negative namecache entries");
198 static int numcache;
199 SYSCTL_INT(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
200     "Number of namecaches entries");
201 static u_long numcalls;
202 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcalls, CTLFLAG_RD, &numcalls, 0,
203     "Number of namecache lookups");
204 static u_long numchecks;
205 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numchecks, CTLFLAG_RD, &numchecks, 0,
206     "Number of checked entries in namecache lookups");
207 
208 struct nchstats nchstats[SMP_MAXCPU];
209 /*
210  * Export VFS cache effectiveness statistics to user-land.
211  *
212  * The statistics are left for aggregation to user-land so
213  * neat things can be achieved, like observing per-CPU cache
214  * distribution.
215  */
216 static int
217 sysctl_nchstats(SYSCTL_HANDLER_ARGS)
218 {
219 	struct globaldata *gd;
220 	int i, error;
221 
222 	error = 0;
223 	for (i = 0; i < ncpus; ++i) {
224 		gd = globaldata_find(i);
225 		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
226 			sizeof(struct nchstats))))
227 			break;
228 	}
229 
230 	return (error);
231 }
232 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
233   0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
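/*
 * Illustrative user-land sketch of consuming the export above: read the
 * per-cpu array and aggregate it.  The field names (ncs_goodhits,
 * ncs_neghits, ncs_miss) follow the traditional BSD struct nchstats and
 * are assumptions here; verify them against the system headers.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct nchstats *st;
	size_t len;
	u_long good = 0, neg = 0, miss = 0;
	int i, n;

	/* first call sizes the buffer: one struct nchstats per cpu */
	if (sysctlbyname("vfs.cache.nchstats", NULL, &len, NULL, 0) < 0)
		return (1);
	if ((st = malloc(len)) == NULL ||
	    sysctlbyname("vfs.cache.nchstats", st, &len, NULL, 0) < 0)
		return (1);
	n = len / sizeof(*st);
	for (i = 0; i < n; ++i) {
		good += st[i].ncs_goodhits;
		neg += st[i].ncs_neghits;
		miss += st[i].ncs_miss;
	}
	printf("good %lu neg %lu miss %lu across %d cpus\n",
	       good, neg, miss, n);
	free(st);
	return (0);
}
#endif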
234 
235 static struct namecache *cache_zap(struct namecache *ncp, int nonblock);
236 
237 /*
238  * Namespace locking.  The caller must already hold a reference to the
239  * namecache structure in order to lock/unlock it.  This function prevents
240  * the namespace from being created or destroyed by accessors other than
241  * the lock holder.
242  *
243  * Note that holding a locked namecache structure prevents other threads
244  * from making namespace changes (e.g. deleting or creating), prevents
245  * vnode association state changes by other threads, and prevents the
246  * namecache entry from being resolved or unresolved by other threads.
247  *
248  * The lock owner has full authority to associate/disassociate vnodes
249  * and resolve/unresolve the locked ncp.
250  *
251  * The primary lock field is nc_exlocks.  nc_locktd is set after the
252  * fact (when locking) or cleared prior to unlocking.
253  *
254  * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
255  *	     or recycled, but it does NOT help you if the vnode had already
256  *	     initiated a recyclement.  If this is important, use cache_get()
257  *	     rather than cache_lock() (and deal with the differences in the
258  *	     way the refs counter is handled).  Or, alternatively, make an
259  *	     unconditional call to cache_validate() or cache_resolve()
260  *	     after cache_lock() returns.
261  *
262  * MPSAFE
263  */
264 static
265 void
266 _cache_lock(struct namecache *ncp)
267 {
268 	thread_t td;
269 	int didwarn;
270 	int error;
271 	u_int count;
272 
273 	KKASSERT(ncp->nc_refs != 0);
274 	didwarn = 0;
275 	td = curthread;
276 
277 	for (;;) {
278 		count = ncp->nc_exlocks;
279 
280 		if (count == 0) {
281 			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
282 				/*
283 				 * The vp associated with a locked ncp must
284 				 * be held to prevent it from being recycled.
285 				 *
286 				 * WARNING!  If VRECLAIMED is set the vnode
287 				 * could already be in the middle of a recycle.
288 				 * Callers must use cache_vref() or
289 				 * cache_vget() on the locked ncp to
290 				 * validate the vp or set the cache entry
291 				 * to unresolved.
292 				 *
293 				 * NOTE! vhold() is allowed if we hold a
294 				 *	 lock on the ncp (which we do).
295 				 */
296 				ncp->nc_locktd = td;
297 				if (ncp->nc_vp)
298 					vhold(ncp->nc_vp);	/* MPSAFE */
299 				break;
300 			}
301 			/* cmpset failed */
302 			continue;
303 		}
304 		if (ncp->nc_locktd == td) {
305 			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
306 					      count + 1)) {
307 				break;
308 			}
309 			/* cmpset failed */
310 			continue;
311 		}
312 		tsleep_interlock(ncp, 0);
313 		if (atomic_cmpset_int(&ncp->nc_exlocks, count,
314 				      count | NC_EXLOCK_REQ) == 0) {
315 			/* cmpset failed */
316 			continue;
317 		}
318 		error = tsleep(ncp, PINTERLOCKED, "clock", nclockwarn);
319 		if (error == EWOULDBLOCK) {
320 			if (didwarn == 0) {
321 				didwarn = ticks;
322 				kprintf("[diagnostic] cache_lock: blocked "
323 					"on %p",
324 					ncp);
325 				kprintf(" \"%*.*s\"\n",
326 					ncp->nc_nlen, ncp->nc_nlen,
327 					ncp->nc_name);
328 			}
329 		}
330 	}
331 	if (didwarn) {
332 		kprintf("[diagnostic] cache_lock: unblocked %*.*s after "
333 			"%d secs\n",
334 			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
335 			(int)(ticks - didwarn) / hz);
336 	}
337 }
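/*
 * Illustrative sketch of the WARNING above: after a plain lock the
 * associated vnode may already be mid-recyclement, so unconditionally
 * (re)resolve before trusting nc_vp.  Hypothetical helper built only
 * from functions defined or referenced in this file.
 */
#if 0
static int
example_lock_and_resolve(struct nchandle *nch, struct ucred *cred)
{
	int error;

	cache_lock(nch);			/* nch must already be refd */
	error = cache_resolve(nch, cred);	/* re-resolves if needed */
	if (error)
		cache_unlock(nch);		/* unlocked on failure */
	return (error);				/* locked + resolved on 0 */
}
#endif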
338 
339 /*
340  * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
341  *	 such as the case where one of its children is locked.
342  *
343  * MPSAFE
344  */
345 static
346 int
347 _cache_lock_nonblock(struct namecache *ncp)
348 {
349 	thread_t td;
350 	u_int count;
351 
352 	td = curthread;
353 
354 	for (;;) {
355 		count = ncp->nc_exlocks;
356 
357 		if (count == 0) {
358 			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
359 				/*
360 				 * The vp associated with a locked ncp must
361 				 * be held to prevent it from being recycled.
362 				 *
363 				 * WARNING!  If VRECLAIMED is set the vnode
364 				 * could already be in the middle of a recycle.
365 				 * Callers must use cache_vref() or
366 				 * cache_vget() on the locked ncp to
367 				 * validate the vp or set the cache entry
368 				 * to unresolved.
369 				 *
370 				 * NOTE! vhold() is allowed if we hold a
371 				 *	 lock on the ncp (which we do).
372 				 */
373 				ncp->nc_locktd = td;
374 				if (ncp->nc_vp)
375 					vhold(ncp->nc_vp);	/* MPSAFE */
376 				break;
377 			}
378 			/* cmpset failed */
379 			continue;
380 		}
381 		if (ncp->nc_locktd == td) {
382 			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
383 					      count + 1)) {
384 				break;
385 			}
386 			/* cmpset failed */
387 			continue;
388 		}
389 		return(EWOULDBLOCK);
390 	}
391 	return(0);
392 }
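/*
 * Illustrative sketch of the non-blocking lock: on EWOULDBLOCK fall back
 * to the blocking path, the same shape cache_relock() uses further below.
 * The snippet is hypothetical usage, not lifted from a caller.
 */
#if 0
	if (cache_lock_nonblock(nch) != 0) {
		/* contended: block for it (or unlock peers first) */
		cache_lock(nch);
	}
	/* ... nch is now locked ... */
	cache_unlock(nch);
#endif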
393 
394 /*
395  * Helper function
396  *
397  * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
398  *
399  *	 nc_locktd must be NULLed out prior to nc_exlocks getting cleared.
400  *
401  * MPSAFE
402  */
403 static
404 void
405 _cache_unlock(struct namecache *ncp)
406 {
407 	thread_t td __debugvar = curthread;
408 	u_int count;
409 
410 	KKASSERT(ncp->nc_refs >= 0);
411 	KKASSERT(ncp->nc_exlocks > 0);
412 	KKASSERT(ncp->nc_locktd == td);
413 
414 	count = ncp->nc_exlocks;
415 	if ((count & ~NC_EXLOCK_REQ) == 1) {
416 		ncp->nc_locktd = NULL;
417 		if (ncp->nc_vp)
418 			vdrop(ncp->nc_vp);
419 	}
420 	for (;;) {
421 		if ((count & ~NC_EXLOCK_REQ) == 1) {
422 			if (atomic_cmpset_int(&ncp->nc_exlocks, count, 0)) {
423 				if (count & NC_EXLOCK_REQ)
424 					wakeup(ncp);
425 				break;
426 			}
427 		} else {
428 			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
429 					      count - 1)) {
430 				break;
431 			}
432 		}
433 		count = ncp->nc_exlocks;
434 	}
435 }
436 
437 
438 /*
439  * cache_hold() and cache_drop() prevent the premature deletion of a
440  * namecache entry but do not prevent operations (such as zapping) on
441  * that namecache entry.
442  *
443  * This routine may only be called from outside this source module if
444  * nc_refs is already at least 1.
445  *
446  * This is a rare case where callers are allowed to hold a spinlock,
447  * so we cannot acquire one ourselves.
448  *
449  * MPSAFE
450  */
451 static __inline
452 struct namecache *
453 _cache_hold(struct namecache *ncp)
454 {
455 	atomic_add_int(&ncp->nc_refs, 1);
456 	return(ncp);
457 }
458 
459 /*
460  * Drop a cache entry, taking care to deal with races.
461  *
462  * For potential 1->0 transitions we must hold the ncp lock to safely
463  * test its flags.  An unresolved entry with no children must be zapped
464  * to avoid leaks.
465  *
466  * The call to cache_zap() itself will handle all remaining races and
467  * will decrement the ncp's refs regardless.  If we are resolved or
468  * have children nc_refs can safely be dropped to 0 without having to
469  * zap the entry.
470  *
471  * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
472  *
473  * NOTE: cache_zap() may return a non-NULL referenced parent which must
474  *	 be dropped in a loop.
475  *
476  * MPSAFE
477  */
478 static __inline
479 void
480 _cache_drop(struct namecache *ncp)
481 {
482 	int refs;
483 
484 	while (ncp) {
485 		KKASSERT(ncp->nc_refs > 0);
486 		refs = ncp->nc_refs;
487 
488 		if (refs == 1) {
489 			if (_cache_lock_nonblock(ncp) == 0) {
490 				ncp->nc_flag &= ~NCF_DEFEREDZAP;
491 				if ((ncp->nc_flag & NCF_UNRESOLVED) &&
492 				    TAILQ_EMPTY(&ncp->nc_list)) {
493 					ncp = cache_zap(ncp, 1);
494 					continue;
495 				}
496 				if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
497 					_cache_unlock(ncp);
498 					break;
499 				}
500 				_cache_unlock(ncp);
501 			}
502 		} else {
503 			if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
504 				break;
505 		}
506 		cpu_pause();
507 	}
508 }
509 
510 /*
511  * Link a new namecache entry to its parent and to the hash table.  Be
512  * careful to avoid races if vhold() blocks in the future.
513  *
514  * Both ncp and par must be referenced and locked.
515  *
516  * NOTE: The hash table spinlock is likely held during this call, we
517  *	 can't do anything fancy.
518  *
519  * MPSAFE
520  */
521 static void
522 _cache_link_parent(struct namecache *ncp, struct namecache *par,
523 		   struct nchash_head *nchpp)
524 {
525 	KKASSERT(ncp->nc_parent == NULL);
526 	ncp->nc_parent = par;
527 	ncp->nc_head = nchpp;
528 
529 	/*
530 	 * Set inheritance flags.  Note that the parent flags may be
531 	 * stale due to getattr potentially not having been run yet
532 	 * (it gets run during nlookup()'s).
533 	 */
534 	ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
535 	if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
536 		ncp->nc_flag |= NCF_SF_PNOCACHE;
537 	if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
538 		ncp->nc_flag |= NCF_UF_PCACHE;
539 
540 	LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash);
541 
542 	if (TAILQ_EMPTY(&par->nc_list)) {
543 		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
544 		/*
545 		 * Any vp associated with an ncp which has children must
546 		 * be held to prevent it from being recycled.
547 		 */
548 		if (par->nc_vp)
549 			vhold(par->nc_vp);
550 	} else {
551 		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
552 	}
553 }
554 
555 /*
556  * Remove the parent and hash associations from a namecache structure.
557  * If this is the last child of the parent the cache_drop(par) will
558  * attempt to recursively zap the parent.
559  *
560  * ncp must be locked.  This routine will acquire a temporary lock on
561  * the parent as well as the appropriate hash chain.
562  *
563  * MPSAFE
564  */
565 static void
566 _cache_unlink_parent(struct namecache *ncp)
567 {
568 	struct namecache *par;
569 	struct vnode *dropvp;
570 
571 	if ((par = ncp->nc_parent) != NULL) {
572 		KKASSERT(ncp->nc_parent == par);
573 		_cache_hold(par);
574 		_cache_lock(par);
575 		spin_lock(&ncp->nc_head->spin);
576 		LIST_REMOVE(ncp, nc_hash);
577 		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
578 		dropvp = NULL;
579 		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
580 			dropvp = par->nc_vp;
581 		spin_unlock(&ncp->nc_head->spin);
582 		ncp->nc_parent = NULL;
583 		ncp->nc_head = NULL;
584 		_cache_unlock(par);
585 		_cache_drop(par);
586 
587 		/*
588 		 * We can only safely vdrop with no spinlocks held.
589 		 */
590 		if (dropvp)
591 			vdrop(dropvp);
592 	}
593 }
594 
595 /*
596  * Allocate a new namecache structure.  Most of the code does not require
597  * zero-termination of the string but it makes vop_compat_ncreate() easier.
598  *
599  * MPSAFE
600  */
601 static struct namecache *
602 cache_alloc(int nlen)
603 {
604 	struct namecache *ncp;
605 
606 	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
607 	if (nlen)
608 		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
609 	ncp->nc_nlen = nlen;
610 	ncp->nc_flag = NCF_UNRESOLVED;
611 	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
612 	ncp->nc_refs = 1;
613 
614 	TAILQ_INIT(&ncp->nc_list);
615 	_cache_lock(ncp);
616 	return(ncp);
617 }
618 
619 /*
620  * Can only be called for the case where the ncp has never been
621  * associated with anything (so no spinlocks are needed).
622  *
623  * MPSAFE
624  */
625 static void
626 _cache_free(struct namecache *ncp)
627 {
628 	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
629 	if (ncp->nc_name)
630 		kfree(ncp->nc_name, M_VFSCACHE);
631 	kfree(ncp, M_VFSCACHE);
632 }
633 
634 /*
635  * MPSAFE
636  */
637 void
638 cache_zero(struct nchandle *nch)
639 {
640 	nch->ncp = NULL;
641 	nch->mount = NULL;
642 }
643 
644 /*
645  * Ref and deref a namecache structure.
646  *
647  * The caller must specify a stable ncp pointer, typically meaning the
648  * ncp is already referenced but this can also occur indirectly through
649  * e.g. holding a lock on a direct child.
650  *
651  * WARNING: Caller may hold an unrelated read spinlock, which means we can't
652  *	    use read spinlocks here.
653  *
654  * MPSAFE if nch is stable.
655  */
656 struct nchandle *
657 cache_hold(struct nchandle *nch)
658 {
659 	_cache_hold(nch->ncp);
660 	atomic_add_int(&nch->mount->mnt_refs, 1);
661 	return(nch);
662 }
663 
664 /*
665  * Create a copy of a namecache handle for an already-referenced
666  * entry.
667  *
668  * MPSAFE if nch is stable.
669  */
670 void
671 cache_copy(struct nchandle *nch, struct nchandle *target)
672 {
673 	*target = *nch;
674 	if (target->ncp)
675 		_cache_hold(target->ncp);
676 	atomic_add_int(&nch->mount->mnt_refs, 1);
677 }
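/*
 * Illustrative sketch: cache_copy() produces an independently referenced
 * handle which must be balanced by its own cache_drop().  Hypothetical
 * helper.
 */
#if 0
static void
example_copy_handle(struct nchandle *nch)
{
	struct nchandle tmp;

	cache_copy(nch, &tmp);	/* +1 ref on the ncp and on the mount */
	/* ... use tmp without disturbing the original handle ... */
	cache_drop(&tmp);	/* balances the copy; zeroes tmp out */
}
#endif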
678 
679 /*
680  * MPSAFE if nch is stable.
681  */
682 void
683 cache_changemount(struct nchandle *nch, struct mount *mp)
684 {
685 	atomic_add_int(&nch->mount->mnt_refs, -1);
686 	nch->mount = mp;
687 	atomic_add_int(&nch->mount->mnt_refs, 1);
688 }
689 
690 /*
691  * MPSAFE
692  */
693 void
694 cache_drop(struct nchandle *nch)
695 {
696 	atomic_add_int(&nch->mount->mnt_refs, -1);
697 	_cache_drop(nch->ncp);
698 	nch->ncp = NULL;
699 	nch->mount = NULL;
700 }
701 
702 /*
703  * MPSAFE
704  */
705 void
706 cache_lock(struct nchandle *nch)
707 {
708 	_cache_lock(nch->ncp);
709 }
710 
711 /*
712  * Relock nch1 given an unlocked nch1 and a locked nch2.  The caller
713  * is responsible for checking both for validity on return as they
714  * may have become invalid.
715  *
716  * We have to deal with potential deadlocks here: just ping-pong
717  * the lock until we get it (we will always block somewhere when
718  * looping so this is not cpu-intensive).
719  *
720  * which = 0	nch1 not locked, nch2 is locked
721  * which = 1	nch1 is locked, nch2 is not locked
722  */
723 void
724 cache_relock(struct nchandle *nch1, struct ucred *cred1,
725 	     struct nchandle *nch2, struct ucred *cred2)
726 {
727 	int which;
728 
729 	which = 0;
730 
731 	for (;;) {
732 		if (which == 0) {
733 			if (cache_lock_nonblock(nch1) == 0) {
734 				cache_resolve(nch1, cred1);
735 				break;
736 			}
737 			cache_unlock(nch2);
738 			cache_lock(nch1);
739 			cache_resolve(nch1, cred1);
740 			which = 1;
741 		} else {
742 			if (cache_lock_nonblock(nch2) == 0) {
743 				cache_resolve(nch2, cred2);
744 				break;
745 			}
746 			cache_unlock(nch1);
747 			cache_lock(nch2);
748 			cache_resolve(nch2, cred2);
749 			which = 0;
750 		}
751 	}
752 }
753 
754 /*
755  * MPSAFE
756  */
757 int
758 cache_lock_nonblock(struct nchandle *nch)
759 {
760 	return(_cache_lock_nonblock(nch->ncp));
761 }
762 
763 
764 /*
765  * MPSAFE
766  */
767 void
768 cache_unlock(struct nchandle *nch)
769 {
770 	_cache_unlock(nch->ncp);
771 }
772 
773 /*
774  * ref-and-lock, unlock-and-deref functions.
775  *
776  * This function is primarily used by nlookup.  Even though cache_lock
777  * holds the vnode, it is possible that the vnode may have already
778  * initiated a recyclement.
779  *
780  * We want cache_get() to return a definitively usable vnode or a
781  * definitively unresolved ncp.
782  *
783  * MPSAFE
784  */
785 static
786 struct namecache *
787 _cache_get(struct namecache *ncp)
788 {
789 	_cache_hold(ncp);
790 	_cache_lock(ncp);
791 	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
792 		_cache_setunresolved(ncp);
793 	return(ncp);
794 }
795 
796 /*
797  * This is a special form of _cache_lock() which only succeeds if
798  * it can get a pristine, non-recursive lock.  The caller must have
799  * already ref'd the ncp.
800  *
801  * On success the ncp will be locked, on failure it will not.  The
802  * ref count does not change either way.
803  *
804  * We want _cache_lock_special() (on success) to return a definitively
805  * usable vnode or a definitively unresolved ncp.
806  *
807  * MPSAFE
808  */
809 static int
810 _cache_lock_special(struct namecache *ncp)
811 {
812 	if (_cache_lock_nonblock(ncp) == 0) {
813 		if ((ncp->nc_exlocks & ~NC_EXLOCK_REQ) == 1) {
814 			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
815 				_cache_setunresolved(ncp);
816 			return(0);
817 		}
818 		_cache_unlock(ncp);
819 	}
820 	return(EWOULDBLOCK);
821 }
822 
823 
824 /*
825  * NOTE: The same nchandle can be passed for both arguments.
826  *
827  * MPSAFE
828  */
829 void
830 cache_get(struct nchandle *nch, struct nchandle *target)
831 {
832 	KKASSERT(nch->ncp->nc_refs > 0);
833 	target->mount = nch->mount;
834 	target->ncp = _cache_get(nch->ncp);
835 	atomic_add_int(&target->mount->mnt_refs, 1);
836 }
837 
838 /*
839  * MPSAFE
840  */
841 static __inline
842 void
843 _cache_put(struct namecache *ncp)
844 {
845 	_cache_unlock(ncp);
846 	_cache_drop(ncp);
847 }
848 
849 /*
850  * MPSAFE
851  */
852 void
853 cache_put(struct nchandle *nch)
854 {
855 	atomic_add_int(&nch->mount->mnt_refs, -1);
856 	_cache_put(nch->ncp);
857 	nch->ncp = NULL;
858 	nch->mount = NULL;
859 }
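/*
 * Illustrative sketch: cache_get()/cache_put() bracket a ref+lock pair,
 * and _cache_get() above demotes a reclaimed vnode to an unresolved ncp
 * so the caller always sees a definitive state.  Hypothetical helper.
 */
#if 0
static void
example_get_put(struct nchandle *nch)
{
	struct nchandle lnch;

	cache_get(nch, &lnch);			/* referenced + locked */
	if (lnch.ncp->nc_flag & NCF_UNRESOLVED) {
		/* definitively unresolved: resolve it or give up */
	}
	cache_put(&lnch);			/* unlock + drop, zeroes lnch */
}
#endif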
860 
861 /*
862  * Resolve an unresolved ncp by associating a vnode with it.  If the
863  * vnode is NULL, a negative cache entry is created.
864  *
865  * The ncp should be locked on entry and will remain locked on return.
866  *
867  * MPSAFE
868  */
869 static
870 void
871 _cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
872 {
873 	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
874 
875 	if (vp != NULL) {
876 		/*
877 		 * Any vp associated with an ncp which has children must
878 		 * be held.  Any vp associated with a locked ncp must be held.
879 		 */
880 		if (!TAILQ_EMPTY(&ncp->nc_list))
881 			vhold(vp);
882 		spin_lock(&vp->v_spin);
883 		ncp->nc_vp = vp;
884 		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
885 		spin_unlock(&vp->v_spin);
886 		if (ncp->nc_exlocks)
887 			vhold(vp);
888 
889 		/*
890 		 * Set auxiliary flags
891 		 */
892 		switch(vp->v_type) {
893 		case VDIR:
894 			ncp->nc_flag |= NCF_ISDIR;
895 			break;
896 		case VLNK:
897 			ncp->nc_flag |= NCF_ISSYMLINK;
898 			/* XXX cache the contents of the symlink */
899 			break;
900 		default:
901 			break;
902 		}
903 		atomic_add_int(&numcache, 1);
904 		ncp->nc_error = 0;
905 		/* XXX: this is a hack to work around the lack of a real
906 		 * pfs vfs implementation */
907 		if (mp != NULL)
908 			if (strncmp(mp->mnt_stat.f_fstypename, "null", 5) == 0)
909 				vp->v_pfsmp = mp;
910 	} else {
911 		/*
912 		 * When creating a negative cache hit we set the
913 		 * namecache_gen.  A later resolve will clean out the
914 		 * negative cache hit if the mount point's namecache_gen
915 		 * has changed.  Used by devfs, could also be used by
916 		 * other remote FSs.
917 		 */
918 		ncp->nc_vp = NULL;
919 		spin_lock(&ncspin);
920 		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
921 		++numneg;
922 		spin_unlock(&ncspin);
923 		ncp->nc_error = ENOENT;
924 		if (mp)
925 			VFS_NCPGEN_SET(mp, ncp);
926 	}
927 	ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
928 }
929 
930 /*
931  * MPSAFE
932  */
933 void
934 cache_setvp(struct nchandle *nch, struct vnode *vp)
935 {
936 	_cache_setvp(nch->mount, nch->ncp, vp);
937 }
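/*
 * Illustrative sketch: the tail of a filesystem's nresolve method, which
 * must leave a locked, unresolved ncp either positively or negatively
 * resolved.  lookup_error is a placeholder variable; the flow is a
 * sketch, not a prescription.
 */
#if 0
	if (lookup_error == 0)
		cache_setvp(nch, vp);		/* positive entry */
	else if (lookup_error == ENOENT)
		cache_setvp(nch, NULL);		/* negative entry */
#endif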
938 
939 /*
940  * MPSAFE
941  */
942 void
943 cache_settimeout(struct nchandle *nch, int nticks)
944 {
945 	struct namecache *ncp = nch->ncp;
946 
947 	if ((ncp->nc_timeout = ticks + nticks) == 0)
948 		ncp->nc_timeout = 1;
949 }
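/*
 * Illustrative sketch: bound the lifetime of a resolved entry, e.g. an
 * NFS-style 30-second attribute window.  hz converts seconds to ticks.
 */
#if 0
	cache_settimeout(nch, 30 * hz);		/* expire ~30s from now */
#endif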
950 
951 /*
952  * Disassociate the vnode or negative-cache association and mark a
953  * namecache entry as unresolved again.  Note that the ncp is still
954  * left in the hash table and still linked to its parent.
955  *
956  * The ncp should be locked and refd on entry and will remain locked and refd
957  * on return.
958  *
959  * This routine is normally never called on a directory containing children.
960  * However, NFS often does just that in its rename() code as a cop-out to
961  * avoid complex namespace operations.  This disconnects a directory vnode
962  * from its namecache and can cause the OLDAPI and NEWAPI to get out of
963  * sync.
964  *
965  * MPSAFE
966  */
967 static
968 void
969 _cache_setunresolved(struct namecache *ncp)
970 {
971 	struct vnode *vp;
972 
973 	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
974 		ncp->nc_flag |= NCF_UNRESOLVED;
975 		ncp->nc_timeout = 0;
976 		ncp->nc_error = ENOTCONN;
977 		if ((vp = ncp->nc_vp) != NULL) {
978 			atomic_add_int(&numcache, -1);
979 			spin_lock(&vp->v_spin);
980 			ncp->nc_vp = NULL;
981 			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
982 			spin_unlock(&vp->v_spin);
983 
984 			/*
985 			 * Any vp associated with an ncp with children is
986 			 * held by that ncp.  Any vp associated with a locked
987 			 * ncp is held by that ncp.  These conditions must be
988 			 * undone when the vp is cleared out from the ncp.
989 			 */
990 			if (!TAILQ_EMPTY(&ncp->nc_list))
991 				vdrop(vp);
992 			if (ncp->nc_exlocks)
993 				vdrop(vp);
994 		} else {
995 			spin_lock(&ncspin);
996 			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
997 			--numneg;
998 			spin_unlock(&ncspin);
999 		}
1000 		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
1001 	}
1002 }
1003 
1004 /*
1005  * The cache_nresolve() code calls this function to automatically
1006  * set a resolved cache element to unresolved if it has timed out
1007  * or if it is a negative cache hit and the mount point namecache_gen
1008  * has changed.
1009  *
1010  * MPSAFE
1011  */
1012 static __inline void
1013 _cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
1014 {
1015 	/*
1016 	 * Already in an unresolved state, nothing to do.
1017 	 */
1018 	if (ncp->nc_flag & NCF_UNRESOLVED)
1019 		return;
1020 
1021 	/*
1022 	 * Try to zap entries that have timed out.  We have
1023  * to be careful here because locked leaves may depend
1024 	 * on the vnode remaining intact in a parent, so only
1025 	 * do this under very specific conditions.
1026 	 */
1027 	if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
1028 	    TAILQ_EMPTY(&ncp->nc_list)) {
1029 		_cache_setunresolved(ncp);
1030 		return;
1031 	}
1032 
1033 	/*
1034 	 * If a resolved negative cache hit is invalid due to
1035 	 * the mount's namecache generation being bumped, zap it.
1036 	 */
1037 	if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) {
1038 		_cache_setunresolved(ncp);
1039 		return;
1040 	}
1041 }
1042 
1043 /*
1044  * MPSAFE
1045  */
1046 void
1047 cache_setunresolved(struct nchandle *nch)
1048 {
1049 	_cache_setunresolved(nch->ncp);
1050 }
1051 
1052 /*
1053  * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
1054  * looking for matches.  This flag tells the lookup code when it must
1055  * check for a mount linkage and also prevents the directories in question
1056  * from being deleted or renamed.
1057  *
1058  * MPSAFE
1059  */
1060 static
1061 int
1062 cache_clrmountpt_callback(struct mount *mp, void *data)
1063 {
1064 	struct nchandle *nch = data;
1065 
1066 	if (mp->mnt_ncmounton.ncp == nch->ncp)
1067 		return(1);
1068 	if (mp->mnt_ncmountpt.ncp == nch->ncp)
1069 		return(1);
1070 	return(0);
1071 }
1072 
1073 /*
1074  * MPSAFE
1075  */
1076 void
1077 cache_clrmountpt(struct nchandle *nch)
1078 {
1079 	int count;
1080 
1081 	count = mountlist_scan(cache_clrmountpt_callback, nch,
1082 			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
1083 	if (count == 0)
1084 		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
1085 }
1086 
1087 /*
1088  * Invalidate portions of the namecache topology given a starting entry.
1089  * The passed ncp is set to an unresolved state.
1090  *
1091  * The passed ncp must be referenced and locked.  The routine may unlock
1092  * and relock ncp several times, and will recheck the children and loop
1093  * to catch races.  When done the passed ncp will be returned with the
1094  * reference and lock intact.
1095  *
1096  * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
1097  *			  that the physical underlying nodes have been
1098  *			  destroyed... as in deleted.  For example, when
1099  *			  a directory is removed.  This will cause record
1100  *			  lookups on the name to no longer be able to find
1101  *			  the record and tells the resolver to return failure
1102  *			  rather than trying to resolve through the parent.
1103  *
1104  *			  The topology itself, including ncp->nc_name,
1105  *			  remains intact.
1106  *
1107  *			  This only applies to the passed ncp, if CINV_CHILDREN
1108  *			  is specified the children are not flagged.
1109  *
1110  * CINV_CHILDREN	- Set all children (recursively) to an unresolved
1111  *			  state as well.
1112  *
1113  *			  Note that this will also have the side effect of
1114  *			  cleaning out any unreferenced nodes in the topology
1115  *			  from the leaves up as the recursion backs out.
1116  *
1117  * Note that the topology for any referenced nodes remains intact, but
1118  * the nodes will be marked as having been destroyed and will be set
1119  * to an unresolved state.
1120  *
1121  * It is possible for cache_inval() to race a cache_resolve(), meaning that
1122  * the namecache entry may not actually be invalidated on return if it was
1123  * revalidated while recursing down into its children.  This code guarantees
1124  * that the node(s) will go through an invalidation cycle, but does not
1125  * guarantee that they will remain in an invalidated state.
1126  *
1127  * Returns non-zero if a revalidation was detected during the invalidation
1128  * recursion, zero otherwise.  Note that since only the original ncp is
1129  * locked the revalidation ultimately can only indicate that the original ncp
1130  * *MIGHT* have been re-resolved.
1131  *
1132  * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
1133  * have to avoid blowing out the kernel stack.  We do this by saving the
1134  * deep namecache node and aborting the recursion, then re-recursing at that
1135  * node using a depth-first algorithm in order to allow multiple deep
1136  * recursions to chain through each other, then we restart the invalidation
1137  * from scratch.
1138  *
1139  * MPSAFE
1140  */
1141 
1142 struct cinvtrack {
1143 	struct namecache *resume_ncp;
1144 	int depth;
1145 };
1146 
1147 static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);
1148 
1149 static
1150 int
1151 _cache_inval(struct namecache *ncp, int flags)
1152 {
1153 	struct cinvtrack track;
1154 	struct namecache *ncp2;
1155 	int r;
1156 
1157 	track.depth = 0;
1158 	track.resume_ncp = NULL;
1159 
1160 	for (;;) {
1161 		r = _cache_inval_internal(ncp, flags, &track);
1162 		if (track.resume_ncp == NULL)
1163 			break;
1164 		kprintf("Warning: deep namecache recursion at %s\n",
1165 			ncp->nc_name);
1166 		_cache_unlock(ncp);
1167 		while ((ncp2 = track.resume_ncp) != NULL) {
1168 			track.resume_ncp = NULL;
1169 			_cache_lock(ncp2);
1170 			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
1171 					     &track);
1172 			_cache_put(ncp2);
1173 		}
1174 		_cache_lock(ncp);
1175 	}
1176 	return(r);
1177 }
1178 
1179 int
1180 cache_inval(struct nchandle *nch, int flags)
1181 {
1182 	return(_cache_inval(nch->ncp, flags));
1183 }
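/*
 * Illustrative sketch: invalidating a removed directory flags it
 * DESTROYED and unresolves its subtree.  nch is assumed referenced and
 * locked, per the rules above.
 */
#if 0
	if (cache_inval(nch, CINV_DESTROY | CINV_CHILDREN) != 0) {
		/*
		 * A racing re-resolve was detected; callers that need
		 * the entry to stay invalid must loop or escalate.
		 */
	}
#endif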
1184 
1185 /*
1186  * Helper for _cache_inval().  The passed ncp is refd and locked and
1187  * remains that way on return, but may be unlocked/relocked multiple
1188  * times by the routine.
1189  */
1190 static int
1191 _cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
1192 {
1193 	struct namecache *kid;
1194 	struct namecache *nextkid;
1195 	int rcnt = 0;
1196 
1197 	KKASSERT(ncp->nc_exlocks);
1198 
1199 	_cache_setunresolved(ncp);
1200 	if (flags & CINV_DESTROY)
1201 		ncp->nc_flag |= NCF_DESTROYED;
1202 	if ((flags & CINV_CHILDREN) &&
1203 	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
1204 	) {
1205 		_cache_hold(kid);
1206 		if (++track->depth > MAX_RECURSION_DEPTH) {
1207 			track->resume_ncp = ncp;
1208 			_cache_hold(ncp);
1209 			++rcnt;
1210 		}
1211 		_cache_unlock(ncp);
1212 		while (kid) {
1213 			if (track->resume_ncp) {
1214 				_cache_drop(kid);
1215 				break;
1216 			}
1217 			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
1218 				_cache_hold(nextkid);
1219 			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
1220 			    TAILQ_FIRST(&kid->nc_list)
1221 			) {
1222 				_cache_lock(kid);
1223 				rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track);
1224 				_cache_unlock(kid);
1225 			}
1226 			_cache_drop(kid);
1227 			kid = nextkid;
1228 		}
1229 		--track->depth;
1230 		_cache_lock(ncp);
1231 	}
1232 
1233 	/*
1234 	 * Someone could have gotten in there while ncp was unlocked,
1235 	 * retry if so.
1236 	 */
1237 	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1238 		++rcnt;
1239 	return (rcnt);
1240 }
1241 
1242 /*
1243  * Invalidate a vnode's namecache associations.  To avoid races against
1244  * the resolver we do not invalidate a node which we previously invalidated
1245  * but which was then re-resolved while we were in the invalidation loop.
1246  *
1247  * Returns non-zero if any namecache entries remain after the invalidation
1248  * loop completed.
1249  *
1250  * NOTE: Unlike the namecache topology which guarantees that ncp's will not
1251  *	 be ripped out of the topology while held, the vnode's v_namecache
1252  *	 list has no such restriction.  NCP's can be ripped out of the list
1253  *	 at virtually any time if not locked, even if held.
1254  *
1255  *	 In addition, the v_namecache list itself must be locked via
1256  *	 the vnode's spinlock.
1257  *
1258  * MPSAFE
1259  */
1260 int
1261 cache_inval_vp(struct vnode *vp, int flags)
1262 {
1263 	struct namecache *ncp;
1264 	struct namecache *next;
1265 
1266 restart:
1267 	spin_lock(&vp->v_spin);
1268 	ncp = TAILQ_FIRST(&vp->v_namecache);
1269 	if (ncp)
1270 		_cache_hold(ncp);
1271 	while (ncp) {
1272 		/* loop entered with ncp held and vp spin-locked */
1273 		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1274 			_cache_hold(next);
1275 		spin_unlock(&vp->v_spin);
1276 		_cache_lock(ncp);
1277 		if (ncp->nc_vp != vp) {
1278 			kprintf("Warning: cache_inval_vp: race-A detected on "
1279 				"%s\n", ncp->nc_name);
1280 			_cache_put(ncp);
1281 			if (next)
1282 				_cache_drop(next);
1283 			goto restart;
1284 		}
1285 		_cache_inval(ncp, flags);
1286 		_cache_put(ncp);		/* also releases reference */
1287 		ncp = next;
1288 		spin_lock(&vp->v_spin);
1289 		if (ncp && ncp->nc_vp != vp) {
1290 			spin_unlock(&vp->v_spin);
1291 			kprintf("Warning: cache_inval_vp: race-B detected on "
1292 				"%s\n", ncp->nc_name);
1293 			_cache_drop(ncp);
1294 			goto restart;
1295 		}
1296 	}
1297 	spin_unlock(&vp->v_spin);
1298 	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1299 }
1300 
1301 /*
1302  * This routine is used instead of the normal cache_inval_vp() when we
1303  * are trying to recycle otherwise good vnodes.
1304  *
1305  * Return 0 on success, non-zero if not all namecache records could be
1306  * disassociated from the vnode (for various reasons).
1307  *
1308  * MPSAFE
1309  */
1310 int
1311 cache_inval_vp_nonblock(struct vnode *vp)
1312 {
1313 	struct namecache *ncp;
1314 	struct namecache *next;
1315 
1316 	spin_lock(&vp->v_spin);
1317 	ncp = TAILQ_FIRST(&vp->v_namecache);
1318 	if (ncp)
1319 		_cache_hold(ncp);
1320 	while (ncp) {
1321 		/* loop entered with ncp held */
1322 		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1323 			_cache_hold(next);
1324 		spin_unlock(&vp->v_spin);
1325 		if (_cache_lock_nonblock(ncp)) {
1326 			_cache_drop(ncp);
1327 			if (next)
1328 				_cache_drop(next);
1329 			goto done;
1330 		}
1331 		if (ncp->nc_vp != vp) {
1332 			kprintf("Warning: cache_inval_vp_nonblock: race-A "
1333 				"detected on %s\n", ncp->nc_name);
1334 			_cache_put(ncp);
1335 			if (next)
1336 				_cache_drop(next);
1337 			goto done;
1338 		}
1339 		_cache_inval(ncp, 0);
1340 		_cache_put(ncp);		/* also releases reference */
1341 		ncp = next;
1342 		spin_lock(&vp->v_spin);
1343 		if (ncp && ncp->nc_vp != vp) {
1344 			spin_unlock(&vp->v_spin);
1345 			kprintf("Warning: cache_inval_vp_nonblock: race-B "
1346 				"detected on %s\n", ncp->nc_name);
1347 			_cache_drop(ncp);
1348 			goto done;
1349 		}
1350 	}
1351 	spin_unlock(&vp->v_spin);
1352 done:
1353 	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1354 }
1355 
1356 /*
1357  * The source ncp has been renamed to the target ncp.  Both fncp and tncp
1358  * must be locked.  The target ncp is destroyed (as a normal rename-over
1359  * would destroy the target file or directory).
1360  *
1361  * Because there may be references to the source ncp we cannot copy its
1362  * contents to the target.  Instead the source ncp is relinked as the target
1363  * and the target ncp is removed from the namecache topology.
1364  *
1365  * MPSAFE
1366  */
1367 void
1368 cache_rename(struct nchandle *fnch, struct nchandle *tnch)
1369 {
1370 	struct namecache *fncp = fnch->ncp;
1371 	struct namecache *tncp = tnch->ncp;
1372 	struct namecache *tncp_par;
1373 	struct nchash_head *nchpp;
1374 	u_int32_t hash;
1375 	char *oname;
1376 	char *nname;
1377 
1378 	if (tncp->nc_nlen) {
1379 		nname = kmalloc(tncp->nc_nlen + 1, M_VFSCACHE, M_WAITOK);
1380 		bcopy(tncp->nc_name, nname, tncp->nc_nlen);
1381 		nname[tncp->nc_nlen] = 0;
1382 	} else {
1383 		nname = NULL;
1384 	}
1385 
1386 	/*
1387 	 * Rename fncp (unlink)
1388 	 */
1389 	_cache_unlink_parent(fncp);
1390 	oname = fncp->nc_name;
1391 	fncp->nc_name = nname;
1392 	fncp->nc_nlen = tncp->nc_nlen;
1393 	if (oname)
1394 		kfree(oname, M_VFSCACHE);
1395 
1396 	tncp_par = tncp->nc_parent;
1397 	_cache_hold(tncp_par);
1398 	_cache_lock(tncp_par);
1399 
1400 	/*
1401 	 * Rename fncp (relink)
1402 	 */
1403 	hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
1404 	hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
1405 	nchpp = NCHHASH(hash);
1406 
1407 	spin_lock(&nchpp->spin);
1408 	_cache_link_parent(fncp, tncp_par, nchpp);
1409 	spin_unlock(&nchpp->spin);
1410 
1411 	_cache_put(tncp_par);
1412 
1413 	/*
1414 	 * Get rid of the overwritten tncp (unlink)
1415 	 */
1416 	_cache_unlink(tncp);
1417 }
1418 
1419 /*
1420  * Perform actions consistent with unlinking a file.  The passed-in ncp
1421  * must be locked.
1422  *
1423  * The ncp is marked DESTROYED so it no longer shows up in searches,
1424  * and will be physically deleted when the vnode goes away.
1425  *
1426  * If the related vnode has no refs then we cycle it through vget()/vput()
1427  * to trigger a deactivation (unless we race another reference),
1428  * allowing the VFS to trivially detect and recycle the deleted vnode
1429  * via VOP_INACTIVE().
1430  *
1431  * NOTE: cache_rename() will automatically call _cache_unlink() on the
1432  *	 target ncp.
1433  */
1434 void
1435 cache_unlink(struct nchandle *nch)
1436 {
1437 	_cache_unlink(nch->ncp);
1438 }
1439 
1440 static void
1441 _cache_unlink(struct namecache *ncp)
1442 {
1443 	struct vnode *vp;
1444 
1445 	/*
1446 	 * Causes lookups to fail and allows another ncp with the same
1447 	 * name to be created under ncp->nc_parent.
1448 	 */
1449 	ncp->nc_flag |= NCF_DESTROYED;
1450 
1451 	/*
1452 	 * Attempt to trigger a deactivation.
1453 	 */
1454 	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
1455 	    (vp = ncp->nc_vp) != NULL &&
1456 	    !sysref_isactive(&vp->v_sysref)) {
1457 		if (vget(vp, LK_SHARED) == 0)
1458 			vput(vp);
1459 	}
1460 }
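/*
 * Illustrative sketch: a remove path would typically call cache_unlink()
 * once the backend delete succeeds.  The VOP_NREMOVE call shown is an
 * assumption about the surrounding code, not taken from this file.
 */
#if 0
	error = VOP_NREMOVE(nch, dvp, cred);
	if (error == 0)
		cache_unlink(nch);	/* mark DESTROYED, nudge recycling */
#endif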
1461 
1462 /*
1463  * vget the vnode associated with the namecache entry.  Resolve the namecache
1464  * entry if necessary.  The passed ncp must be referenced and locked.
1465  *
1466  * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
1467  * (depending on the passed lk_type) will be returned in *vpp with an error
1468  * of 0, or NULL will be returned in *vpp with a non-0 error code.  The
1469  * most typical error is ENOENT, meaning that the ncp represents a negative
1470  * cache hit and there is no vnode to retrieve, but other errors can occur
1471  * too.
1472  *
1473  * The vget() can race a reclaim.  If this occurs we re-resolve the
1474  * namecache entry.
1475  *
1476  * There are numerous places in the kernel where vget() is called on a
1477  * vnode while one or more of its namecache entries is locked.  Releasing
1478  * a vnode never deadlocks against locked namecache entries (the vnode
1479  * will not get recycled while referenced ncp's exist).  This means we
1480  * can safely acquire the vnode.  In fact, we MUST NOT release the ncp
1481  * lock when acquiring the vp lock or we might cause a deadlock.
1482  *
1483  * MPSAFE
1484  */
1485 int
1486 cache_vget(struct nchandle *nch, struct ucred *cred,
1487 	   int lk_type, struct vnode **vpp)
1488 {
1489 	struct namecache *ncp;
1490 	struct vnode *vp;
1491 	int error;
1492 
1493 	ncp = nch->ncp;
1494 	KKASSERT(ncp->nc_locktd == curthread);
1495 again:
1496 	vp = NULL;
1497 	if (ncp->nc_flag & NCF_UNRESOLVED)
1498 		error = cache_resolve(nch, cred);
1499 	else
1500 		error = 0;
1501 
1502 	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
1503 		error = vget(vp, lk_type);
1504 		if (error) {
1505 			/*
1506 			 * VRECLAIM race
1507 			 */
1508 			if (error == ENOENT) {
1509 				kprintf("Warning: vnode reclaim race detected "
1510 					"in cache_vget on %p (%s)\n",
1511 					vp, ncp->nc_name);
1512 				_cache_setunresolved(ncp);
1513 				goto again;
1514 			}
1515 
1516 			/*
1517 			 * Not a reclaim race, some other error.
1518 			 */
1519 			KKASSERT(ncp->nc_vp == vp);
1520 			vp = NULL;
1521 		} else {
1522 			KKASSERT(ncp->nc_vp == vp);
1523 			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
1524 		}
1525 	}
1526 	if (error == 0 && vp == NULL)
1527 		error = ENOENT;
1528 	*vpp = vp;
1529 	return(error);
1530 }
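/*
 * Illustrative sketch of typical cache_vget() consumption; ENOENT most
 * commonly denotes a negative hit.  nch is assumed referenced and locked.
 * Hypothetical helper.
 */
#if 0
static int
example_use_vnode(struct nchandle *nch, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	error = cache_vget(nch, cred, LK_SHARED, &vp);
	if (error)
		return (error);		/* e.g. ENOENT: negative hit */
	/* ... vp is locked and referenced here ... */
	vput(vp);			/* unlock + release */
	return (0);
}
#endif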
1531 
1532 int
1533 cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
1534 {
1535 	struct namecache *ncp;
1536 	struct vnode *vp;
1537 	int error;
1538 
1539 	ncp = nch->ncp;
1540 	KKASSERT(ncp->nc_locktd == curthread);
1541 again:
1542 	vp = NULL;
1543 	if (ncp->nc_flag & NCF_UNRESOLVED)
1544 		error = cache_resolve(nch, cred);
1545 	else
1546 		error = 0;
1547 
1548 	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
1549 		error = vget(vp, LK_SHARED);
1550 		if (error) {
1551 			/*
1552 			 * VRECLAIM race
1553 			 */
1554 			if (error == ENOENT) {
1555 				kprintf("Warning: vnode reclaim race detected "
1556 					"in cache_vref on %p (%s)\n",
1557 					vp, ncp->nc_name);
1558 				_cache_setunresolved(ncp);
1559 				goto again;
1560 			}
1561 
1562 			/*
1563 			 * Not a reclaim race, some other error.
1564 			 */
1565 			KKASSERT(ncp->nc_vp == vp);
1566 			vp = NULL;
1567 		} else {
1568 			KKASSERT(ncp->nc_vp == vp);
1569 			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
1570 			/* caller does not want a lock */
1571 			vn_unlock(vp);
1572 		}
1573 	}
1574 	if (error == 0 && vp == NULL)
1575 		error = ENOENT;
1576 	*vpp = vp;
1577 	return(error);
1578 }
1579 
1580 /*
1581  * Return a referenced vnode representing the parent directory of
1582  * ncp.
1583  *
1584  * Because the caller has locked the ncp it should not be possible for
1585  * the parent ncp to go away.  However, the parent can unresolve its
1586  * dvp at any time so we must be able to acquire a lock on the parent
1587  * to safely access nc_vp.
1588  *
1589  * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
1590  * so use vhold()/vdrop() while holding the lock to prevent dvp from
1591  * getting destroyed.
1592  *
1593  * MPSAFE - Note vhold() is allowed when dvp has 0 refs if we hold a
1594  *	    lock on the ncp in question.
1595  */
1596 static struct vnode *
1597 cache_dvpref(struct namecache *ncp)
1598 {
1599 	struct namecache *par;
1600 	struct vnode *dvp;
1601 
1602 	dvp = NULL;
1603 	if ((par = ncp->nc_parent) != NULL) {
1604 		_cache_hold(par);
1605 		_cache_lock(par);
1606 		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
1607 			if ((dvp = par->nc_vp) != NULL)
1608 				vhold(dvp);
1609 		}
1610 		_cache_unlock(par);
1611 		if (dvp) {
1612 			if (vget(dvp, LK_SHARED) == 0) {
1613 				vn_unlock(dvp);
1614 				vdrop(dvp);
1615 				/* return refd, unlocked dvp */
1616 			} else {
1617 				vdrop(dvp);
1618 				dvp = NULL;
1619 			}
1620 		}
1621 		_cache_drop(par);
1622 	}
1623 	return(dvp);
1624 }
1625 
1626 /*
1627  * Convert a directory vnode to a namecache record without any other
1628  * knowledge of the topology.  This ONLY works with directory vnodes and
1629  * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
1630  * returned ncp (if not NULL) will be held and unlocked.
1631  *
1632  * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
1633  * If 'makeit' is 1 we attempt to track-down and create the namecache topology
1634  * for dvp.  This will fail only if the directory has been deleted out from
1635  * under the caller.
1636  *
1637  * Callers must always check for a NULL return no matter the value of 'makeit'.
1638  *
1639  * To avoid overflowing the kernel stack each recursive call increments
1640  * the makeit variable.
1641  */
1642 
1643 static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
1644 				  struct vnode *dvp, char *fakename);
1645 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
1646 				  struct vnode **saved_dvp);
1647 
1648 int
1649 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
1650 	      struct nchandle *nch)
1651 {
1652 	struct vnode *saved_dvp;
1653 	struct vnode *pvp;
1654 	char *fakename;
1655 	int error;
1656 
1657 	nch->ncp = NULL;
1658 	nch->mount = dvp->v_mount;
1659 	saved_dvp = NULL;
1660 	fakename = NULL;
1661 
1662 	/*
1663 	 * Handle the makeit == 0 degenerate case
1664 	 */
1665 	if (makeit == 0) {
1666 		spin_lock(&dvp->v_spin);
1667 		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
1668 		if (nch->ncp)
1669 			cache_hold(nch);
1670 		spin_unlock(&dvp->v_spin);
1671 	}
1672 
1673 	/*
1674 	 * Loop until resolution, inside code will break out on error.
1675 	 */
1676 	while (makeit) {
1677 		/*
1678 		 * Break out if we successfully acquire a working ncp.
1679 		 */
1680 		spin_lock(&dvp->v_spin);
1681 		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
1682 		if (nch->ncp) {
1683 			cache_hold(nch);
1684 			spin_unlock(&dvp->v_spin);
1685 			break;
1686 		}
1687 		spin_unlock(&dvp->v_spin);
1688 
1689 		/*
1690 		 * If dvp is the root of its filesystem it should already
1691 		 * have a namecache pointer associated with it as a side
1692 		 * effect of the mount, but it may have been disassociated.
1693 		 */
1694 		if (dvp->v_flag & VROOT) {
1695 			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
1696 			error = cache_resolve_mp(nch->mount);
1697 			_cache_put(nch->ncp);
1698 			if (ncvp_debug) {
1699 				kprintf("cache_fromdvp: resolve root of mount %p error %d",
1700 					dvp->v_mount, error);
1701 			}
1702 			if (error) {
1703 				if (ncvp_debug)
1704 					kprintf(" failed\n");
1705 				nch->ncp = NULL;
1706 				break;
1707 			}
1708 			if (ncvp_debug)
1709 				kprintf(" succeeded\n");
1710 			continue;
1711 		}
1712 
1713 		/*
1714 		 * If we are recursed too deeply resort to an O(n^2)
1715 		 * algorithm to resolve the namecache topology.  The
1716 		 * resolved pvp is left referenced in saved_dvp to
1717 		 * prevent the tree from being destroyed while we loop.
1718 		 */
1719 		if (makeit > 20) {
1720 			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
1721 			if (error) {
1722 				kprintf("lookupdotdot(longpath) failed %d "
1723 				       "dvp %p\n", error, dvp);
1724 				nch->ncp = NULL;
1725 				break;
1726 			}
1727 			continue;
1728 		}
1729 
1730 		/*
1731 		 * Get the parent directory and resolve its ncp.
1732 		 */
1733 		if (fakename) {
1734 			kfree(fakename, M_TEMP);
1735 			fakename = NULL;
1736 		}
1737 		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
1738 					  &fakename);
1739 		if (error) {
1740 			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
1741 			break;
1742 		}
1743 		vn_unlock(pvp);
1744 
1745 		/*
1746 		 * Reuse makeit as a recursion depth counter.  On success
1747 		 * nch will be fully referenced.
1748 		 */
1749 		cache_fromdvp(pvp, cred, makeit + 1, nch);
1750 		vrele(pvp);
1751 		if (nch->ncp == NULL)
1752 			break;
1753 
1754 		/*
1755 		 * Do an inefficient scan of pvp (embodied by ncp) to look
1756 		 * for dvp.  This will create a namecache record for dvp on
1757 		 * success.  We loop up to recheck on success.
1758 		 *
1759 		 * ncp and dvp are both held but not locked.
1760 		 */
1761 		error = cache_inefficient_scan(nch, cred, dvp, fakename);
1762 		if (error) {
1763 			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
1764 				pvp, nch->ncp->nc_name, dvp);
1765 			cache_drop(nch);
1766 			/* nch was NULLed out, reload mount */
1767 			nch->mount = dvp->v_mount;
1768 			break;
1769 		}
1770 		if (ncvp_debug) {
1771 			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
1772 				pvp, nch->ncp->nc_name);
1773 		}
1774 		cache_drop(nch);
1775 		/* nch was NULLed out, reload mount */
1776 		nch->mount = dvp->v_mount;
1777 	}
1778 
1779 	/*
1780 	 * If nch->ncp is non-NULL it will have been held already.
1781 	 */
1782 	if (fakename)
1783 		kfree(fakename, M_TEMP);
1784 	if (saved_dvp)
1785 		vrele(saved_dvp);
1786 	if (nch->ncp)
1787 		return (0);
1788 	return (EINVAL);
1789 }
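/*
 * Illustrative sketch: an NFS-server-style caller that has mapped a file
 * handle to a directory vnode and needs a connected nchandle.
 * Hypothetical helper; dvp must be refd but unlocked, per the comments
 * above.
 */
#if 0
static int
example_nch_from_dvp(struct vnode *dvp, struct ucred *cred,
		     struct nchandle *nch)
{
	int error;

	error = cache_fromdvp(dvp, cred, 1, nch);	/* makeit != 0 */
	if (error == 0) {
		/* nch->ncp is held and unlocked here */
		/* ... */
		cache_drop(nch);
	}
	return (error);
}
#endif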
1790 
1791 /*
1792  * Go up the chain of parent directories until we find something
1793  * we can resolve into the namecache.  This is very inefficient.
1794  */
1795 static
1796 int
1797 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
1798 		  struct vnode **saved_dvp)
1799 {
1800 	struct nchandle nch;
1801 	struct vnode *pvp;
1802 	int error;
1803 	static time_t last_fromdvp_report;
1804 	char *fakename;
1805 
1806 	/*
1807 	 * Loop getting the parent directory vnode until we get something we
1808 	 * can resolve in the namecache.
1809 	 */
1810 	vref(dvp);
1811 	nch.mount = dvp->v_mount;
1812 	nch.ncp = NULL;
1813 	fakename = NULL;
1814 
1815 	for (;;) {
1816 		if (fakename) {
1817 			kfree(fakename, M_TEMP);
1818 			fakename = NULL;
1819 		}
1820 		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
1821 					  &fakename);
1822 		if (error) {
1823 			vrele(dvp);
1824 			break;
1825 		}
1826 		vn_unlock(pvp);
1827 		spin_lock(&pvp->v_spin);
1828 		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
1829 			_cache_hold(nch.ncp);
1830 			spin_unlock(&pvp->v_spin);
1831 			vrele(pvp);
1832 			break;
1833 		}
1834 		spin_unlock(&pvp->v_spin);
1835 		if (pvp->v_flag & VROOT) {
1836 			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
1837 			error = cache_resolve_mp(nch.mount);
1838 			_cache_unlock(nch.ncp);
1839 			vrele(pvp);
1840 			if (error) {
1841 				_cache_drop(nch.ncp);
1842 				nch.ncp = NULL;
1843 				vrele(dvp);
1844 			}
1845 			break;
1846 		}
1847 		vrele(dvp);
1848 		dvp = pvp;
1849 	}
1850 	if (error == 0) {
1851 		if (last_fromdvp_report != time_second) {
1852 			last_fromdvp_report = time_second;
1853 			kprintf("Warning: extremely inefficient path "
1854 				"resolution on %s\n",
1855 				nch.ncp->nc_name);
1856 		}
1857 		error = cache_inefficient_scan(&nch, cred, dvp, fakename);
1858 
1859 		/*
1860 		 * Hopefully dvp now has a namecache record associated with
1861 		 * it.  Leave it referenced to prevent the kernel from
1862 		 * recycling the vnode.  Otherwise extremely long directory
1863 		 * paths could result in endless recycling.
1864 		 */
1865 		if (*saved_dvp)
1866 		    vrele(*saved_dvp);
1867 		*saved_dvp = dvp;
1868 		_cache_drop(nch.ncp);
1869 	}
1870 	if (fakename)
1871 		kfree(fakename, M_TEMP);
1872 	return (error);
1873 }
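
/*
 * Illustrative walk (assumption, not taken from any caller): given an
 * uncached dvp at /a/b/c with only /a present in the namecache, the
 * loop above chases ".." from c to b to a, finds a's ncp on its
 * v_namecache list, and cache_inefficient_scan() then reconnects one
 * level; the caller repeats until dvp itself is reconnected.
 */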
1874 
1875 /*
1876  * Do an inefficient scan of the directory represented by ncp looking for
1877  * the directory vnode dvp.  ncp must be held but not locked on entry and
1878  * will be held on return.  dvp must be refd but not locked on entry and
1879  * will remain refd on return.
1880  *
1881  * Why do this at all?  Well, due to its stateless nature the NFS server
1882  * converts file handles directly to vnodes without necessarily going through
1883  * the namecache ops that would otherwise create the namecache topology
1884  * leading to the vnode.  We could either (1) Change the namecache algorithms
1885  * to allow disconnected namecache records that are re-merged opportunistically,
1886  * or (2) Make the NFS server backtrack and scan to recover a connected
1887  * namecache topology in order to then be able to issue new API lookups.
1888  *
1889  * It turns out that (1) is a huge mess.  It takes a nice clean set of
1890  * namecache algorithms and introduces a lot of complication in every subsystem
1891  * that calls into the namecache to deal with the re-merge case, especially
1892  * since we are using the namecache to placehold negative lookups and the
1893  * vnode might not be immediately assigned. (2) is certainly far less
1894  * efficient than (1), but since we are only talking about directories here
1895  * (which are likely to remain cached), the case does not actually run all
1896  * that often and has the supreme advantage of not polluting the namecache
1897  * algorithms.
1898  *
1899  * If a fakename is supplied just construct a namecache entry using the
1900  * fake name.
1901  */
1902 static int
1903 cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
1904 		       struct vnode *dvp, char *fakename)
1905 {
1906 	struct nlcomponent nlc;
1907 	struct nchandle rncp;
1908 	struct dirent *den;
1909 	struct vnode *pvp;
1910 	struct vattr vat;
1911 	struct iovec iov;
1912 	struct uio uio;
1913 	int blksize;
1914 	int eofflag;
1915 	int bytes;
1916 	char *rbuf;
1917 	int error;
1918 
1919 	vat.va_blocksize = 0;
1920 	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
1921 		return (error);
1922 	cache_lock(nch);
1923 	error = cache_vref(nch, cred, &pvp);
1924 	cache_unlock(nch);
1925 	if (error)
1926 		return (error);
1927 	if (ncvp_debug) {
1928 		kprintf("inefficient_scan: directory iosize %ld "
1929 			"vattr fileid = %lld\n",
1930 			vat.va_blocksize,
1931 			(long long)vat.va_fileid);
1932 	}
1933 
1934 	/*
1935 	 * Use the supplied fakename if not NULL.  Fake names are typically
1936 	 * not in the actual filesystem hierarchy.  This is used by HAMMER
1937 	 * to glue @@timestamp recursions together.
1938 	 */
1939 	if (fakename) {
1940 		nlc.nlc_nameptr = fakename;
1941 		nlc.nlc_namelen = strlen(fakename);
1942 		rncp = cache_nlookup(nch, &nlc);
1943 		goto done;
1944 	}
1945 
1946 	if ((blksize = vat.va_blocksize) == 0)
1947 		blksize = DEV_BSIZE;
1948 	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
1949 	rncp.ncp = NULL;
1950 
1951 	eofflag = 0;
1952 	uio.uio_offset = 0;
1953 again:
1954 	iov.iov_base = rbuf;
1955 	iov.iov_len = blksize;
1956 	uio.uio_iov = &iov;
1957 	uio.uio_iovcnt = 1;
1958 	uio.uio_resid = blksize;
1959 	uio.uio_segflg = UIO_SYSSPACE;
1960 	uio.uio_rw = UIO_READ;
1961 	uio.uio_td = curthread;
1962 
1963 	if (ncvp_debug >= 2)
1964 		kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
1965 	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
1966 	if (error == 0) {
1967 		den = (struct dirent *)rbuf;
1968 		bytes = blksize - uio.uio_resid;
1969 
1970 		while (bytes > 0) {
1971 			if (ncvp_debug >= 2) {
1972 				kprintf("cache_inefficient_scan: %*.*s\n",
1973 					den->d_namlen, den->d_namlen,
1974 					den->d_name);
1975 			}
1976 			if (den->d_type != DT_WHT &&
1977 			    den->d_ino == vat.va_fileid) {
1978 				if (ncvp_debug) {
1979 					kprintf("cache_inefficient_scan: "
1980 					       "MATCHED inode %lld path %s/%*.*s\n",
1981 					       (long long)vat.va_fileid,
1982 					       nch->ncp->nc_name,
1983 					       den->d_namlen, den->d_namlen,
1984 					       den->d_name);
1985 				}
1986 				nlc.nlc_nameptr = den->d_name;
1987 				nlc.nlc_namelen = den->d_namlen;
1988 				rncp = cache_nlookup(nch, &nlc);
1989 				KKASSERT(rncp.ncp != NULL);
1990 				break;
1991 			}
1992 			bytes -= _DIRENT_DIRSIZ(den);
1993 			den = _DIRENT_NEXT(den);
1994 		}
1995 		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
1996 			goto again;
1997 	}
1998 	kfree(rbuf, M_TEMP);
1999 done:
2000 	vrele(pvp);
2001 	if (rncp.ncp) {
2002 		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
2003 			_cache_setvp(rncp.mount, rncp.ncp, dvp);
2004 			if (ncvp_debug >= 2) {
2005 				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
2006 					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
2007 			}
2008 		} else {
2009 			if (ncvp_debug >= 2) {
2010 				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
2011 					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
2012 					rncp.ncp->nc_vp);
2013 			}
2014 		}
2015 		if (rncp.ncp->nc_vp == NULL)
2016 			error = rncp.ncp->nc_error;
2017 		/*
2018 		 * Release rncp after a successful nlookup.  rncp was fully
2019 		 * referenced.
2020 		 */
2021 		cache_put(&rncp);
2022 	} else {
2023 		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
2024 			dvp, nch->ncp->nc_name);
2025 		error = ENOENT;
2026 	}
2027 	return (error);
2028 }
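
/*
 * Usage sketch (hypothetical, not taken from any caller): the entry
 * contract above amounts to holding (but not locking) the directory's
 * nchandle and ref'ing dvp around the call:
 *
 *	cache_hold(&nch);			(held, not locked)
 *	vref(dvp);				(refd, not locked)
 *	error = cache_inefficient_scan(&nch, cred, dvp, NULL);
 *	vrele(dvp);
 *	cache_drop(&nch);
 */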
2029 
2030 /*
2031  * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
2032  * state, which disassociates it from its vnode or ncneglist.
2033  *
2034  * Then, if there are no additional references to the ncp and no children,
2035  * the ncp is removed from the topology and destroyed.
2036  *
2037  * References and/or children may exist if the ncp is in the middle of the
2038  * topology, preventing the ncp from being destroyed.
2039  *
2040  * This function must be called with the ncp held and locked and will unlock
2041  * and drop it during zapping.
2042  *
2043  * If nonblock is non-zero and the parent ncp cannot be locked we give up.
2044  * This case can occur in the cache_drop() path.
2045  *
2046  * This function may return a held (but NOT locked) parent node which the
2047  * caller must drop.  We do this so _cache_drop() can loop, to avoid
2048  * blowing out the kernel stack.
2049  *
2050  * WARNING!  For MPSAFE operation this routine must acquire up to three
2051  *	     spin locks to be able to safely test nc_refs.  Lock order is
2052  *	     very important.
2053  *
2054  *	     hash spinlock if on hash list
2055  *	     parent spinlock if child of parent
2056  *	     (the ncp is unresolved so there is no vnode association)
2057  */
2058 static struct namecache *
2059 cache_zap(struct namecache *ncp, int nonblock)
2060 {
2061 	struct namecache *par;
2062 	struct vnode *dropvp;
2063 	int refs;
2064 
2065 	/*
2066 	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
2067 	 */
2068 	_cache_setunresolved(ncp);
2069 
2070 	/*
2071 	 * Try to scrap the entry and possibly tail-recurse on its parent.
2072 	 * We only scrap unref'd (other than our ref) unresolved entries,
2073 	 * we do not scrap 'live' entries.
2074 	 *
2075 	 * Note that once the spinlocks are acquired, if nc_refs == 1 no
2076 	 * other references are possible.  If it isn't 1, however, we have
2077 	 * to decrement, but also be sure to avoid a 1->0 transition.
2078 	 */
2079 	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
2080 	KKASSERT(ncp->nc_refs > 0);
2081 
2082 	/*
2083 	 * Acquire locks.  Note that the parent can't go away while we hold
2084 	 * a child locked.
2085 	 */
2086 	if ((par = ncp->nc_parent) != NULL) {
2087 		if (nonblock) {
2088 			for (;;) {
2089 				if (_cache_lock_nonblock(par) == 0)
2090 					break;
2091 				refs = ncp->nc_refs;
2092 				ncp->nc_flag |= NCF_DEFEREDZAP;
2093 				++numdefered;	/* MP race ok */
2094 				if (atomic_cmpset_int(&ncp->nc_refs,
2095 						      refs, refs - 1)) {
2096 					_cache_unlock(ncp);
2097 					return(NULL);
2098 				}
2099 				cpu_pause();
2100 			}
2101 			_cache_hold(par);
2102 		} else {
2103 			_cache_hold(par);
2104 			_cache_lock(par);
2105 		}
2106 		spin_lock(&ncp->nc_head->spin);
2107 	}
2108 
2109 	/*
2110 	 * If someone other than us has a ref or we have children
2111 	 * we cannot zap the entry.  The 1->0 transition and any
2112 	 * further list operations are protected by the spinlocks
2113 	 * we have acquired, but other transitions are not.
2114 	 */
2115 	for (;;) {
2116 		refs = ncp->nc_refs;
2117 		if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
2118 			break;
2119 		if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
2120 			if (par) {
2121 				spin_unlock(&ncp->nc_head->spin);
2122 				_cache_put(par);
2123 			}
2124 			_cache_unlock(ncp);
2125 			return(NULL);
2126 		}
2127 		cpu_pause();
2128 	}
2129 
2130 	/*
2131 	 * We are the only ref and with the spinlocks held no further
2132 	 * refs can be acquired by others.
2133 	 *
2134 	 * Remove us from the hash list and parent list.  We have to
2135 	 * drop a ref on the parent's vp if the parent's list becomes
2136 	 * empty.
2137 	 */
2138 	dropvp = NULL;
2139 	if (par) {
2140 		struct nchash_head *nchpp = ncp->nc_head;
2141 
2142 		KKASSERT(nchpp != NULL);
2143 		LIST_REMOVE(ncp, nc_hash);
2144 		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
2145 		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
2146 			dropvp = par->nc_vp;
2147 		ncp->nc_head = NULL;
2148 		ncp->nc_parent = NULL;
2149 		spin_unlock(&nchpp->spin);
2150 		_cache_unlock(par);
2151 	} else {
2152 		KKASSERT(ncp->nc_head == NULL);
2153 	}
2154 
2155 	/*
2156 	 * ncp should not have picked up any refs.  Physically
2157 	 * destroy the ncp.
2158 	 */
2159 	KKASSERT(ncp->nc_refs == 1);
2160 	/* _cache_unlock(ncp) not required */
2161 	ncp->nc_refs = -1;	/* safety */
2162 	if (ncp->nc_name)
2163 		kfree(ncp->nc_name, M_VFSCACHE);
2164 	kfree(ncp, M_VFSCACHE);
2165 
2166 	/*
2167 	 * Delayed drop (we had to release our spinlocks)
2168 	 *
2169 	 * The refed parent (if not NULL) must be dropped.  The
2170 	 * caller is responsible for looping.
2171 	 */
2172 	if (dropvp)
2173 		vdrop(dropvp);
2174 	return(par);
2175 }
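
/*
 * Illustrative sketch (assumption, mirroring the contract above): a
 * caller such as _cache_drop() can iterate on the returned parent
 * rather than recursing, which keeps kernel stack usage bounded on
 * long parent chains:
 *
 *	while (ncp) {
 *		(lock ncp, decide it is scrappable)
 *		ncp = cache_zap(ncp, 1);	(held parent or NULL)
 *	}
 */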
2176 
2177 /*
2178  * Clean up dangling negative cache and deferred-drop entries in the
2179  * namecache.
2180  */
2181 typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;
2182 
2183 static cache_hs_t neg_cache_hysteresis_state = CHI_LOW;
2184 static cache_hs_t pos_cache_hysteresis_state = CHI_LOW;
2185 
2186 void
2187 cache_hysteresis(void)
2188 {
2189 	int poslimit;
2190 
2191 	/*
2192 	 * Don't cache too many negative hits.  We use hysteresis to reduce
2193 	 * the impact on the critical path.
2194 	 */
2195 	switch(neg_cache_hysteresis_state) {
2196 	case CHI_LOW:
2197 		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
2198 			_cache_cleanneg(10);
2199 			neg_cache_hysteresis_state = CHI_HIGH;
2200 		}
2201 		break;
2202 	case CHI_HIGH:
2203 		if (numneg > MINNEG * 9 / 10 &&
2204 		    numneg * ncnegfactor * 9 / 10 > numcache
2205 		) {
2206 			_cache_cleanneg(10);
2207 		} else {
2208 			neg_cache_hysteresis_state = CHI_LOW;
2209 		}
2210 		break;
2211 	}
2212 
2213 	/*
2214 	 * Don't cache too many positive hits.  We use hysteresis to reduce
2215 	 * the impact on the critical path.
2216 	 *
2217 	 * Excessive positive hits can accumulate due to large numbers of
2218 	 * hardlinks (the vnode cache will not prevent hardlinked ncps from
2219 	 * growing without bound).
2220 	 */
2221 	if ((poslimit = ncposlimit) == 0)
2222 		poslimit = desiredvnodes * 2;
2223 
2224 	switch(pos_cache_hysteresis_state) {
2225 	case CHI_LOW:
2226 		if (numcache > poslimit && numcache > MINPOS) {
2227 			_cache_cleanpos(10);
2228 			pos_cache_hysteresis_state = CHI_HIGH;
2229 		}
2230 		break;
2231 	case CHI_HIGH:
2232 		if (numcache > poslimit * 5 / 6 && numcache > MINPOS) {
2233 			_cache_cleanpos(10);
2234 		} else {
2235 			pos_cache_hysteresis_state = CHI_LOW;
2236 		}
2237 		break;
2238 	}
2239 
2240 	/*
2241 	 * Clean out dangling deferred-zap ncps (which could not
2242 	 * be cleanly dropped) if too many build up.  Note
2243 	 * that numdefered is not an exact number as such ncps
2244 	 * can be reused and the counter is not handled in a MP
2245 	 * safe manner by design.
2246 	 */
2247 	if (numdefered * ncnegfactor > numcache) {
2248 		_cache_cleandefered();
2249 	}
2250 }
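
/*
 * Worked example of the negative-entry trigger above (numbers are
 * illustrative): with ncnegfactor = 16 and numcache = 16000, the
 * CHI_LOW test fires once numneg exceeds 1000 (numneg * ncnegfactor >
 * numcache), after which the CHI_HIGH test applies an additional 9/10
 * factor so the state machine does not toggle on every call.
 */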
2251 
2252 /*
2253  * NEW NAMECACHE LOOKUP API
2254  *
2255  * Lookup an entry in the namecache.  The passed par_nch must be referenced
2256  * and unlocked.  A referenced and locked nchandle with a non-NULL nch.ncp
2257  * is ALWAYS returned, even if the supplied component is illegal.
2258  *
2259  * The resulting namecache entry should be returned to the system with
2260  * cache_put() or cache_unlock() + cache_drop().
2261  *
2262  * namecache locks are recursive but care must be taken to avoid lock order
2263  * reversals (hence why the passed par_nch must be unlocked).  The locking
2264  * rules order locks for parent traversals, not for child traversals.
2265  *
2266  * Nobody else will be able to manipulate the associated namespace (e.g.
2267  * create, delete, rename, rename-target) until the caller unlocks the
2268  * entry.
2269  *
2270  * The returned entry will be in one of three states:  positive hit (non-null
2271  * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
2272  * Unresolved entries must be resolved through the filesystem to associate the
2273  * vnode and/or determine whether a positive or negative hit has occurred.
2274  *
2275  * It is not necessary to lock a directory in order to lock namespace under
2276  * that directory.  In fact, it is explicitly not allowed to do that.  A
2277  * directory is typically only locked when being created, renamed, or
2278  * destroyed.
2279  *
2280  * The directory (par) may be unresolved, in which case any returned child
2281  * will likely also be marked unresolved.  Likely but not guaranteed.  Since
2282  * the filesystem lookup requires a resolved directory vnode the caller is
2283  * responsible for resolving the namecache chain top-down.  This API
2284  * specifically allows whole chains to be created in an unresolved state.
2285  */
2286 struct nchandle
2287 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
2288 {
2289 	struct nchandle nch;
2290 	struct namecache *ncp;
2291 	struct namecache *new_ncp;
2292 	struct nchash_head *nchpp;
2293 	struct mount *mp;
2294 	u_int32_t hash;
2295 	globaldata_t gd;
2296 	int par_locked;
2297 
2298 	numcalls++;
2299 	gd = mycpu;
2300 	mp = par_nch->mount;
2301 	par_locked = 0;
2302 
2303 	/*
2304 	 * This is a good time to call cache_hysteresis() since no ncps
2305 	 * are locked by the caller or by us.
2306 	 */
2307 	cache_hysteresis();
2308 
2309 	/*
2310 	 * Try to locate an existing entry
2311 	 */
2312 	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
2313 	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
2314 	new_ncp = NULL;
2315 	nchpp = NCHHASH(hash);
2316 restart:
2317 	spin_lock(&nchpp->spin);
2318 	LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
2319 		numchecks++;
2320 
2321 		/*
2322 		 * Break out if we find a matching entry.  Note that
2323 		 * UNRESOLVED entries may match, but DESTROYED entries
2324 		 * do not.
2325 		 */
2326 		if (ncp->nc_parent == par_nch->ncp &&
2327 		    ncp->nc_nlen == nlc->nlc_namelen &&
2328 		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2329 		    (ncp->nc_flag & NCF_DESTROYED) == 0
2330 		) {
2331 			_cache_hold(ncp);
2332 			spin_unlock(&nchpp->spin);
2333 			if (par_locked) {
2334 				_cache_unlock(par_nch->ncp);
2335 				par_locked = 0;
2336 			}
2337 			if (_cache_lock_special(ncp) == 0) {
2338 				_cache_auto_unresolve(mp, ncp);
2339 				if (new_ncp)
2340 					_cache_free(new_ncp);
2341 				goto found;
2342 			}
2343 			_cache_get(ncp);
2344 			_cache_put(ncp);
2345 			_cache_drop(ncp);
2346 			goto restart;
2347 		}
2348 	}
2349 
2350 	/*
2351 	 * We failed to locate an entry, create a new entry and add it to
2352 	 * the cache.  The parent ncp must also be locked so we
2353 	 * can link into it.
2354 	 *
2355 	 * We have to relookup after possibly blocking in kmalloc or
2356 	 * when locking par_nch.
2357 	 *
2358 	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2359 	 *	 mount case, in which case nc_name will be NULL.
2360 	 */
2361 	if (new_ncp == NULL) {
2362 		spin_unlock(&nchpp->spin);
2363 		new_ncp = cache_alloc(nlc->nlc_namelen);
2364 		if (nlc->nlc_namelen) {
2365 			bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
2366 			      nlc->nlc_namelen);
2367 			new_ncp->nc_name[nlc->nlc_namelen] = 0;
2368 		}
2369 		goto restart;
2370 	}
2371 	if (par_locked == 0) {
2372 		spin_unlock(&nchpp->spin);
2373 		_cache_lock(par_nch->ncp);
2374 		par_locked = 1;
2375 		goto restart;
2376 	}
2377 
2378 	/*
2379 	 * WARNING!  We still hold the spinlock.  We have to set the hash
2380 	 *	     table entry atomically.
2381 	 */
2382 	ncp = new_ncp;
2383 	_cache_link_parent(ncp, par_nch->ncp, nchpp);
2384 	spin_unlock(&nchpp->spin);
2385 	_cache_unlock(par_nch->ncp);
2386 	/* par_locked = 0 - not used */
2387 found:
2388 	/*
2389 	 * stats and namecache size management
2390 	 */
2391 	if (ncp->nc_flag & NCF_UNRESOLVED)
2392 		++gd->gd_nchstats->ncs_miss;
2393 	else if (ncp->nc_vp)
2394 		++gd->gd_nchstats->ncs_goodhits;
2395 	else
2396 		++gd->gd_nchstats->ncs_neghits;
2397 	nch.mount = mp;
2398 	nch.ncp = ncp;
2399 	atomic_add_int(&nch.mount->mnt_refs, 1);
2400 	return(nch);
2401 }
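
/*
 * Usage sketch (hypothetical, following the API description above):
 * look up a single component under a referenced, unlocked parent.
 * The result is always locked and referenced and may be unresolved:
 *
 *	struct nlcomponent nlc;
 *	struct nchandle nch;
 *
 *	nlc.nlc_nameptr = "example";
 *	nlc.nlc_namelen = 7;
 *	nch = cache_nlookup(&par_nch, &nlc);
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		cache_resolve(&nch, cred);	(resolve via the VFS)
 *	cache_put(&nch);			(unlock + drop)
 */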
2402 
2403 /*
2404  * This is a non-blocking version of cache_nlookup() used by
2405  * nfs_readdirplusrpc_uio().  It can fail for any reason and
2406  * will return nch.ncp == NULL in that case.
2407  */
2408 struct nchandle
2409 cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
2410 {
2411 	struct nchandle nch;
2412 	struct namecache *ncp;
2413 	struct namecache *new_ncp;
2414 	struct nchash_head *nchpp;
2415 	struct mount *mp;
2416 	u_int32_t hash;
2417 	globaldata_t gd;
2418 	int par_locked;
2419 
2420 	numcalls++;
2421 	gd = mycpu;
2422 	mp = par_nch->mount;
2423 	par_locked = 0;
2424 
2425 	/*
2426 	 * Try to locate an existing entry
2427 	 */
2428 	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
2429 	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
2430 	new_ncp = NULL;
2431 	nchpp = NCHHASH(hash);
2432 restart:
2433 	spin_lock(&nchpp->spin);
2434 	LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
2435 		numchecks++;
2436 
2437 		/*
2438 		 * Break out if we find a matching entry.  Note that
2439 		 * UNRESOLVED entries may match, but DESTROYED entries
2440 		 * do not.
2441 		 */
2442 		if (ncp->nc_parent == par_nch->ncp &&
2443 		    ncp->nc_nlen == nlc->nlc_namelen &&
2444 		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2445 		    (ncp->nc_flag & NCF_DESTROYED) == 0
2446 		) {
2447 			_cache_hold(ncp);
2448 			spin_unlock(&nchpp->spin);
2449 			if (par_locked) {
2450 				_cache_unlock(par_nch->ncp);
2451 				par_locked = 0;
2452 			}
2453 			if (_cache_lock_special(ncp) == 0) {
2454 				_cache_auto_unresolve(mp, ncp);
2455 				if (new_ncp) {
2456 					_cache_free(new_ncp);
2457 					new_ncp = NULL;
2458 				}
2459 				goto found;
2460 			}
2461 			_cache_drop(ncp);
2462 			goto failed;
2463 		}
2464 	}
2465 
2466 	/*
2467 	 * We failed to locate an entry, create a new entry and add it to
2468 	 * the cache.  The parent ncp must also be locked so we
2469 	 * can link into it.
2470 	 *
2471 	 * We have to relookup after possibly blocking in kmalloc or
2472 	 * when locking par_nch.
2473 	 *
2474 	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2475 	 *	 mount case, in which case nc_name will be NULL.
2476 	 */
2477 	if (new_ncp == NULL) {
2478 		spin_unlock(&nchpp->spin);
2479 		new_ncp = cache_alloc(nlc->nlc_namelen);
2480 		if (nlc->nlc_namelen) {
2481 			bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
2482 			      nlc->nlc_namelen);
2483 			new_ncp->nc_name[nlc->nlc_namelen] = 0;
2484 		}
2485 		goto restart;
2486 	}
2487 	if (par_locked == 0) {
2488 		spin_unlock(&nchpp->spin);
2489 		if (_cache_lock_nonblock(par_nch->ncp) == 0) {
2490 			par_locked = 1;
2491 			goto restart;
2492 		}
2493 		goto failed;
2494 	}
2495 
2496 	/*
2497 	 * WARNING!  We still hold the spinlock.  We have to set the hash
2498 	 *	     table entry atomically.
2499 	 */
2500 	ncp = new_ncp;
2501 	_cache_link_parent(ncp, par_nch->ncp, nchpp);
2502 	spin_unlock(&nchpp->spin);
2503 	_cache_unlock(par_nch->ncp);
2504 	/* par_locked = 0 - not used */
2505 found:
2506 	/*
2507 	 * stats and namecache size management
2508 	 */
2509 	if (ncp->nc_flag & NCF_UNRESOLVED)
2510 		++gd->gd_nchstats->ncs_miss;
2511 	else if (ncp->nc_vp)
2512 		++gd->gd_nchstats->ncs_goodhits;
2513 	else
2514 		++gd->gd_nchstats->ncs_neghits;
2515 	nch.mount = mp;
2516 	nch.ncp = ncp;
2517 	atomic_add_int(&nch.mount->mnt_refs, 1);
2518 	return(nch);
2519 failed:
2520 	if (new_ncp) {
2521 		_cache_free(new_ncp);
2522 		new_ncp = NULL;
2523 	}
2524 	nch.mount = NULL;
2525 	nch.ncp = NULL;
2526 	return(nch);
2527 }
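
/*
 * Usage sketch (hypothetical): unlike cache_nlookup(), the non-blocking
 * variant can fail for any reason, so the caller must check for a NULL
 * ncp before using the result:
 *
 *	nch = cache_nlookup_nonblock(&par_nch, &nlc);
 *	if (nch.ncp == NULL)
 *		return;				(fall back or retry)
 *	...
 *	cache_put(&nch);
 */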
2528 
2529 /*
2530  * The namecache entry is marked as being used as a mount point.
2531  * Locate the mount if it is visible to the caller.
2532  */
2533 struct findmount_info {
2534 	struct mount *result;
2535 	struct mount *nch_mount;
2536 	struct namecache *nch_ncp;
2537 };
2538 
2539 static
2540 int
2541 cache_findmount_callback(struct mount *mp, void *data)
2542 {
2543 	struct findmount_info *info = data;
2544 
2545 	/*
2546 	 * Check the mount's mounted-on point against the passed nch.
2547 	 */
2548 	if (mp->mnt_ncmounton.mount == info->nch_mount &&
2549 	    mp->mnt_ncmounton.ncp == info->nch_ncp
2550 	) {
2551 	    info->result = mp;
2552 	    atomic_add_int(&mp->mnt_refs, 1);
2553 	    return(-1);
2554 	}
2555 	return(0);
2556 }
2557 
2558 struct mount *
2559 cache_findmount(struct nchandle *nch)
2560 {
2561 	struct findmount_info info;
2562 
2563 	info.result = NULL;
2564 	info.nch_mount = nch->mount;
2565 	info.nch_ncp = nch->ncp;
2566 	mountlist_scan(cache_findmount_callback, &info,
2567 			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
2568 	return(info.result);
2569 }
2570 
2571 void
2572 cache_dropmount(struct mount *mp)
2573 {
2574 	atomic_add_int(&mp->mnt_refs, -1);
2575 }
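
/*
 * Usage sketch (hypothetical): cache_findmount() returns a referenced
 * mount or NULL, and a successful lookup must be balanced with
 * cache_dropmount():
 *
 *	mp = cache_findmount(&nch);
 *	if (mp) {
 *		...				(use the mount)
 *		cache_dropmount(mp);
 *	}
 */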
2576 
2577 /*
2578  * Resolve an unresolved namecache entry, generally by looking it up.
2579  * The passed ncp must be locked and refd.
2580  *
2581  * Theoretically since a vnode cannot be recycled while held, and since
2582  * the nc_parent chain holds its vnode as long as children exist, the
2583  * direct parent of the cache entry we are trying to resolve should
2584 	 * have a valid vnode.  If not, generate an error that we can
2585  * determine is related to a resolver bug.
2586  *
2587  * However, if a vnode was in the middle of a recyclement when the NCP
2588  * got locked, ncp->nc_vp might point to a vnode that is about to become
2589  * invalid.  cache_resolve() handles this case by unresolving the entry
2590  * and then re-resolving it.
2591  *
2592  * Note that successful resolution does not necessarily return an error
2593  * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
2594  * will be returned.
2595  *
2596  * MPSAFE
2597  */
2598 int
2599 cache_resolve(struct nchandle *nch, struct ucred *cred)
2600 {
2601 	struct namecache *par_tmp;
2602 	struct namecache *par;
2603 	struct namecache *ncp;
2604 	struct nchandle nctmp;
2605 	struct mount *mp;
2606 	struct vnode *dvp;
2607 	int error;
2608 
2609 	ncp = nch->ncp;
2610 	mp = nch->mount;
2611 restart:
2612 	/*
2613 	 * If the ncp is already resolved we have nothing to do.  However,
2614 	 * we do want to guarantee that a usable vnode is returned when
2615 	 * a vnode is present, so make sure it hasn't been reclaimed.
2616 	 */
2617 	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
2618 		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
2619 			_cache_setunresolved(ncp);
2620 		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
2621 			return (ncp->nc_error);
2622 	}
2623 
2624 	/*
2625 	 * If the ncp was destroyed it will never resolve again.  This
2626 	 * can basically only happen when someone is chdir'd into an
2627 	 * empty directory which is then rmdir'd.  We want to catch this
2628 	 * here and not dive the VFS because the VFS might actually
2629 	 * have a way to re-resolve the disconnected ncp, which will
2630 	 * result in inconsistencies in the cdir/nch for proc->p_fd.
2631 	 */
2632 	if (ncp->nc_flag & NCF_DESTROYED) {
2633 		kprintf("Warning: cache_resolve: ncp '%s' was unlinked\n",
2634 			ncp->nc_name);
2635 		return(EINVAL);
2636 	}
2637 
2638 	/*
2639 	 * Mount points need special handling because the parent does not
2640 	 * belong to the same filesystem as the ncp.
2641 	 */
2642 	if (ncp == mp->mnt_ncmountpt.ncp)
2643 		return (cache_resolve_mp(mp));
2644 
2645 	/*
2646 	 * We expect an unbroken chain of ncps to at least the mount point,
2647 	 * and even all the way to root (but this code doesn't have to go
2648 	 * past the mount point).
2649 	 */
2650 	if (ncp->nc_parent == NULL) {
2651 		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
2652 			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
2653 		ncp->nc_error = EXDEV;
2654 		return(ncp->nc_error);
2655 	}
2656 
2657 	/*
2658 	 * The vp's of the parent directories in the chain are held via vhold()
2659 	 * due to the existence of the child, and should not disappear.
2660 	 * However, there are cases where they can disappear:
2661 	 *
2662 	 *	- due to filesystem I/O errors.
2663 	 *	- due to NFS being stupid about tracking the namespace and
2664 	 *	  destroys the namespace for entire directories quite often.
2665 	 *	- due to forced unmounts.
2666 	 *	- due to an rmdir (parent will be marked DESTROYED)
2667 	 *
2668 	 * When this occurs we have to track the chain backwards and resolve
2669 	 * it, looping until the resolver catches up to the current node.  We
2670 	 * could recurse here but we might run ourselves out of kernel stack
2671 	 * so we do it in a more painful manner.  This situation really should
2672 	 * not occur all that often, and when it does it should not have
2673 	 * to back up too many nodes to resolve the ncp.
2674 	 */
2675 	while ((dvp = cache_dvpref(ncp)) == NULL) {
2676 		/*
2677 		 * This case can occur if a process is CD'd into a
2678 		 * directory which is then rmdir'd.  If the parent is marked
2679 		 * destroyed there is no point trying to resolve it.
2680 		 */
2681 		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
2682 			return(ENOENT);
2683 		par = ncp->nc_parent;
2684 		_cache_hold(par);
2685 		_cache_lock(par);
2686 		while ((par_tmp = par->nc_parent) != NULL &&
2687 		       par_tmp->nc_vp == NULL) {
2688 			_cache_hold(par_tmp);
2689 			_cache_lock(par_tmp);
2690 			_cache_put(par);
2691 			par = par_tmp;
2692 		}
2693 		if (par->nc_parent == NULL) {
2694 			kprintf("EXDEV case 2 %*.*s\n",
2695 				par->nc_nlen, par->nc_nlen, par->nc_name);
2696 			_cache_put(par);
2697 			return (EXDEV);
2698 		}
2699 		kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
2700 			par->nc_nlen, par->nc_nlen, par->nc_name);
2701 		/*
2702 		 * The parent is not set in stone, ref and lock it to prevent
2703 		 * it from disappearing.  Also note that due to renames it
2704 		 * is possible for our ncp to move and for par to no longer
2705 		 * be one of its parents.  We resolve it anyway, the loop
2706 		 * will handle any moves.
2707 		 */
2708 		_cache_get(par);	/* additional hold/lock */
2709 		_cache_put(par);	/* from earlier hold/lock */
2710 		if (par == nch->mount->mnt_ncmountpt.ncp) {
2711 			cache_resolve_mp(nch->mount);
2712 		} else if ((dvp = cache_dvpref(par)) == NULL) {
2713 			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
2714 			_cache_put(par);
2715 			continue;
2716 		} else {
2717 			if (par->nc_flag & NCF_UNRESOLVED) {
2718 				nctmp.mount = mp;
2719 				nctmp.ncp = par;
2720 				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
2721 			}
2722 			vrele(dvp);
2723 		}
2724 		if ((error = par->nc_error) != 0) {
2725 			if (par->nc_error != EAGAIN) {
2726 				kprintf("EXDEV case 3 %*.*s error %d\n",
2727 				    par->nc_nlen, par->nc_nlen, par->nc_name,
2728 				    par->nc_error);
2729 				_cache_put(par);
2730 				return(error);
2731 			}
2732 			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
2733 				par, par->nc_nlen, par->nc_nlen, par->nc_name);
2734 		}
2735 		_cache_put(par);
2736 		/* loop */
2737 	}
2738 
2739 	/*
2740 	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
2741 	 * ncp's and reattach them.  If this occurs the original ncp is marked
2742 	 * EAGAIN to force a relookup.
2743 	 *
2744 	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
2745 	 * ncp must already be resolved.
2746 	 */
2747 	if (dvp) {
2748 		nctmp.mount = mp;
2749 		nctmp.ncp = ncp;
2750 		ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
2751 		vrele(dvp);
2752 	} else {
2753 		ncp->nc_error = EPERM;
2754 	}
2755 	if (ncp->nc_error == EAGAIN) {
2756 		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
2757 			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
2758 		goto restart;
2759 	}
2760 	return(ncp->nc_error);
2761 }
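
/*
 * Usage sketch (hypothetical, per the semantics above): resolve a
 * locked and referenced ncp, treating ENOENT as a successfully
 * resolved negative hit rather than a hard failure:
 *
 *	error = cache_resolve(&nch, cred);
 *	if (error == 0)
 *		vp = nch.ncp->nc_vp;		(positive hit)
 *	else if (error == ENOENT)
 *		...				(negative hit)
 *	else
 *		...				(hard error, e.g. EXDEV)
 */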
2762 
2763 /*
2764  * Resolve the ncp associated with a mount point.  Such ncp's almost always
2765  * remain resolved and this routine is rarely called.  NFS MPs tend to force
2766  * re-resolution more often due to its mac-truck-smash-the-namecache
2767  * method of tracking namespace changes.
2768  *
2769  * The semantics for this call is that the passed ncp must be locked on
2770  * entry and will be locked on return.  However, if we actually have to
2771  * resolve the mount point we temporarily unlock the entry in order to
2772  * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
2773  * the unlock we have to recheck the flags after we relock.
2774  */
2775 static int
2776 cache_resolve_mp(struct mount *mp)
2777 {
2778 	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
2779 	struct vnode *vp;
2780 	int error;
2781 
2782 	KKASSERT(mp != NULL);
2783 
2784 	/*
2785 	 * If the ncp is already resolved we have nothing to do.  However,
2786 	 * we do want to guarentee that a usable vnode is returned when
2787 	 * a vnode is present, so make sure it hasn't been reclaimed.
2788 	 */
2789 	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
2790 		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
2791 			_cache_setunresolved(ncp);
2792 	}
2793 
2794 	if (ncp->nc_flag & NCF_UNRESOLVED) {
2795 		_cache_unlock(ncp);
2796 		while (vfs_busy(mp, 0))
2797 			;
2798 		error = VFS_ROOT(mp, &vp);
2799 		_cache_lock(ncp);
2800 
2801 		/*
2802 		 * recheck the ncp state after relocking.
2803 		 */
2804 		if (ncp->nc_flag & NCF_UNRESOLVED) {
2805 			ncp->nc_error = error;
2806 			if (error == 0) {
2807 				_cache_setvp(mp, ncp, vp);
2808 				vput(vp);
2809 			} else {
2810 				kprintf("[diagnostic] cache_resolve_mp: failed"
2811 					" to resolve mount %p err=%d ncp=%p\n",
2812 					mp, error, ncp);
2813 				_cache_setvp(mp, ncp, NULL);
2814 			}
2815 		} else if (error == 0) {
2816 			vput(vp);
2817 		}
2818 		vfs_unbusy(mp);
2819 	}
2820 	return(ncp->nc_error);
2821 }
2822 
2823 /*
2824  * Clean out negative cache entries when too many have accumulated.
2825  *
2826  * MPSAFE
2827  */
2828 static void
2829 _cache_cleanneg(int count)
2830 {
2831 	struct namecache *ncp;
2832 
2833 	/*
2834 	 * Attempt to clean out the specified number of negative cache
2835 	 * entries.
2836 	 */
2837 	while (count) {
2838 		spin_lock(&ncspin);
2839 		ncp = TAILQ_FIRST(&ncneglist);
2840 		if (ncp == NULL) {
2841 			spin_unlock(&ncspin);
2842 			break;
2843 		}
2844 		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
2845 		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
2846 		_cache_hold(ncp);
2847 		spin_unlock(&ncspin);
2848 		if (_cache_lock_special(ncp) == 0) {
2849 			ncp = cache_zap(ncp, 1);
2850 			if (ncp)
2851 				_cache_drop(ncp);
2852 		} else {
2853 			_cache_drop(ncp);
2854 		}
2855 		--count;
2856 	}
2857 }
2858 
2859 /*
2860  * Clean out positive cache entries when too many have accumulated.
2861  *
2862  * MPSAFE
2863  */
2864 static void
2865 _cache_cleanpos(int count)
2866 {
2867 	static volatile int rover;
2868 	struct nchash_head *nchpp;
2869 	struct namecache *ncp;
2870 	int rover_copy;
2871 
2872 	/*
2873 	 * Attempt to clean out the specified number of positive cache
2874 	 * entries.
2875 	 */
2876 	while (count) {
2877 		rover_copy = ++rover;	/* MPSAFEENOUGH */
2878 		cpu_ccfence();
2879 		nchpp = NCHHASH(rover_copy);
2880 
2881 		spin_lock(&nchpp->spin);
2882 		ncp = LIST_FIRST(&nchpp->list);
2883 		if (ncp)
2884 			_cache_hold(ncp);
2885 		spin_unlock(&nchpp->spin);
2886 
2887 		if (ncp) {
2888 			if (_cache_lock_special(ncp) == 0) {
2889 				ncp = cache_zap(ncp, 1);
2890 				if (ncp)
2891 					_cache_drop(ncp);
2892 			} else {
2893 				_cache_drop(ncp);
2894 			}
2895 		}
2896 		--count;
2897 	}
2898 }
2899 
2900 /*
2901  * This is a kitchen sink function to clean out ncps which we
2902  * tried to zap from cache_drop() but failed because we were
2903  * unable to acquire the parent lock.
2904  *
2905  * Such entries can also be removed via cache_inval_vp(), such
2906  * as when unmounting.
2907  *
2908  * MPSAFE
2909  */
2910 static void
2911 _cache_cleandefered(void)
2912 {
2913 	struct nchash_head *nchpp;
2914 	struct namecache *ncp;
2915 	struct namecache dummy;
2916 	int i;
2917 
2918 	numdefered = 0;
2919 	bzero(&dummy, sizeof(dummy));
2920 	dummy.nc_flag = NCF_DESTROYED;
2921 
2922 	for (i = 0; i <= nchash; ++i) {
2923 		nchpp = &nchashtbl[i];
2924 
2925 		spin_lock(&nchpp->spin);
2926 		LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
2927 		ncp = &dummy;
2928 		while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
2929 			if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
2930 				continue;
2931 			LIST_REMOVE(&dummy, nc_hash);
2932 			LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
2933 			_cache_hold(ncp);
2934 			spin_unlock(&nchpp->spin);
2935 			if (_cache_lock_nonblock(ncp) == 0) {
2936 				ncp->nc_flag &= ~NCF_DEFEREDZAP;
2937 				_cache_unlock(ncp);
2938 			}
2939 			_cache_drop(ncp);
2940 			spin_lock(&nchpp->spin);
2941 			ncp = &dummy;
2942 		}
2943 		LIST_REMOVE(&dummy, nc_hash);
2944 		spin_unlock(&nchpp->spin);
2945 	}
2946 }
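
/*
 * Note on the technique above: the zeroed 'dummy' entry, flagged
 * NCF_DESTROYED so normal lookups skip it, serves as a cursor in the
 * hash chain.  It lets the scan release the bucket spinlock while it
 * works on an entry and then resume from the same position.
 */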
2947 
2948 /*
2949  * Name cache initialization, from vfsinit() when we are booting
2950  */
2951 void
2952 nchinit(void)
2953 {
2954 	int i;
2955 	globaldata_t gd;
2956 
2957 	/* Initialize per-cpu namecache effectiveness statistics. */
2958 	for (i = 0; i < ncpus; ++i) {
2959 		gd = globaldata_find(i);
2960 		gd->gd_nchstats = &nchstats[i];
2961 	}
2962 	TAILQ_INIT(&ncneglist);
2963 	spin_init(&ncspin);
2964 	nchashtbl = hashinit_ext(desiredvnodes / 2,
2965 				 sizeof(struct nchash_head),
2966 				 M_VFSCACHE, &nchash);
2967 	for (i = 0; i <= (int)nchash; ++i) {
2968 		LIST_INIT(&nchashtbl[i].list);
2969 		spin_init(&nchashtbl[i].spin);
2970 	}
2971 	nclockwarn = 5 * hz;
2972 }
2973 
2974 /*
2975  * Called from start_init() to bootstrap the root filesystem.  Returns
2976  * a referenced, unlocked namecache record.
2977  */
2978 void
2979 cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
2980 {
2981 	nch->ncp = cache_alloc(0);
2982 	nch->mount = mp;
2983 	atomic_add_int(&mp->mnt_refs, 1);
2984 	if (vp)
2985 		_cache_setvp(nch->mount, nch->ncp, vp);
2986 }
2987 
2988 /*
2989  * vfs_cache_setroot()
2990  *
2991  *	Create an association between the root of our namecache and
2992  *	the root vnode.  This routine may be called several times during
2993  *	booting.
2994  *
2995  *	If the caller intends to save the returned namecache pointer somewhere
2996  *	it must cache_hold() it.
2997  */
2998 void
2999 vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
3000 {
3001 	struct vnode *ovp;
3002 	struct nchandle onch;
3003 
3004 	ovp = rootvnode;
3005 	onch = rootnch;
3006 	rootvnode = nvp;
3007 	if (nch)
3008 		rootnch = *nch;
3009 	else
3010 		cache_zero(&rootnch);
3011 	if (ovp)
3012 		vrele(ovp);
3013 	if (onch.ncp)
3014 		cache_drop(&onch);
3015 }
3016 
3017 /*
3018  * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
3019  * topology and is being removed as quickly as possible.  The new VOP_N*()
3020  * API calls are required to make specific adjustments using the supplied
3021  * ncp pointers rather than just bogusly purging random vnodes.
3022  *
3023  * Invalidate all namecache entries to a particular vnode as well as
3024  * any direct children of that vnode in the namecache.  This is a
3025  * 'catch all' purge used by filesystems that do not know any better.
3026  *
3027  * Note that the linkage between the vnode and its namecache entries will
3028  * be removed, but the namecache entries themselves might stay put due to
3029  * active references from elsewhere in the system or due to the existence of
3030  * the children.  The namecache topology is left intact even if we do not
3031  * know what the vnode association is.  Such entries will be marked
3032  * NCF_UNRESOLVED.
3033  */
3034 void
3035 cache_purge(struct vnode *vp)
3036 {
3037 	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
3038 }
3039 
3040 /*
3041  * Flush all entries referencing a particular filesystem.
3042  *
3043  * Since we need to check it anyway, we will flush all the invalid
3044  * entries at the same time.
3045  */
3046 #if 0
3047 
3048 void
3049 cache_purgevfs(struct mount *mp)
3050 {
3051 	struct nchash_head *nchpp;
3052 	struct namecache *ncp, *nnp;
3053 
3054 	/*
3055 	 * Scan hash tables for applicable entries.
3056 	 */
3057 	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
3058 		spin_lock_wr(&nchpp->spin); XXX
3059 		ncp = LIST_FIRST(&nchpp->list);
3060 		if (ncp)
3061 			_cache_hold(ncp);
3062 		while (ncp) {
3063 			nnp = LIST_NEXT(ncp, nc_hash);
3064 			if (nnp)
3065 				_cache_hold(nnp);
3066 			if (ncp->nc_mount == mp) {
3067 				_cache_lock(ncp);
3068 				ncp = cache_zap(ncp, 0);
3069 				if (ncp)
3070 					_cache_drop(ncp);
3071 			} else {
3072 				_cache_drop(ncp);
3073 			}
3074 			ncp = nnp;
3075 		}
3076 		spin_unlock_wr(&nchpp->spin); XXX
3077 	}
3078 }
3079 
3080 #endif
3081 
3082 static int disablecwd;
3083 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
3084     "Disable getcwd");
3085 
3086 static u_long numcwdcalls;
3087 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdcalls, CTLFLAG_RD, &numcwdcalls, 0,
3088     "Number of current directory resolution calls");
3089 static u_long numcwdfailnf;
3090 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailnf, CTLFLAG_RD, &numcwdfailnf, 0,
3091     "Number of current directory failures due to lack of file");
3092 static u_long numcwdfailsz;
3093 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailsz, CTLFLAG_RD, &numcwdfailsz, 0,
3094     "Number of current directory failures due to large result");
3095 static u_long numcwdfound;
3096 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfound, CTLFLAG_RD, &numcwdfound, 0,
3097     "Number of current directory resolution successes");
3098 
3099 /*
3100  * MPALMOSTSAFE
3101  */
3102 int
3103 sys___getcwd(struct __getcwd_args *uap)
3104 {
3105 	u_int buflen;
3106 	int error;
3107 	char *buf;
3108 	char *bp;
3109 
3110 	if (disablecwd)
3111 		return (ENODEV);
3112 
3113 	buflen = uap->buflen;
3114 	if (buflen == 0)
3115 		return (EINVAL);
3116 	if (buflen > MAXPATHLEN)
3117 		buflen = MAXPATHLEN;
3118 
3119 	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
3120 	get_mplock();
3121 	bp = kern_getcwd(buf, buflen, &error);
3122 	rel_mplock();
3123 	if (error == 0)
3124 		error = copyout(bp, uap->buf, strlen(bp) + 1);
3125 	kfree(buf, M_TEMP);
3126 	return (error);
3127 }
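
/*
 * Hypothetical userland sketch: callers of the underlying __getcwd()
 * syscall typically retry with a larger buffer when ERANGE comes back,
 * since the kernel caps buflen at MAXPATHLEN but rejects results that
 * do not fit:
 *
 *	if (__getcwd(buf, sizeof(buf)) != 0 && errno == ERANGE)
 *		...				(retry with a larger buf)
 */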
3128 
3129 char *
3130 kern_getcwd(char *buf, size_t buflen, int *error)
3131 {
3132 	struct proc *p = curproc;
3133 	char *bp;
3134 	int i, slash_prefixed;
3135 	struct filedesc *fdp;
3136 	struct nchandle nch;
3137 	struct namecache *ncp;
3138 
3139 	numcwdcalls++;
3140 	bp = buf;
3141 	bp += buflen - 1;
3142 	*bp = '\0';
3143 	fdp = p->p_fd;
3144 	slash_prefixed = 0;
3145 
3146 	nch = fdp->fd_ncdir;
3147 	ncp = nch.ncp;
3148 	if (ncp)
3149 		_cache_hold(ncp);
3150 
3151 	while (ncp && (ncp != fdp->fd_nrdir.ncp ||
3152 	       nch.mount != fdp->fd_nrdir.mount)
3153 	) {
3154 		/*
3155 		 * While traversing upwards if we encounter the root
3156 		 * of the current mount we have to skip to the mount point
3157 		 * in the underlying filesystem.
3158 		 */
3159 		if (ncp == nch.mount->mnt_ncmountpt.ncp) {
3160 			nch = nch.mount->mnt_ncmounton;
3161 			_cache_drop(ncp);
3162 			ncp = nch.ncp;
3163 			if (ncp)
3164 				_cache_hold(ncp);
3165 			continue;
3166 		}
3167 
3168 		/*
3169 		 * Prepend the path segment
3170 		 */
3171 		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
3172 			if (bp == buf) {
3173 				numcwdfailsz++;
3174 				*error = ERANGE;
3175 				bp = NULL;
3176 				goto done;
3177 			}
3178 			*--bp = ncp->nc_name[i];
3179 		}
3180 		if (bp == buf) {
3181 			numcwdfailsz++;
3182 			*error = ERANGE;
3183 			bp = NULL;
3184 			goto done;
3185 		}
3186 		*--bp = '/';
3187 		slash_prefixed = 1;
3188 
3189 		/*
3190 		 * Go up a directory.  This isn't a mount point so we don't
3191 		 * have to check again.
3192 		 */
3193 		while ((nch.ncp = ncp->nc_parent) != NULL) {
3194 			_cache_lock(ncp);
3195 			if (nch.ncp != ncp->nc_parent) {
3196 				_cache_unlock(ncp);
3197 				continue;
3198 			}
3199 			_cache_hold(nch.ncp);
3200 			_cache_unlock(ncp);
3201 			break;
3202 		}
3203 		_cache_drop(ncp);
3204 		ncp = nch.ncp;
3205 	}
3206 	if (ncp == NULL) {
3207 		numcwdfailnf++;
3208 		*error = ENOENT;
3209 		bp = NULL;
3210 		goto done;
3211 	}
3212 	if (!slash_prefixed) {
3213 		if (bp == buf) {
3214 			numcwdfailsz++;
3215 			*error = ERANGE;
3216 			bp = NULL;
3217 			goto done;
3218 		}
3219 		*--bp = '/';
3220 	}
3221 	numcwdfound++;
3222 	*error = 0;
3223 done:
3224 	if (ncp)
3225 		_cache_drop(ncp);
3226 	return (bp);
3227 }
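
/*
 * Worked example of the backwards construction above (illustrative):
 * for a cwd of /usr/src and a sufficiently large buffer, "src" is
 * copied in reverse from the end of the buffer, then a '/', then
 * "usr", then the leading '/', leaving bp pointing at the string
 * "/usr/src" while the front of buf holds the unused slack.
 */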
3228 
3229 /*
3230  * Thus begins the fullpath magic.
3231  *
3232  * The passed nchp is referenced but not locked.
3233  */
3234 static int disablefullpath;
3235 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
3236     &disablefullpath, 0,
3237     "Disable fullpath lookups");
3238 
3239 static u_int numfullpathcalls;
3240 SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathcalls, CTLFLAG_RD,
3241     &numfullpathcalls, 0,
3242     "Number of full path resolutions in progress");
3243 static u_int numfullpathfailnf;
3244 SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfailnf, CTLFLAG_RD,
3245     &numfullpathfailnf, 0,
3246     "Number of full path resolution failures due to lack of file");
3247 static u_int numfullpathfailsz;
3248 SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfailsz, CTLFLAG_RD,
3249     &numfullpathfailsz, 0,
3250     "Number of full path resolution failures due to insufficient memory");
3251 static u_int numfullpathfound;
3252 SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfound, CTLFLAG_RD,
3253     &numfullpathfound, 0,
3254     "Number of full path resolution successes");
3255 
3256 int
3257 cache_fullpath(struct proc *p, struct nchandle *nchp, struct nchandle *nchbase,
3258 	       char **retbuf, char **freebuf, int guess)
3259 {
3260 	struct nchandle fd_nrdir;
3261 	struct nchandle nch;
3262 	struct namecache *ncp;
3263 	struct mount *mp, *new_mp;
3264 	char *bp, *buf;
3265 	int slash_prefixed;
3266 	int error = 0;
3267 	int i;
3268 
3269 	atomic_add_int(&numfullpathcalls, -1);
3270 
3271 	*retbuf = NULL;
3272 	*freebuf = NULL;
3273 
3274 	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
3275 	bp = buf + MAXPATHLEN - 1;
3276 	*bp = '\0';
3277 	if (nchbase)
3278 		fd_nrdir = *nchbase;
3279 	else if (p != NULL)
3280 		fd_nrdir = p->p_fd->fd_nrdir;
3281 	else
3282 		fd_nrdir = rootnch;
3283 	slash_prefixed = 0;
3284 	nch = *nchp;
3285 	ncp = nch.ncp;
3286 	if (ncp)
3287 		_cache_hold(ncp);
3288 	mp = nch.mount;
3289 
3290 	while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
3291 		new_mp = NULL;
3292 
3293 		/*
3294 		 * If we are asked to guess the upwards path, we do so whenever
3295 		 * we encounter an ncp marked as a mountpoint.  We locate the
3296 		 * actual mount by searching for the mount whose mounted-on
3297 		 * point is this ncp.
3298 		 */
3299 		if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
3300 			new_mp = mount_get_by_nc(ncp);
3301 		}
3302 		/*
3303 		 * While traversing upwards if we encounter the root
3304 		 * of the current mount we have to skip to the mount point.
3305 		 */
3306 		if (ncp == mp->mnt_ncmountpt.ncp) {
3307 			new_mp = mp;
3308 		}
3309 		if (new_mp) {
3310 			nch = new_mp->mnt_ncmounton;
3311 			_cache_drop(ncp);
3312 			ncp = nch.ncp;
3313 			if (ncp)
3314 				_cache_hold(ncp);
3315 			mp = nch.mount;
3316 			continue;
3317 		}
3318 
3319 		/*
3320 		 * Prepend the path segment
3321 		 */
3322 		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
3323 			if (bp == buf) {
3324 				numfullpathfailsz++;
3325 				kfree(buf, M_TEMP);
3326 				error = ENOMEM;
3327 				goto done;
3328 			}
3329 			*--bp = ncp->nc_name[i];
3330 		}
3331 		if (bp == buf) {
3332 			numfullpathfailsz++;
3333 			kfree(buf, M_TEMP);
3334 			error = ENOMEM;
3335 			goto done;
3336 		}
3337 		*--bp = '/';
3338 		slash_prefixed = 1;
3339 
3340 		/*
3341 		 * Go up a directory.  This isn't a mount point so we don't
3342 		 * have to check again.
3343 		 *
3344 		 * We can only safely access nc_parent with ncp held locked.
3345 		 */
3346 		while ((nch.ncp = ncp->nc_parent) != NULL) {
3347 			_cache_lock(ncp);
3348 			if (nch.ncp != ncp->nc_parent) {
3349 				_cache_unlock(ncp);
3350 				continue;
3351 			}
3352 			_cache_hold(nch.ncp);
3353 			_cache_unlock(ncp);
3354 			break;
3355 		}
3356 		_cache_drop(ncp);
3357 		ncp = nch.ncp;
3358 	}
3359 	if (ncp == NULL) {
3360 		numfullpathfailnf++;
3361 		kfree(buf, M_TEMP);
3362 		error = ENOENT;
3363 		goto done;
3364 	}
3365 
3366 	if (!slash_prefixed) {
3367 		if (bp == buf) {
3368 			numfullpathfailsz++;
3369 			kfree(buf, M_TEMP);
3370 			error = ENOMEM;
3371 			goto done;
3372 		}
3373 		*--bp = '/';
3374 	}
3375 	numfullpathfound++;
3376 	*retbuf = bp;
3377 	*freebuf = buf;
3378 	error = 0;
3379 done:
3380 	if (ncp)
3381 		_cache_drop(ncp);
3382 	return(error);
3383 }
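
/*
 * Usage sketch (hypothetical): on success *retbuf points into the
 * kmalloc'd buffer returned via *freebuf, and the caller owns the
 * free:
 *
 *	char *path, *fbuf;
 *
 *	error = cache_fullpath(p, &nch, NULL, &path, &fbuf, 0);
 *	if (error == 0) {
 *		kprintf("path %s\n", path);
 *		kfree(fbuf, M_TEMP);
 *	}
 */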
3384 
3385 int
3386 vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf,
3387     int guess)
3388 {
3389 	struct namecache *ncp;
3390 	struct nchandle nch;
3391 	int error;
3392 
3393 	*freebuf = NULL;
3394 	atomic_add_int(&numfullpathcalls, 1);
3395 	if (disablefullpath)
3396 		return (ENODEV);
3397 
3398 	if (p == NULL)
3399 		return (EINVAL);
3400 
3401 	/* vn is NULL, client wants us to use p->p_textvp */
3402 	if (vn == NULL) {
3403 		if ((vn = p->p_textvp) == NULL)
3404 			return (EINVAL);
3405 	}
3406 	spin_lock(&vn->v_spin);
3407 	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
3408 		if (ncp->nc_nlen)
3409 			break;
3410 	}
3411 	if (ncp == NULL) {
3412 		spin_unlock(&vn->v_spin);
3413 		return (EINVAL);
3414 	}
3415 	_cache_hold(ncp);
3416 	spin_unlock(&vn->v_spin);
3417 
3418 	atomic_add_int(&numfullpathcalls, -1);
3419 	nch.ncp = ncp;
3420 	nch.mount = vn->v_mount;
3421 	error = cache_fullpath(p, &nch, NULL, retbuf, freebuf, guess);
3422 	_cache_drop(ncp);
3423 	return (error);
3424 }
3425