1 /*	$NetBSD: vfs_vnode.c,v 1.122 2020/05/18 08:27:54 hannken Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1989, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  * (c) UNIX System Laboratories, Inc.
37  * All or some portions of this file are derived from material licensed
38  * to the University of California by American Telephone and Telegraph
39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40  * the permission of UNIX System Laboratories, Inc.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
67  */
68 
69 /*
70  * The vnode cache subsystem.
71  *
72  * Life-cycle
73  *
74  *	Normally, there are two points where new vnodes are created:
75  *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
76  *	starts in one of the following ways:
77  *
78  *	- Allocation, via vcache_get(9) or vcache_new(9).
79  *	- Reclamation of inactive vnode, via vcache_vget(9).
80  *
81  *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
82  *	was another, traditional way.  Currently, only the draining thread
83  *	recycles vnodes.  This behaviour might be revisited.
84  *
85  *	The life-cycle ends when the last reference is dropped, usually
86  *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
87  *	the file system that the vnode is inactive.  Via this call, the file
88  *	system indicates whether the vnode can be recycled (usually, it
89  *	checks its own references, e.g. the link count, or whether the file
90  *	was removed).
91  *
92  *	Depending on that indication, the vnode can be put onto a free list
93  *	(cache), or cleaned via vcache_reclaim(), which calls VOP_RECLAIM(9)
94  *	to disassociate the underlying file system, and finally destroyed.
95  *
96  * Vnode state
97  *
98  *	Vnode is always in one of six states:
99  *	- MARKER	This is a marker vnode to help list traversal.  It
100  *			will never change its state.
101  *	- LOADING	Vnode is associating with the underlying file
102  *			system and is not yet ready to use.
103  *	- LOADED	Vnode has associated with the underlying file
104  *			system and is ready to use.
105  *	- BLOCKED	Vnode is active but cannot get new references.
106  *	- RECLAIMING	Vnode is disassociating from the underlying file
107  *			system.
108  *	- RECLAIMED	Vnode has disassociated from the underlying file
109  *			system and is dead.
110  *
111  *	Valid state changes are:
112  *	LOADING -> LOADED
113  *			Vnode has been initialised in vcache_get() or
114  *			vcache_new() and is ready to use.
115  *	LOADED -> RECLAIMING
116  *			Vnode starts disassociation from the underlying
117  *			file system in vcache_reclaim().
118  *	RECLAIMING -> RECLAIMED
119  *			Vnode finishes disassociation from the underlying
120  *			file system in vcache_reclaim().
121  *	LOADED -> BLOCKED
122  *			Either vcache_rekey*() is changing the vnode key or
123  *			vrelel() is about to call VOP_INACTIVE().
124  *	BLOCKED -> LOADED
125  *			The block condition is over.
126  *	LOADING -> RECLAIMED
127  *			Either vcache_get() or vcache_new() failed to
128  *			associate the underlying file system, or vcache_rekey*()
129  *			drops a vnode used as a placeholder.
130  *
131  *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
132  *	and it is possible to wait for a state change.
133  *
134  *	State is protected by v_interlock, with one exception:
135  *	to change from LOADING both v_interlock and vcache_lock must be held,
136  *	so it is possible to check "state == LOADING" without holding
137  *	v_interlock.  See vcache_get() for details.
138  *
139  * Reference counting
140  *
141  *	A vnode is considered active if its reference count
142  *	(vnode_t::v_usecount) is non-zero.  The count is maintained by the
143  *	vref(9), vrele(9) and vput(9) routines.  Typical reference holders
144  *	are open files, current working directories and mount points.
145  *
146  * Note on v_usecount and its locking
147  *
148  *	At nearly all points where it is known that v_usecount could be
149  *	zero, vnode_t::v_interlock will be held.  To change the count away
150  *	from zero, the interlock must be held.  To change from a non-zero
151  *	value to zero, again the interlock must be held.
152  *
153  *	Changing the usecount from a non-zero value to a non-zero value can
154  *	safely be done using atomic operations, without the interlock held.
155  */
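
/*
 * Illustrative sketch (not part of the build): how a file system
 * typically drives the life-cycle described above.  The "example"
 * function and the use of an inode number as the cache key are
 * hypothetical; see vcache_get() below for the actual contract.
 */
#if 0
static int
example_lookup(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	int error;

	/* Look up or load the vnode by key; returns it referenced. */
	error = vcache_get(mp, &ino, sizeof(ino), vpp);
	if (error)
		return error;

	/* Lock it for vnode operations. */
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);

	/* ... VOP_*() calls on *vpp go here ... */

	/* Unlock and drop the reference; may lead to VOP_INACTIVE(). */
	vput(*vpp);
	*vpp = NULL;
	return 0;
}
#endif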
156 
157 #include <sys/cdefs.h>
158 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.122 2020/05/18 08:27:54 hannken Exp $");
159 
160 #ifdef _KERNEL_OPT
161 #include "opt_pax.h"
162 #endif
163 
164 #include <sys/param.h>
165 #include <sys/kernel.h>
166 
167 #include <sys/atomic.h>
168 #include <sys/buf.h>
169 #include <sys/conf.h>
170 #include <sys/device.h>
171 #include <sys/hash.h>
172 #include <sys/kauth.h>
173 #include <sys/kmem.h>
174 #include <sys/kthread.h>
175 #include <sys/module.h>
176 #include <sys/mount.h>
177 #include <sys/namei.h>
178 #include <sys/pax.h>
179 #include <sys/syscallargs.h>
180 #include <sys/sysctl.h>
181 #include <sys/systm.h>
182 #include <sys/vnode_impl.h>
183 #include <sys/wapbl.h>
184 #include <sys/fstrans.h>
185 
186 #include <uvm/uvm.h>
187 #include <uvm/uvm_readahead.h>
188 #include <uvm/uvm_stat.h>
189 
190 /* Flags to vrelel. */
191 #define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */
192 
193 #define	LRU_VRELE	0
194 #define	LRU_FREE	1
195 #define	LRU_HOLD	2
196 #define	LRU_COUNT	3
197 
198 /*
199  * There are three lru lists: one holds vnodes waiting for async release,
200  * one is for vnodes which have no buffer/page references, and one for those
201  * which do (i.e. v_holdcnt is non-zero).  We put the lists into a single,
202  * private cache line as vnodes migrate between them while under the same
203  * lock (vdrain_lock).
204  */
205 u_int			numvnodes		__cacheline_aligned;
206 static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
207 static kmutex_t		vdrain_lock		__cacheline_aligned;
208 static kcondvar_t	vdrain_cv;
209 static int		vdrain_gen;
210 static kcondvar_t	vdrain_gen_cv;
211 static bool		vdrain_retry;
212 static lwp_t *		vdrain_lwp;
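
/*
 * Illustrative sketch (not part of the build): how a vnode migrates
 * between the lru lists above.  An assumed sequence only; the real
 * traffic comes from vholdl()/holdrelel() and vrelel() below.
 */
#if 0
	mutex_enter(vp->v_interlock);
	vholdl(vp);	/* v_holdcnt 0 -> 1: an unreferenced vnode moves
			 * from lru_list[LRU_FREE] to lru_list[LRU_HOLD] */
	holdrelel(vp);	/* v_holdcnt 1 -> 0: and back again */
	mutex_exit(vp->v_interlock);
#endif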
213 SLIST_HEAD(hashhead, vnode_impl);
214 static kmutex_t		vcache_lock		__cacheline_aligned;
215 static kcondvar_t	vcache_cv;
216 static u_int		vcache_hashsize;
217 static u_long		vcache_hashmask;
218 static struct hashhead	*vcache_hashtab;
219 static pool_cache_t	vcache_pool;
220 static void		lru_requeue(vnode_t *, vnodelst_t *);
221 static vnodelst_t *	lru_which(vnode_t *);
222 static vnode_impl_t *	vcache_alloc(void);
223 static void		vcache_dealloc(vnode_impl_t *);
224 static void		vcache_free(vnode_impl_t *);
225 static void		vcache_init(void);
226 static void		vcache_reinit(void);
227 static void		vcache_reclaim(vnode_t *);
228 static void		vrelel(vnode_t *, int, int);
229 static void		vdrain_thread(void *);
230 static void		vnpanic(vnode_t *, const char *, ...)
231     __printflike(2, 3);
232 
233 /* Routines having to do with the management of the vnode table. */
234 extern struct mount	*dead_rootmount;
235 extern int		(**dead_vnodeop_p)(void *);
236 extern int		(**spec_vnodeop_p)(void *);
237 extern struct vfsops	dead_vfsops;
238 
239 /*
240  * Return the current usecount of a vnode.
241  */
242 inline int
243 vrefcnt(struct vnode *vp)
244 {
245 
246 	return atomic_load_relaxed(&vp->v_usecount);
247 }
248 
249 /* Vnode state operations and diagnostics. */
250 
251 #if defined(DIAGNOSTIC)
252 
253 #define VSTATE_VALID(state) \
254 	((state) != VS_ACTIVE && (state) != VS_MARKER)
255 #define VSTATE_GET(vp) \
256 	vstate_assert_get((vp), __func__, __LINE__)
257 #define VSTATE_CHANGE(vp, from, to) \
258 	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
259 #define VSTATE_WAIT_STABLE(vp) \
260 	vstate_assert_wait_stable((vp), __func__, __LINE__)
261 
262 void
263 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
264     bool has_lock)
265 {
266 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
267 	int refcnt = vrefcnt(vp);
268 
269 	if (!has_lock) {
270 		/*
271 		 * Prevent predictive loads from the CPU, but check the state
272 		 * without locking first.
273 		 */
274 		membar_enter();
275 		if (state == VS_ACTIVE && refcnt > 0 &&
276 		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
277 			return;
278 		if (vip->vi_state == state)
279 			return;
280 		mutex_enter((vp)->v_interlock);
281 	}
282 
283 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
284 
285 	if ((state == VS_ACTIVE && refcnt > 0 &&
286 	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
287 	    vip->vi_state == state) {
288 		if (!has_lock)
289 			mutex_exit((vp)->v_interlock);
290 		return;
291 	}
292 	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
293 	    vstate_name(vip->vi_state), refcnt,
294 	    vstate_name(state), func, line);
295 }
296 
297 static enum vnode_state
298 vstate_assert_get(vnode_t *vp, const char *func, int line)
299 {
300 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
301 
302 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
303 	if (! VSTATE_VALID(vip->vi_state))
304 		vnpanic(vp, "state is %s at %s:%d",
305 		    vstate_name(vip->vi_state), func, line);
306 
307 	return vip->vi_state;
308 }
309 
310 static void
311 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
312 {
313 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
314 
315 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
316 	if (! VSTATE_VALID(vip->vi_state))
317 		vnpanic(vp, "state is %s at %s:%d",
318 		    vstate_name(vip->vi_state), func, line);
319 
320 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
321 		cv_wait(&vp->v_cv, vp->v_interlock);
322 
323 	if (! VSTATE_VALID(vip->vi_state))
324 		vnpanic(vp, "state is %s at %s:%d",
325 		    vstate_name(vip->vi_state), func, line);
326 }
327 
328 static void
329 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
330     const char *func, int line)
331 {
332 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
333 	int refcnt = vrefcnt(vp);
334 
335 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
336 	if (from == VS_LOADING)
337 		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
338 
339 	if (! VSTATE_VALID(from))
340 		vnpanic(vp, "from is %s at %s:%d",
341 		    vstate_name(from), func, line);
342 	if (! VSTATE_VALID(to))
343 		vnpanic(vp, "to is %s at %s:%d",
344 		    vstate_name(to), func, line);
345 	if (vip->vi_state != from)
346 		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
347 		    vstate_name(vip->vi_state), vstate_name(from), func, line);
348 	if ((from == VS_BLOCKED || to == VS_BLOCKED) && refcnt != 1)
349 		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
350 		    vstate_name(from), vstate_name(to), refcnt,
351 		    func, line);
352 
353 	vip->vi_state = to;
354 	if (from == VS_LOADING)
355 		cv_broadcast(&vcache_cv);
356 	if (to == VS_LOADED || to == VS_RECLAIMED)
357 		cv_broadcast(&vp->v_cv);
358 }
359 
360 #else /* defined(DIAGNOSTIC) */
361 
362 #define VSTATE_GET(vp) \
363 	(VNODE_TO_VIMPL((vp))->vi_state)
364 #define VSTATE_CHANGE(vp, from, to) \
365 	vstate_change((vp), (from), (to))
366 #define VSTATE_WAIT_STABLE(vp) \
367 	vstate_wait_stable((vp))
368 void
369 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
370     bool has_lock)
371 {
372 
373 }
374 
375 static void
376 vstate_wait_stable(vnode_t *vp)
377 {
378 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
379 
380 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
381 		cv_wait(&vp->v_cv, vp->v_interlock);
382 }
383 
384 static void
385 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
386 {
387 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
388 
389 	vip->vi_state = to;
390 	if (from == VS_LOADING)
391 		cv_broadcast(&vcache_cv);
392 	if (to == VS_LOADED || to == VS_RECLAIMED)
393 		cv_broadcast(&vp->v_cv);
394 }
395 
396 #endif /* defined(DIAGNOSTIC) */
397 
398 void
399 vfs_vnode_sysinit(void)
400 {
401 	int error __diagused, i;
402 
403 	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
404 	KASSERT(dead_rootmount != NULL);
405 	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;
406 
407 	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
408 	for (i = 0; i < LRU_COUNT; i++) {
409 		TAILQ_INIT(&lru_list[i]);
410 	}
411 	vcache_init();
412 
413 	cv_init(&vdrain_cv, "vdrain");
414 	cv_init(&vdrain_gen_cv, "vdrainwt");
415 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
416 	    NULL, &vdrain_lwp, "vdrain");
417 	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
418 }
419 
420 /*
421  * Allocate a new marker vnode.
422  */
423 vnode_t *
424 vnalloc_marker(struct mount *mp)
425 {
426 	vnode_impl_t *vip;
427 	vnode_t *vp;
428 
429 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
430 	memset(vip, 0, sizeof(*vip));
431 	vp = VIMPL_TO_VNODE(vip);
432 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
433 	vp->v_mount = mp;
434 	vp->v_type = VBAD;
435 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
436 	vip->vi_state = VS_MARKER;
437 
438 	return vp;
439 }
440 
441 /*
442  * Free a marker vnode.
443  */
444 void
445 vnfree_marker(vnode_t *vp)
446 {
447 	vnode_impl_t *vip;
448 
449 	vip = VNODE_TO_VIMPL(vp);
450 	KASSERT(vip->vi_state == VS_MARKER);
451 	mutex_obj_free(vp->v_interlock);
452 	uvm_obj_destroy(&vp->v_uobj, true);
453 	pool_cache_put(vcache_pool, vip);
454 }
455 
456 /*
457  * Test a vnode for being a marker vnode.
458  */
459 bool
460 vnis_marker(vnode_t *vp)
461 {
462 
463 	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
464 }
465 
466 /*
467  * Return the lru list this node should be on.
468  */
469 static vnodelst_t *
470 lru_which(vnode_t *vp)
471 {
472 
473 	KASSERT(mutex_owned(vp->v_interlock));
474 
475 	if (vp->v_holdcnt > 0)
476 		return &lru_list[LRU_HOLD];
477 	else
478 		return &lru_list[LRU_FREE];
479 }
480 
481 /*
482  * Put the vnode at the end of the given list.
483  * Both the current and the new list may be NULL (used on vnode alloc/free).
484  * Adjust numvnodes and signal the vdrain thread if there is work.
485  */
486 static void
487 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
488 {
489 	vnode_impl_t *vip;
490 	int d;
491 
492 	/*
493 	 * If the vnode is on the correct list, and was put there recently,
494 	 * then leave it be, thus avoiding huge cache and lock contention.
495 	 */
496 	vip = VNODE_TO_VIMPL(vp);
497 	if (listhd == vip->vi_lrulisthd &&
498 	    (getticks() - vip->vi_lrulisttm) < hz) {
499 		return;
500 	}
501 
502 	mutex_enter(&vdrain_lock);
503 	d = 0;
504 	if (vip->vi_lrulisthd != NULL)
505 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
506 	else
507 		d++;
508 	vip->vi_lrulisthd = listhd;
509 	vip->vi_lrulisttm = getticks();
510 	if (vip->vi_lrulisthd != NULL)
511 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
512 	else
513 		d--;
514 	if (d != 0) {
515 		/*
516 		 * Looks strange?  This is not a bug.  Don't store
517 		 * numvnodes unless there is a change - avoid false
518 		 * sharing on MP.
519 		 */
520 		numvnodes += d;
521 	}
522 	if ((d > 0 && numvnodes > desiredvnodes) ||
523 	    listhd == &lru_list[LRU_VRELE])
524 		cv_signal(&vdrain_cv);
525 	mutex_exit(&vdrain_lock);
526 }
527 
528 /*
529  * Release deferred vrele vnodes for this mount.
530  * Called with the file system suspended.
531  */
532 void
533 vrele_flush(struct mount *mp)
534 {
535 	vnode_impl_t *vip, *marker;
536 	vnode_t *vp;
537 	int when = 0;
538 
539 	KASSERT(fstrans_is_owner(mp));
540 
541 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
542 
543 	mutex_enter(&vdrain_lock);
544 	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);
545 
546 	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
547 		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
548 		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
549 		    vi_lrulist);
550 		vp = VIMPL_TO_VNODE(vip);
551 		if (vnis_marker(vp))
552 			continue;
553 
554 		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
555 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
556 		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
557 		vip->vi_lrulisttm = getticks();
558 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
559 		mutex_exit(&vdrain_lock);
560 
561 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
562 		mutex_enter(vp->v_interlock);
563 		vrelel(vp, 0, LK_EXCLUSIVE);
564 
565 		if (getticks() > when) {
566 			yield();
567 			when = getticks() + hz / 10;
568 		}
569 
570 		mutex_enter(&vdrain_lock);
571 	}
572 
573 	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
574 	mutex_exit(&vdrain_lock);
575 
576 	vnfree_marker(VIMPL_TO_VNODE(marker));
577 }
578 
579 /*
580  * Reclaim a cached vnode.  Used from vdrain_thread only.
581  */
582 static __inline void
583 vdrain_remove(vnode_t *vp)
584 {
585 	struct mount *mp;
586 
587 	KASSERT(mutex_owned(&vdrain_lock));
588 
589 	/* Probe usecount (unlocked). */
590 	if (vrefcnt(vp) > 0)
591 		return;
592 	/* Try v_interlock -- we lock in the wrong direction! */
593 	if (!mutex_tryenter(vp->v_interlock))
594 		return;
595 	/* Probe usecount and state. */
596 	if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
597 		mutex_exit(vp->v_interlock);
598 		return;
599 	}
600 	mp = vp->v_mount;
601 	if (fstrans_start_nowait(mp) != 0) {
602 		mutex_exit(vp->v_interlock);
603 		return;
604 	}
605 	vdrain_retry = true;
606 	mutex_exit(&vdrain_lock);
607 
608 	if (vcache_vget(vp) == 0) {
609 		if (!vrecycle(vp)) {
610 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
611 			mutex_enter(vp->v_interlock);
612 			vrelel(vp, 0, LK_EXCLUSIVE);
613 		}
614 	}
615 	fstrans_done(mp);
616 
617 	mutex_enter(&vdrain_lock);
618 }
619 
620 /*
621  * Release a cached vnode.  Used from vdrain_thread only.
622  */
623 static __inline void
624 vdrain_vrele(vnode_t *vp)
625 {
626 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
627 	struct mount *mp;
628 
629 	KASSERT(mutex_owned(&vdrain_lock));
630 
631 	mp = vp->v_mount;
632 	if (fstrans_start_nowait(mp) != 0)
633 		return;
634 
635 	/*
636 	 * First remove the vnode from the vrele list.
637 	 * Put it on the last lru list; the last vrele()
638 	 * will put it back onto the right list before
639 	 * its usecount reaches zero.
640 	 */
641 	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
642 	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
643 	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
644 	vip->vi_lrulisttm = getticks();
645 	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
646 
647 	vdrain_retry = true;
648 	mutex_exit(&vdrain_lock);
649 
650 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
651 	mutex_enter(vp->v_interlock);
652 	vrelel(vp, 0, LK_EXCLUSIVE);
653 	fstrans_done(mp);
654 
655 	mutex_enter(&vdrain_lock);
656 }
657 
658 /*
659  * Helper thread to keep the number of vnodes below desiredvnodes
660  * and release vnodes from asynchronous vrele.
661  */
662 static void
663 vdrain_thread(void *cookie)
664 {
665 	int i;
666 	u_int target;
667 	vnode_impl_t *vip, *marker;
668 
669 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
670 
671 	mutex_enter(&vdrain_lock);
672 
673 	for (;;) {
674 		vdrain_retry = false;
675 		target = desiredvnodes - desiredvnodes/10;
676 
677 		for (i = 0; i < LRU_COUNT; i++) {
678 			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
679 			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
680 				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
681 				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
682 				    vi_lrulist);
683 				if (vnis_marker(VIMPL_TO_VNODE(vip)))
684 					continue;
685 				if (i == LRU_VRELE)
686 					vdrain_vrele(VIMPL_TO_VNODE(vip));
687 				else if (numvnodes < target)
688 					break;
689 				else
690 					vdrain_remove(VIMPL_TO_VNODE(vip));
691 			}
692 			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
693 		}
694 
695 		if (vdrain_retry) {
696 			kpause("vdrainrt", false, 1, &vdrain_lock);
697 		} else {
698 			vdrain_gen++;
699 			cv_broadcast(&vdrain_gen_cv);
700 			cv_wait(&vdrain_cv, &vdrain_lock);
701 		}
702 	}
703 }
704 
705 /*
706  * Try to drop a reference on a vnode.  Abort if we would be releasing the
707  * last reference.  Note: this _must_ succeed if not the last reference.
708  */
709 static bool
710 vtryrele(vnode_t *vp)
711 {
712 	u_int use, next;
713 
714 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
715 		if (__predict_false(use == 1)) {
716 			return false;
717 		}
718 		KASSERT(use > 1);
719 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
720 		if (__predict_true(next == use)) {
721 			return true;
722 		}
723 	}
724 }
725 
726 /*
727  * vput: unlock and release the reference.
728  */
729 void
730 vput(vnode_t *vp)
731 {
732 	int lktype;
733 
734 	/*
735 	 * Do an unlocked check of the usecount.  If it looks like we're not
736 	 * about to drop the last reference, then unlock the vnode and try
737 	 * to drop the reference.  If it ends up being the last reference
738 	 * after all, vrelel() can fix it all up.  Most of the time this
739 	 * will all go to plan.
740 	 */
741 	if (vrefcnt(vp) > 1) {
742 		VOP_UNLOCK(vp);
743 		if (vtryrele(vp)) {
744 			return;
745 		}
746 		lktype = LK_NONE;
747 	} else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
748 		lktype = LK_EXCLUSIVE;
749 	} else {
750 		lktype = VOP_ISLOCKED(vp);
751 		KASSERT(lktype != LK_NONE);
752 	}
753 	mutex_enter(vp->v_interlock);
754 	vrelel(vp, 0, lktype);
755 }
756 
757 /*
758  * Vnode release.  If the reference count drops to zero, call the inactive
759  * routine and either return the vnode to the freelist or free it to the pool.
760  */
761 static void
762 vrelel(vnode_t *vp, int flags, int lktype)
763 {
764 	const bool async = ((flags & VRELEL_ASYNC) != 0);
765 	bool recycle, defer;
766 	int error;
767 
768 	KASSERT(mutex_owned(vp->v_interlock));
769 
770 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
771 	    VSTATE_GET(vp) != VS_RECLAIMED)) {
772 		vnpanic(vp, "dead but not clean");
773 	}
774 
775 	/*
776 	 * If not the last reference, just drop the reference count and
777 	 * unlock.  VOP_UNLOCK() is called here without a vnode reference
778 	 * held, but this is OK as holding v_interlock will stop the vnode
779 	 * from disappearing.
780 	 */
781 	if (vtryrele(vp)) {
782 		if (lktype != LK_NONE) {
783 			VOP_UNLOCK(vp);
784 		}
785 		mutex_exit(vp->v_interlock);
786 		return;
787 	}
788 	if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
789 		vnpanic(vp, "%s: bad ref count", __func__);
790 	}
791 
792 #ifdef DIAGNOSTIC
793 	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
794 	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
795 		vprint("vrelel: missing VOP_CLOSE()", vp);
796 	}
797 #endif
798 
799 	/*
800 	 * First try to get the vnode locked for VOP_INACTIVE().
801 	 * Defer vnode release to vdrain_thread if the caller requests it
802 	 * explicitly, is the pagedaemon, or if taking the lock failed.
803 	 */
804 	defer = false;
805 	if ((curlwp == uvm.pagedaemon_lwp) || async) {
806 		defer = true;
807 	} else if (lktype == LK_SHARED) {
808 		/* Excellent chance of getting the lock if this is the last ref. */
809 		error = vn_lock(vp, LK_UPGRADE | LK_RETRY |
810 		    LK_NOWAIT);
811 		if (error != 0) {
812 			defer = true;
813 		} else {
814 			lktype = LK_EXCLUSIVE;
815 		}
816 	} else if (lktype == LK_NONE) {
817 		/* Excellent chance of getting the lock if this is the last ref. */
818 		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY |
819 		    LK_NOWAIT);
820 		if (error != 0) {
821 			defer = true;
822 		} else {
823 			lktype = LK_EXCLUSIVE;
824 		}
825 	}
826 	KASSERT(mutex_owned(vp->v_interlock));
827 	if (defer) {
828 		/*
829 		 * Defer reclaim to the kthread; it's not safe to
830 		 * clean it here.  We donate it our last reference.
831 		 */
832 		if (lktype != LK_NONE) {
833 			VOP_UNLOCK(vp);
834 		}
835 		lru_requeue(vp, &lru_list[LRU_VRELE]);
836 		mutex_exit(vp->v_interlock);
837 		return;
838 	}
839 	KASSERT(lktype == LK_EXCLUSIVE);
840 
841 	/*
842 	 * If not clean, deactivate the vnode, but preserve
843 	 * our reference across the call to VOP_INACTIVE().
844 	 */
845 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
846 		VOP_UNLOCK(vp);
847 	} else {
848 		/*
849 		 * The vnode must not gain another reference while being
850 		 * deactivated.  If VOP_INACTIVE() indicates that
851 		 * the described file has been deleted, then recycle
852 		 * the vnode.
853 		 *
854 		 * Note that VOP_INACTIVE() will not drop the vnode lock.
855 		 */
856 		mutex_exit(vp->v_interlock);
857 		recycle = false;
858 		VOP_INACTIVE(vp, &recycle);
859 		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
860 		mutex_enter(vp->v_interlock);
861 		if (vtryrele(vp)) {
862 			VOP_UNLOCK(vp);
863 			mutex_exit(vp->v_interlock);
864 			rw_exit(vp->v_uobj.vmobjlock);
865 			return;
866 		}
867 
868 		/* Take care of space accounting. */
869 		if ((vp->v_iflag & VI_EXECMAP) != 0 &&
870 		    vp->v_uobj.uo_npages != 0) {
871 			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
872 			cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
873 		}
874 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
875 		vp->v_vflag &= ~VV_MAPPED;
876 		rw_exit(vp->v_uobj.vmobjlock);
877 
878 		/*
879 		 * Recycle the vnode if the file is now unused (unlinked),
880 		 * otherwise just unlock it.
881 		 */
882 		if (recycle) {
883 			VSTATE_ASSERT(vp, VS_LOADED);
884 			/* vcache_reclaim drops the lock. */
885 			vcache_reclaim(vp);
886 		} else {
887 			VOP_UNLOCK(vp);
888 		}
889 		KASSERT(vrefcnt(vp) > 0);
890 	}
891 
892 	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
893 		/* Gained another reference while being reclaimed. */
894 		mutex_exit(vp->v_interlock);
895 		return;
896 	}
897 
898 	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
899 		/*
900 		 * It's clean so destroy it.  It isn't referenced
901 		 * anywhere since it has been reclaimed.
902 		 */
903 		vcache_free(VNODE_TO_VIMPL(vp));
904 	} else {
905 		/*
906 		 * Otherwise, put it back onto the freelist.  It
907 		 * can't be destroyed while still associated with
908 		 * a file system.
909 		 */
910 		lru_requeue(vp, lru_which(vp));
911 		mutex_exit(vp->v_interlock);
912 	}
913 }
914 
915 void
916 vrele(vnode_t *vp)
917 {
918 
919 	if (vtryrele(vp)) {
920 		return;
921 	}
922 	mutex_enter(vp->v_interlock);
923 	vrelel(vp, 0, LK_NONE);
924 }
925 
926 /*
927  * Asynchronous vnode release: the vnode is released in a different context.
928  */
929 void
930 vrele_async(vnode_t *vp)
931 {
932 
933 	if (vtryrele(vp)) {
934 		return;
935 	}
936 	mutex_enter(vp->v_interlock);
937 	vrelel(vp, VRELEL_ASYNC, LK_NONE);
938 }
939 
940 /*
941  * Vnode reference, where a reference is already held by some other
942  * object (for example, a file structure).
943  *
944  * NB: we have lockless code sequences that rely on this not blocking.
945  */
946 void
947 vref(vnode_t *vp)
948 {
949 
950 	KASSERT(vrefcnt(vp) > 0);
951 
952 	atomic_inc_uint(&vp->v_usecount);
953 }
954 
955 /*
956  * Page or buffer structure gets a reference.
957  * Called with v_interlock held.
958  */
959 void
960 vholdl(vnode_t *vp)
961 {
962 
963 	KASSERT(mutex_owned(vp->v_interlock));
964 
965 	if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
966 		lru_requeue(vp, lru_which(vp));
967 }
968 
969 /*
970  * Page or buffer structure gets a reference.
971  */
972 void
973 vhold(vnode_t *vp)
974 {
975 
976 	mutex_enter(vp->v_interlock);
977 	vholdl(vp);
978 	mutex_exit(vp->v_interlock);
979 }
980 
981 /*
982  * Page or buffer structure frees a reference.
983  * Called with v_interlock held.
984  */
985 void
986 holdrelel(vnode_t *vp)
987 {
988 
989 	KASSERT(mutex_owned(vp->v_interlock));
990 
991 	if (vp->v_holdcnt <= 0) {
992 		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
993 	}
994 
995 	vp->v_holdcnt--;
996 	if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
997 		lru_requeue(vp, lru_which(vp));
998 }
999 
1000 /*
1001  * Page or buffer structure frees a reference.
1002  */
1003 void
1004 holdrele(vnode_t *vp)
1005 {
1006 
1007 	mutex_enter(vp->v_interlock);
1008 	holdrelel(vp);
1009 	mutex_exit(vp->v_interlock);
1010 }
1011 
1012 /*
1013  * Recycle an unused vnode if the caller holds the last reference.
1014  */
1015 bool
1016 vrecycle(vnode_t *vp)
1017 {
1018 	int error __diagused;
1019 
1020 	mutex_enter(vp->v_interlock);
1021 
1022 	/* Make sure we hold the last reference. */
1023 	VSTATE_WAIT_STABLE(vp);
1024 	if (vrefcnt(vp) != 1) {
1025 		mutex_exit(vp->v_interlock);
1026 		return false;
1027 	}
1028 
1029 	/* If the vnode is already clean, we're done. */
1030 	if (VSTATE_GET(vp) != VS_LOADED) {
1031 		VSTATE_ASSERT(vp, VS_RECLAIMED);
1032 		vrelel(vp, 0, LK_NONE);
1033 		return true;
1034 	}
1035 
1036 	/* Prevent further references until the vnode is locked. */
1037 	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1038 	mutex_exit(vp->v_interlock);
1039 
1040 	/*
1041 	 * On a leaf file system this lock will always succeed as we hold
1042 	 * the last reference and prevent further references.
1043 	 * On layered file systems waiting for the lock would open a can of
1044 	 * deadlocks as the lower vnodes may have other active references.
1045 	 */
1046 	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
1047 
1048 	mutex_enter(vp->v_interlock);
1049 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1050 
1051 	if (error) {
1052 		mutex_exit(vp->v_interlock);
1053 		return false;
1054 	}
1055 
1056 	KASSERT(vrefcnt(vp) == 1);
1057 	vcache_reclaim(vp);
1058 	vrelel(vp, 0, LK_NONE);
1059 
1060 	return true;
1061 }
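
/*
 * Illustrative sketch (not part of the build): a caller holding the
 * last reference can try to recycle an unused vnode instead of merely
 * releasing it; compare vdrain_remove() above.  Assumed caller
 * context only.
 */
#if 0
	if (!vrecycle(vp))
		vrele(vp);	/* could not recycle; just release */
#endif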
1062 
1063 /*
1064  * Helper for vrevoke() to propagate suspension from lastmp
1065  * to thismp.  Both args may be NULL.
1066  * Returns the currently suspended file system or NULL.
1067  */
1068 static struct mount *
1069 vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
1070 {
1071 	int error;
1072 
1073 	if (lastmp == thismp)
1074 		return thismp;
1075 
1076 	if (lastmp != NULL)
1077 		vfs_resume(lastmp);
1078 
1079 	if (thismp == NULL)
1080 		return NULL;
1081 
1082 	do {
1083 		error = vfs_suspend(thismp, 0);
1084 	} while (error == EINTR || error == ERESTART);
1085 
1086 	if (error == 0)
1087 		return thismp;
1088 
1089 	KASSERT(error == EOPNOTSUPP);
1090 	return NULL;
1091 }
1092 
1093 /*
1094  * Eliminate all activity associated with the requested vnode
1095  * and with all vnodes aliased to the requested vnode.
1096  */
1097 void
1098 vrevoke(vnode_t *vp)
1099 {
1100 	struct mount *mp;
1101 	vnode_t *vq;
1102 	enum vtype type;
1103 	dev_t dev;
1104 
1105 	KASSERT(vrefcnt(vp) > 0);
1106 
1107 	mp = vrevoke_suspend_next(NULL, vp->v_mount);
1108 
1109 	mutex_enter(vp->v_interlock);
1110 	VSTATE_WAIT_STABLE(vp);
1111 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
1112 		mutex_exit(vp->v_interlock);
1113 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
1114 		atomic_inc_uint(&vp->v_usecount);
1115 		mutex_exit(vp->v_interlock);
1116 		vgone(vp);
1117 	} else {
1118 		dev = vp->v_rdev;
1119 		type = vp->v_type;
1120 		mutex_exit(vp->v_interlock);
1121 
1122 		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
1123 			mp = vrevoke_suspend_next(mp, vq->v_mount);
1124 			vgone(vq);
1125 		}
1126 	}
1127 	vrevoke_suspend_next(mp, NULL);
1128 }
1129 
1130 /*
1131  * Eliminate all activity associated with a vnode in preparation for
1132  * reuse.  Drops a reference from the vnode.
1133  */
1134 void
1135 vgone(vnode_t *vp)
1136 {
1137 	int lktype;
1138 
1139 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1140 
1141 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1142 	lktype = LK_EXCLUSIVE;
1143 	mutex_enter(vp->v_interlock);
1144 	VSTATE_WAIT_STABLE(vp);
1145 	if (VSTATE_GET(vp) == VS_LOADED) {
1146 		vcache_reclaim(vp);
1147 		lktype = LK_NONE;
1148 	}
1149 	VSTATE_ASSERT(vp, VS_RECLAIMED);
1150 	vrelel(vp, 0, lktype);
1151 }
1152 
1153 static inline uint32_t
1154 vcache_hash(const struct vcache_key *key)
1155 {
1156 	uint32_t hash = HASH32_BUF_INIT;
1157 
1158 	KASSERT(key->vk_key_len > 0);
1159 
1160 	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
1161 	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
1162 	return hash;
1163 }
1164 
1165 static void
1166 vcache_init(void)
1167 {
1168 
1169 	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
1170 	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
1171 	KASSERT(vcache_pool != NULL);
1172 	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
1173 	cv_init(&vcache_cv, "vcache");
1174 	vcache_hashsize = desiredvnodes;
1175 	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
1176 	    &vcache_hashmask);
1177 }
1178 
1179 static void
1180 vcache_reinit(void)
1181 {
1182 	int i;
1183 	uint32_t hash;
1184 	u_long oldmask, newmask;
1185 	struct hashhead *oldtab, *newtab;
1186 	vnode_impl_t *vip;
1187 
1188 	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
1189 	mutex_enter(&vcache_lock);
1190 	oldtab = vcache_hashtab;
1191 	oldmask = vcache_hashmask;
1192 	vcache_hashsize = desiredvnodes;
1193 	vcache_hashtab = newtab;
1194 	vcache_hashmask = newmask;
1195 	for (i = 0; i <= oldmask; i++) {
1196 		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
1197 			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
1198 			hash = vcache_hash(&vip->vi_key);
1199 			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
1200 			    vip, vi_hash);
1201 		}
1202 	}
1203 	mutex_exit(&vcache_lock);
1204 	hashdone(oldtab, HASH_SLIST, oldmask);
1205 }
1206 
1207 static inline vnode_impl_t *
1208 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
1209 {
1210 	struct hashhead *hashp;
1211 	vnode_impl_t *vip;
1212 
1213 	KASSERT(mutex_owned(&vcache_lock));
1214 
1215 	hashp = &vcache_hashtab[hash & vcache_hashmask];
1216 	SLIST_FOREACH(vip, hashp, vi_hash) {
1217 		if (key->vk_mount != vip->vi_key.vk_mount)
1218 			continue;
1219 		if (key->vk_key_len != vip->vi_key.vk_key_len)
1220 			continue;
1221 		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
1222 			continue;
1223 		return vip;
1224 	}
1225 	return NULL;
1226 }
1227 
1228 /*
1229  * Allocate a new, uninitialized vcache node.
1230  */
1231 static vnode_impl_t *
1232 vcache_alloc(void)
1233 {
1234 	vnode_impl_t *vip;
1235 	vnode_t *vp;
1236 
1237 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
1238 	vp = VIMPL_TO_VNODE(vip);
1239 	memset(vip, 0, sizeof(*vip));
1240 
1241 	rw_init(&vip->vi_lock);
1242 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
1243 
1244 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
1245 	cv_init(&vp->v_cv, "vnode");
1246 	cache_vnode_init(vp);
1247 
1248 	vp->v_usecount = 1;
1249 	vp->v_type = VNON;
1250 	vp->v_size = vp->v_writesize = VSIZENOTSET;
1251 
1252 	vip->vi_state = VS_LOADING;
1253 
1254 	lru_requeue(vp, &lru_list[LRU_FREE]);
1255 
1256 	return vip;
1257 }
1258 
1259 /*
1260  * Deallocate a vcache node in state VS_LOADING.
1261  *
1262  * vcache_lock held on entry and released on return.
1263  */
1264 static void
1265 vcache_dealloc(vnode_impl_t *vip)
1266 {
1267 	vnode_t *vp;
1268 
1269 	KASSERT(mutex_owned(&vcache_lock));
1270 
1271 	vp = VIMPL_TO_VNODE(vip);
1272 	vfs_ref(dead_rootmount);
1273 	vfs_insmntque(vp, dead_rootmount);
1274 	mutex_enter(vp->v_interlock);
1275 	vp->v_op = dead_vnodeop_p;
1276 	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1277 	mutex_exit(&vcache_lock);
1278 	vrelel(vp, 0, LK_NONE);
1279 }
1280 
1281 /*
1282  * Free an unused, unreferenced vcache node.
1283  * v_interlock locked on entry.
1284  */
1285 static void
1286 vcache_free(vnode_impl_t *vip)
1287 {
1288 	vnode_t *vp;
1289 
1290 	vp = VIMPL_TO_VNODE(vip);
1291 	KASSERT(mutex_owned(vp->v_interlock));
1292 
1293 	KASSERT(vrefcnt(vp) == 0);
1294 	KASSERT(vp->v_holdcnt == 0);
1295 	KASSERT(vp->v_writecount == 0);
1296 	lru_requeue(vp, NULL);
1297 	mutex_exit(vp->v_interlock);
1298 
1299 	vfs_insmntque(vp, NULL);
1300 	if (vp->v_type == VBLK || vp->v_type == VCHR)
1301 		spec_node_destroy(vp);
1302 
1303 	mutex_obj_free(vp->v_interlock);
1304 	rw_destroy(&vip->vi_lock);
1305 	uvm_obj_destroy(&vp->v_uobj, true);
1306 	cv_destroy(&vp->v_cv);
1307 	cache_vnode_fini(vp);
1308 	pool_cache_put(vcache_pool, vip);
1309 }
1310 
1311 /*
1312  * Try to get an initial reference on this cached vnode.
1313  * Returns zero on success, ENOENT if the vnode has been reclaimed, and
1314  * EBUSY if the vnode state is unstable.
1315  *
1316  * v_interlock locked on entry and unlocked on exit.
1317  */
1318 int
1319 vcache_tryvget(vnode_t *vp)
1320 {
1321 	int error = 0;
1322 
1323 	KASSERT(mutex_owned(vp->v_interlock));
1324 
1325 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
1326 		error = ENOENT;
1327 	else if (__predict_false(VSTATE_GET(vp) != VS_LOADED))
1328 		error = EBUSY;
1329 	else if (vp->v_usecount == 0)
1330 		vp->v_usecount = 1;
1331 	else
1332 		atomic_inc_uint(&vp->v_usecount);
1333 
1334 	mutex_exit(vp->v_interlock);
1335 
1336 	return error;
1337 }
1338 
1339 /*
1340  * Try to get an initial reference on this cached vnode.
1341  * Returns zero on success and ENOENT if the vnode has been reclaimed.
1342  * Will wait for the vnode state to be stable.
1343  *
1344  * v_interlock locked on entry and unlocked on exit.
1345  */
1346 int
1347 vcache_vget(vnode_t *vp)
1348 {
1349 
1350 	KASSERT(mutex_owned(vp->v_interlock));
1351 
1352 	/* Increment hold count to prevent vnode from disappearing. */
1353 	vp->v_holdcnt++;
1354 	VSTATE_WAIT_STABLE(vp);
1355 	vp->v_holdcnt--;
1356 
1357 	/* If this was the last reference to a reclaimed vnode, free it now. */
1358 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
1359 		if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1360 			vcache_free(VNODE_TO_VIMPL(vp));
1361 		else
1362 			mutex_exit(vp->v_interlock);
1363 		return ENOENT;
1364 	}
1365 	VSTATE_ASSERT(vp, VS_LOADED);
1366 	if (vp->v_usecount == 0)
1367 		vp->v_usecount = 1;
1368 	else
1369 		atomic_inc_uint(&vp->v_usecount);
1370 	mutex_exit(vp->v_interlock);
1371 
1372 	return 0;
1373 }
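
/*
 * Illustrative sketch (not part of the build): the interlock protocol
 * shared by both getters above.  The vnode must have been found under
 * some lock that keeps it from being freed (e.g. vcache_lock, as in
 * vcache_get() below); that lock may be dropped once v_interlock is
 * held.
 */
#if 0
	mutex_enter(vp->v_interlock);
	/* ... drop the external lock here ... */
	error = vcache_vget(vp);	/* consumes v_interlock */
	if (error != 0)
		return error;		/* ENOENT: vnode was reclaimed */
#endif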
1374 
1375 /*
1376  * Get a vnode / fs node pair by key and return it referenced through vpp.
1377  */
1378 int
1379 vcache_get(struct mount *mp, const void *key, size_t key_len,
1380     struct vnode **vpp)
1381 {
1382 	int error;
1383 	uint32_t hash;
1384 	const void *new_key;
1385 	struct vnode *vp;
1386 	struct vcache_key vcache_key;
1387 	vnode_impl_t *vip, *new_vip;
1388 
1389 	new_key = NULL;
1390 	*vpp = NULL;
1391 
1392 	vcache_key.vk_mount = mp;
1393 	vcache_key.vk_key = key;
1394 	vcache_key.vk_key_len = key_len;
1395 	hash = vcache_hash(&vcache_key);
1396 
1397 again:
1398 	mutex_enter(&vcache_lock);
1399 	vip = vcache_hash_lookup(&vcache_key, hash);
1400 
1401 	/* If found, take a reference or retry. */
1402 	if (__predict_true(vip != NULL)) {
1403 		/*
1404 		 * If the vnode is loading we cannot take the v_interlock
1405 		 * here as it might change during load (see uvm_obj_setlock()).
1406 		 * As changing state from VS_LOADING requires both vcache_lock
1407 		 * and v_interlock it is safe to test with vcache_lock held.
1408 		 *
1409 		 * Wait for vnodes changing state from VS_LOADING and retry.
1410 		 */
1411 		if (__predict_false(vip->vi_state == VS_LOADING)) {
1412 			cv_wait(&vcache_cv, &vcache_lock);
1413 			mutex_exit(&vcache_lock);
1414 			goto again;
1415 		}
1416 		vp = VIMPL_TO_VNODE(vip);
1417 		mutex_enter(vp->v_interlock);
1418 		mutex_exit(&vcache_lock);
1419 		error = vcache_vget(vp);
1420 		if (error == ENOENT)
1421 			goto again;
1422 		if (error == 0)
1423 			*vpp = vp;
1424 		KASSERT((error != 0) == (*vpp == NULL));
1425 		return error;
1426 	}
1427 	mutex_exit(&vcache_lock);
1428 
1429 	/* Allocate and initialize a new vcache / vnode pair. */
1430 	error = vfs_busy(mp);
1431 	if (error)
1432 		return error;
1433 	new_vip = vcache_alloc();
1434 	new_vip->vi_key = vcache_key;
1435 	vp = VIMPL_TO_VNODE(new_vip);
1436 	mutex_enter(&vcache_lock);
1437 	vip = vcache_hash_lookup(&vcache_key, hash);
1438 	if (vip == NULL) {
1439 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1440 		    new_vip, vi_hash);
1441 		vip = new_vip;
1442 	}
1443 
1444 	/* If another thread beat us inserting this node, retry. */
1445 	if (vip != new_vip) {
1446 		vcache_dealloc(new_vip);
1447 		vfs_unbusy(mp);
1448 		goto again;
1449 	}
1450 	mutex_exit(&vcache_lock);
1451 
1452 	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
1453 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
1454 	if (error) {
1455 		mutex_enter(&vcache_lock);
1456 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1457 		    new_vip, vnode_impl, vi_hash);
1458 		vcache_dealloc(new_vip);
1459 		vfs_unbusy(mp);
1460 		KASSERT(*vpp == NULL);
1461 		return error;
1462 	}
1463 	KASSERT(new_key != NULL);
1464 	KASSERT(memcmp(key, new_key, key_len) == 0);
1465 	KASSERT(vp->v_op != NULL);
1466 	vfs_insmntque(vp, mp);
1467 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1468 		vp->v_vflag |= VV_MPSAFE;
1469 	vfs_ref(mp);
1470 	vfs_unbusy(mp);
1471 
1472 	/* Finished loading, finalize node. */
1473 	mutex_enter(&vcache_lock);
1474 	new_vip->vi_key.vk_key = new_key;
1475 	mutex_enter(vp->v_interlock);
1476 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1477 	mutex_exit(vp->v_interlock);
1478 	mutex_exit(&vcache_lock);
1479 	*vpp = vp;
1480 	return 0;
1481 }
1482 
1483 /*
1484  * Create a new vnode / fs node pair and return it referenced through vpp.
1485  */
1486 int
1487 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
1488     kauth_cred_t cred, void *extra, struct vnode **vpp)
1489 {
1490 	int error;
1491 	uint32_t hash;
1492 	struct vnode *vp, *ovp;
1493 	vnode_impl_t *vip, *ovip;
1494 
1495 	*vpp = NULL;
1496 
1497 	/* Allocate and initialize a new vcache / vnode pair. */
1498 	error = vfs_busy(mp);
1499 	if (error)
1500 		return error;
1501 	vip = vcache_alloc();
1502 	vip->vi_key.vk_mount = mp;
1503 	vp = VIMPL_TO_VNODE(vip);
1504 
1505 	/* Create and load the fs node. */
1506 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
1507 	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
1508 	if (error) {
1509 		mutex_enter(&vcache_lock);
1510 		vcache_dealloc(vip);
1511 		vfs_unbusy(mp);
1512 		KASSERT(*vpp == NULL);
1513 		return error;
1514 	}
1515 	KASSERT(vp->v_op != NULL);
1516 	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
1517 	if (vip->vi_key.vk_key_len > 0) {
1518 		KASSERT(vip->vi_key.vk_key != NULL);
1519 		hash = vcache_hash(&vip->vi_key);
1520 
1521 		/*
1522 		 * Wait for previous instance to be reclaimed,
1523 		 * then insert new node.
1524 		 */
1525 		mutex_enter(&vcache_lock);
1526 		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
1527 			ovp = VIMPL_TO_VNODE(ovip);
1528 			mutex_enter(ovp->v_interlock);
1529 			mutex_exit(&vcache_lock);
1530 			error = vcache_vget(ovp);
1531 			KASSERT(error == ENOENT);
1532 			mutex_enter(&vcache_lock);
1533 		}
1534 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1535 		    vip, vi_hash);
1536 		mutex_exit(&vcache_lock);
1537 	}
1538 	vfs_insmntque(vp, mp);
1539 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1540 		vp->v_vflag |= VV_MPSAFE;
1541 	vfs_ref(mp);
1542 	vfs_unbusy(mp);
1543 
1544 	/* Finished loading, finalize node. */
1545 	mutex_enter(&vcache_lock);
1546 	mutex_enter(vp->v_interlock);
1547 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1548 	mutex_exit(&vcache_lock);
1549 	mutex_exit(vp->v_interlock);
1550 	*vpp = vp;
1551 	return 0;
1552 }
1553 
1554 /*
1555  * Prepare key change: update the old cache node's key and lock the new
1556  * cache node.  Return an error if the new node already exists.
1557  */
1558 int
1559 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1560     const void *old_key, size_t old_key_len,
1561     const void *new_key, size_t new_key_len)
1562 {
1563 	uint32_t old_hash, new_hash;
1564 	struct vcache_key old_vcache_key, new_vcache_key;
1565 	vnode_impl_t *vip, *new_vip;
1566 
1567 	old_vcache_key.vk_mount = mp;
1568 	old_vcache_key.vk_key = old_key;
1569 	old_vcache_key.vk_key_len = old_key_len;
1570 	old_hash = vcache_hash(&old_vcache_key);
1571 
1572 	new_vcache_key.vk_mount = mp;
1573 	new_vcache_key.vk_key = new_key;
1574 	new_vcache_key.vk_key_len = new_key_len;
1575 	new_hash = vcache_hash(&new_vcache_key);
1576 
1577 	new_vip = vcache_alloc();
1578 	new_vip->vi_key = new_vcache_key;
1579 
1580 	/* Insert the locked new node, used as a placeholder. */
1581 	mutex_enter(&vcache_lock);
1582 	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1583 	if (vip != NULL) {
1584 		vcache_dealloc(new_vip);
1585 		return EEXIST;
1586 	}
1587 	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1588 	    new_vip, vi_hash);
1589 
1590 	/* Replace the old node's key with the temporary copy. */
1591 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1592 	KASSERT(vip != NULL);
1593 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
1594 	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
1595 	vip->vi_key = old_vcache_key;
1596 	mutex_exit(&vcache_lock);
1597 	return 0;
1598 }
1599 
1600 /*
1601  * Key change complete: update the old node and remove the placeholder.
1602  */
1603 void
1604 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1605     const void *old_key, size_t old_key_len,
1606     const void *new_key, size_t new_key_len)
1607 {
1608 	uint32_t old_hash, new_hash;
1609 	struct vcache_key old_vcache_key, new_vcache_key;
1610 	vnode_impl_t *vip, *new_vip;
1611 	struct vnode *new_vp;
1612 
1613 	old_vcache_key.vk_mount = mp;
1614 	old_vcache_key.vk_key = old_key;
1615 	old_vcache_key.vk_key_len = old_key_len;
1616 	old_hash = vcache_hash(&old_vcache_key);
1617 
1618 	new_vcache_key.vk_mount = mp;
1619 	new_vcache_key.vk_key = new_key;
1620 	new_vcache_key.vk_key_len = new_key_len;
1621 	new_hash = vcache_hash(&new_vcache_key);
1622 
1623 	mutex_enter(&vcache_lock);
1624 
1625 	/* Lookup old and new node. */
1626 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1627 	KASSERT(vip != NULL);
1628 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
1629 
1630 	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1631 	KASSERT(new_vip != NULL);
1632 	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
1633 	new_vp = VIMPL_TO_VNODE(new_vip);
1634 	mutex_enter(new_vp->v_interlock);
1635 	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
1636 	mutex_exit(new_vp->v_interlock);
1637 
1638 	/* Rekey the old node and put it onto its new hash list. */
1639 	vip->vi_key = new_vcache_key;
1640 	if (old_hash != new_hash) {
1641 		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
1642 		    vip, vnode_impl, vi_hash);
1643 		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1644 		    vip, vi_hash);
1645 	}
1646 
1647 	/* Remove the new node used as a placeholder. */
1648 	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
1649 	    new_vip, vnode_impl, vi_hash);
1650 	vcache_dealloc(new_vip);
1651 }
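
/*
 * Illustrative sketch (not part of the build): the two-phase rekey
 * protocol as a file system might drive it.  The inode-number keys
 * are hypothetical.
 */
#if 0
	error = vcache_rekey_enter(mp, vp, &old_ino, sizeof(old_ino),
	    &new_ino, sizeof(new_ino));
	if (error != 0)
		return error;	/* EEXIST: new key already cached */
	/* ... update the fs node so it is identified by new_ino ... */
	vcache_rekey_exit(mp, vp, &old_ino, sizeof(old_ino),
	    &new_ino, sizeof(new_ino));
#endif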
1652 
1653 /*
1654  * Disassociate the underlying file system from a vnode.
1655  *
1656  * Must be called with vnode locked and will return unlocked.
1657  * Must be called with the interlock held, and will return with it held.
1658  */
1659 static void
1660 vcache_reclaim(vnode_t *vp)
1661 {
1662 	lwp_t *l = curlwp;
1663 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1664 	struct mount *mp = vp->v_mount;
1665 	uint32_t hash;
1666 	uint8_t temp_buf[64], *temp_key;
1667 	size_t temp_key_len;
1668 	bool recycle, active;
1669 	int error;
1670 
1671 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1672 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1673 	KASSERT(mutex_owned(vp->v_interlock));
1674 	KASSERT(vrefcnt(vp) != 0);
1675 
1676 	active = (vrefcnt(vp) > 1);
1677 	temp_key_len = vip->vi_key.vk_key_len;
1678 	/*
1679 	 * Prevent the vnode from being recycled or brought into use
1680 	 * while we clean it out.
1681 	 */
1682 	VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
1683 	mutex_exit(vp->v_interlock);
1684 
1685 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1686 	mutex_enter(vp->v_interlock);
1687 	if ((vp->v_iflag & VI_EXECMAP) != 0 && vp->v_uobj.uo_npages != 0) {
1688 		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
1689 		cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
1690 	}
1691 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1692 	vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
1693 	mutex_exit(vp->v_interlock);
1694 	rw_exit(vp->v_uobj.vmobjlock);
1695 
1696 	/*
1697 	 * With vnode state set to reclaiming, purge name cache immediately
1698 	 * to prevent new handles on vnode, and wait for existing threads
1699 	 * trying to get a handle to notice VS_RECLAIMED status and abort.
1700 	 */
1701 	cache_purge(vp);
1702 
1703 	/* Replace the vnode key with a temporary copy. */
1704 	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
1705 		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
1706 	} else {
1707 		temp_key = temp_buf;
1708 	}
1709 	if (vip->vi_key.vk_key_len > 0) {
1710 		mutex_enter(&vcache_lock);
1711 		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
1712 		vip->vi_key.vk_key = temp_key;
1713 		mutex_exit(&vcache_lock);
1714 	}
1715 
1716 	fstrans_start(mp);
1717 
1718 	/*
1719 	 * Clean out any cached data associated with the vnode.
1720 	 * If purging an active vnode, it must be closed and
1721 	 * deactivated before being reclaimed.
1722 	 */
1723 	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
1724 	if (error != 0) {
1725 		if (wapbl_vphaswapbl(vp))
1726 			WAPBL_DISCARD(wapbl_vptomp(vp));
1727 		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1728 	}
1729 	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
1730 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1731 	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
1732 		 spec_node_revoke(vp);
1733 	}
1734 
1735 	/*
1736 	 * Disassociate the underlying file system from the vnode.
1737 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1738 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1739 	 * would no longer function.
1740 	 */
1741 	VOP_INACTIVE(vp, &recycle);
1742 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1743 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1744 	if (VOP_RECLAIM(vp)) {
1745 		vnpanic(vp, "%s: cannot reclaim", __func__);
1746 	}
1747 
1748 	KASSERT(vp->v_data == NULL);
1749 	KASSERT((vp->v_iflag & VI_PAGES) == 0);
1750 
1751 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
1752 		uvm_ra_freectx(vp->v_ractx);
1753 		vp->v_ractx = NULL;
1754 	}
1755 
1756 	/* Remove from vnode cache. */
1757 	if (vip->vi_key.vk_key_len > 0) {
1758 		hash = vcache_hash(&vip->vi_key);
1759 		mutex_enter(&vcache_lock);
1760 		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1761 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1762 		    vip, vnode_impl, vi_hash);
1763 		mutex_exit(&vcache_lock);
1764 	}
1765 	if (temp_key != temp_buf)
1766 		kmem_free(temp_key, temp_key_len);
1767 
1768 	/* Done with purge, notify sleepers of the grim news. */
1769 	mutex_enter(vp->v_interlock);
1770 	vp->v_op = dead_vnodeop_p;
1771 	vp->v_vflag |= VV_LOCKSWORK;
1772 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
1773 	vp->v_tag = VT_NON;
1774 	KNOTE(&vp->v_klist, NOTE_REVOKE);
1775 	mutex_exit(vp->v_interlock);
1776 
1777 	/*
1778 	 * Move to dead mount.  Must be after changing the operations
1779 	 * vector as vnode operations enter the mount before using the
1780 	 * operations vector.  See sys/kern/vnode_if.c.
1781 	 */
1782 	vp->v_vflag &= ~VV_ROOT;
1783 	vfs_ref(dead_rootmount);
1784 	vfs_insmntque(vp, dead_rootmount);
1785 
1786 #ifdef PAX_SEGVGUARD
1787 	pax_segvguard_cleanup(vp);
1788 #endif /* PAX_SEGVGUARD */
1789 
1790 	mutex_enter(vp->v_interlock);
1791 	fstrans_done(mp);
1792 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1793 }
1794 
1795 /*
1796  * Disassociate the underlying file system from an open device vnode
1797  * and make it anonymous.
1798  *
1799  * Vnode unlocked on entry, drops a reference to the vnode.
1800  */
1801 void
1802 vcache_make_anon(vnode_t *vp)
1803 {
1804 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1805 	uint32_t hash;
1806 	bool recycle;
1807 
1808 	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
1809 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1810 	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
1811 
1812 	/* Remove from vnode cache. */
1813 	hash = vcache_hash(&vip->vi_key);
1814 	mutex_enter(&vcache_lock);
1815 	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1816 	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1817 	    vip, vnode_impl, vi_hash);
1818 	vip->vi_key.vk_mount = dead_rootmount;
1819 	vip->vi_key.vk_key_len = 0;
1820 	vip->vi_key.vk_key = NULL;
1821 	mutex_exit(&vcache_lock);
1822 
1823 	/*
1824 	 * Disassociate the underlying file system from the vnode.
1825 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1826 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1827 	 * would no longer function.
1828 	 */
1829 	if (vn_lock(vp, LK_EXCLUSIVE)) {
1830 		vnpanic(vp, "%s: cannot lock", __func__);
1831 	}
1832 	VOP_INACTIVE(vp, &recycle);
1833 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1834 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1835 	if (VOP_RECLAIM(vp)) {
1836 		vnpanic(vp, "%s: cannot reclaim", __func__);
1837 	}
1838 
1839 	/* Purge name cache. */
1840 	cache_purge(vp);
1841 
1842 	/* Done with purge, change operations vector. */
1843 	mutex_enter(vp->v_interlock);
1844 	vp->v_op = spec_vnodeop_p;
1845 	vp->v_vflag |= VV_MPSAFE;
1846 	vp->v_vflag &= ~VV_LOCKSWORK;
1847 	mutex_exit(vp->v_interlock);
1848 
1849 	/*
1850 	 * Move to dead mount.  Must be after changing the operations
1851 	 * vector as vnode operations enter the mount before using the
1852 	 * operations vector.  See sys/kern/vnode_if.c.
1853 	 */
1854 	vfs_ref(dead_rootmount);
1855 	vfs_insmntque(vp, dead_rootmount);
1856 
1857 	vrele(vp);
1858 }
1859 
1860 /*
1861  * Update outstanding I/O count and do wakeup if requested.
1862  */
1863 void
1864 vwakeup(struct buf *bp)
1865 {
1866 	vnode_t *vp;
1867 
1868 	if ((vp = bp->b_vp) == NULL)
1869 		return;
1870 
1871 	KASSERT(bp->b_objlock == vp->v_interlock);
1872 	KASSERT(mutex_owned(bp->b_objlock));
1873 
1874 	if (--vp->v_numoutput < 0)
1875 		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
1876 	if (vp->v_numoutput == 0)
1877 		cv_broadcast(&vp->v_cv);
1878 }
1879 
1880 /*
1881  * Test a vnode for being or becoming dead.  Returns one of:
1882  * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
1883  * ENOENT: vnode is dead.
1884  * 0:      otherwise.
1885  *
1886  * Whenever this function returns a non-zero value, all future
1887  * calls will also return a non-zero value.
1888  */
1889 int
1890 vdead_check(struct vnode *vp, int flags)
1891 {
1892 
1893 	KASSERT(mutex_owned(vp->v_interlock));
1894 
1895 	if (! ISSET(flags, VDEAD_NOWAIT))
1896 		VSTATE_WAIT_STABLE(vp);
1897 
1898 	if (VSTATE_GET(vp) == VS_RECLAIMING) {
1899 		KASSERT(ISSET(flags, VDEAD_NOWAIT));
1900 		return EBUSY;
1901 	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
1902 		return ENOENT;
1903 	}
1904 
1905 	return 0;
1906 }
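
/*
 * Illustrative sketch (not part of the build): a typical non-blocking
 * deadness check by a caller that cannot sleep.  Assumed caller
 * context only.
 */
#if 0
	mutex_enter(vp->v_interlock);
	error = vdead_check(vp, VDEAD_NOWAIT);
	mutex_exit(vp->v_interlock);
	if (error != 0)
		return error;	/* EBUSY: becoming dead; ENOENT: dead */
#endif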
1907 
1908 int
1909 vfs_drainvnodes(void)
1910 {
1911 	int i, gen;
1912 
1913 	mutex_enter(&vdrain_lock);
1914 	for (i = 0; i < 2; i++) {
1915 		gen = vdrain_gen;
1916 		while (gen == vdrain_gen) {
1917 			cv_broadcast(&vdrain_cv);
1918 			cv_wait(&vdrain_gen_cv, &vdrain_lock);
1919 		}
1920 	}
1921 	mutex_exit(&vdrain_lock);
1922 
1923 	if (numvnodes >= desiredvnodes)
1924 		return EBUSY;
1925 
1926 	if (vcache_hashsize != desiredvnodes)
1927 		vcache_reinit();
1928 
1929 	return 0;
1930 }
1931 
1932 void
1933 vnpanic(vnode_t *vp, const char *fmt, ...)
1934 {
1935 	va_list ap;
1936 
1937 #ifdef DIAGNOSTIC
1938 	vprint(NULL, vp);
1939 #endif
1940 	va_start(ap, fmt);
1941 	vpanic(fmt, ap);
1942 	va_end(ap);
1943 }
1944 
1945 void
1946 vshareilock(vnode_t *tvp, vnode_t *fvp)
1947 {
1948 	kmutex_t *oldlock;
1949 
1950 	oldlock = tvp->v_interlock;
1951 	mutex_obj_hold(fvp->v_interlock);
1952 	tvp->v_interlock = fvp->v_interlock;
1953 	mutex_obj_free(oldlock);
1954 }
1955