xref: /netbsd-src/sys/kern/vfs_vnode.c (revision 33881f779a77dce6440bdc44610d94de75bebefe)
1 /*	$NetBSD: vfs_vnode.c,v 1.113 2020/02/27 22:12:54 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1989, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  * (c) UNIX System Laboratories, Inc.
37  * All or some portions of this file are derived from material licensed
38  * to the University of California by American Telephone and Telegraph
39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40  * the permission of UNIX System Laboratories, Inc.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
67  */
68 
69 /*
70  * The vnode cache subsystem.
71  *
72  * Life-cycle
73  *
74  *	Normally, there are two points where new vnodes are created:
75  *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
76  *	starts in one of the following ways:
77  *
78  *	- Allocation, via vcache_get(9) or vcache_new(9).
79  *	- Reclamation of an inactive vnode, via vcache_vget(9).
80  *
81  *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
82  *	was another, traditional way.  Currently, only the draining thread
83  *	recycles the vnodes.  This behaviour might be revisited.
84  *
85  *	The life-cycle ends when the last reference is dropped, usually
86  *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
87  *	the file system that the vnode is inactive.  Via this call, the file
88  *	system indicates whether the vnode can be recycled (usually by checking
89  *	its own references, e.g. the link count, or whether the file was removed).
90  *
91  *	Depending on this indication, the vnode can be put onto a free list
92  *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9) to
93  *	disassociate the underlying file system from the vnode before it is
94  *	finally destroyed.
95  *
96  * Vnode state
97  *
98  *	Vnode is always in one of six states:
99  *	- MARKER	This is a marker vnode to help list traversal.  It
100  *			will never change its state.
101  *	- LOADING	Vnode is associating with the underlying file system
102  *			and is not yet ready to use.
103  *	- LOADED	Vnode has an associated underlying file system and is
104  *			ready to use.
105  *	- BLOCKED	Vnode is active but cannot get new references.
106  *	- RECLAIMING	Vnode is disassociating from the underlying file
107  *			system.
108  *	- RECLAIMED	Vnode has disassociated from the underlying file
109  *			system and is dead.
110  *
111  *	Valid state changes are:
112  *	LOADING -> LOADED
113  *			Vnode has been initialised in vcache_get() or
114  *			vcache_new() and is ready to use.
115  *	LOADED -> RECLAIMING
116  *			Vnode starts disassociation from the underlying file
117  *			system in vcache_reclaim().
118  *	RECLAIMING -> RECLAIMED
119  *			Vnode finished disassociation from the underlying file
120  *			system in vcache_reclaim().
121  *	LOADED -> BLOCKED
122  *			Either vcache_rekey*() is changing the vnode key or
123  *			vrelel() is about to call VOP_INACTIVE().
124  *	BLOCKED -> LOADED
125  *			The block condition is over.
126  *	LOADING -> RECLAIMED
127  *			Either vcache_get() or vcache_new() failed to
128  *			associate the underlying file system with the vnode,
129  *			or vcache_rekey*() drops a vnode used as a placeholder.
130  *
131  *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
132  *	and it is possible to wait for a state change.
133  *
134  *	State is protected with v_interlock, with one exception:
135  *	to change from LOADING, both v_interlock and vcache_lock must be
136  *	held, so it is possible to check "state == LOADING" without holding
137  *	v_interlock.  See vcache_get() for details.
138  *
139  * Reference counting
140  *
141  *	A vnode is considered active if its reference count
142  *	(vnode_t::v_usecount) is non-zero.  The count is maintained with the
143  *	vref(9), vrele(9) and vput(9) routines.  Typical holders of
144  *	references are open files, current working directories and mount points.
145  *
146  * Note on v_usecount and its locking
147  *
148  *	At nearly all points where it is known that v_usecount could be
149  *	zero, the vnode_t::v_interlock will be held.  To change the count
150  *	away from zero, the interlock must be held.  To change from a
151  *	non-zero value to zero, again the interlock must be held.
152  *
153  *	Changing the usecount from a non-zero value to a non-zero value can
154  *	safely be done with atomics alone, as the sketch below illustrates.
155  */
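
/*
 * Illustrative sketch (not part of the original file): the reference
 * counting rules above in practice.  The example function and its
 * guard macro are hypothetical; vref(9) and vrele(9) are the real APIs.
 */
#ifdef VNODE_REFCOUNT_EXAMPLE
static void
example_use_vnode(vnode_t *vp)
{

	/*
	 * The caller already holds a reference (e.g. via a struct file),
	 * so taking another is a non-zero -> non-zero transition and
	 * needs no lock; vref() is a plain atomic increment.
	 */
	vref(vp);

	/* ... use the vnode ... */

	/*
	 * Dropping a reference may take the count to zero; vrele()
	 * acquires v_interlock in that case before deactivating the
	 * vnode via vrelel().
	 */
	vrele(vp);
}
#endif /* VNODE_REFCOUNT_EXAMPLE */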
156 
157 #include <sys/cdefs.h>
158 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.113 2020/02/27 22:12:54 ad Exp $");
159 
160 #ifdef _KERNEL_OPT
161 #include "opt_pax.h"
162 #endif
163 
164 #include <sys/param.h>
165 #include <sys/kernel.h>
166 
167 #include <sys/atomic.h>
168 #include <sys/buf.h>
169 #include <sys/conf.h>
170 #include <sys/device.h>
171 #include <sys/hash.h>
172 #include <sys/kauth.h>
173 #include <sys/kmem.h>
174 #include <sys/kthread.h>
175 #include <sys/module.h>
176 #include <sys/mount.h>
177 #include <sys/namei.h>
178 #include <sys/pax.h>
179 #include <sys/syscallargs.h>
180 #include <sys/sysctl.h>
181 #include <sys/systm.h>
182 #include <sys/vnode_impl.h>
183 #include <sys/wapbl.h>
184 #include <sys/fstrans.h>
185 
186 #include <uvm/uvm.h>
187 #include <uvm/uvm_readahead.h>
188 #include <uvm/uvm_stat.h>
189 
190 /* Flags to vrelel. */
191 #define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */
192 
193 #define	LRU_VRELE	0
194 #define	LRU_FREE	1
195 #define	LRU_HOLD	2
196 #define	LRU_COUNT	3
197 
198 /*
199  * There are three lru lists: one holds vnodes waiting for async release,
200  * one is for vnodes which have no buffer/page references and one for those
201  * which do (i.e.  v_holdcnt is non-zero).  We put the lists into a single,
202  * private cache line as vnodes migrate between them while under the same
203  * lock (vdrain_lock).
204  */
205 u_int			numvnodes		__cacheline_aligned;
206 static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
207 static kmutex_t		vdrain_lock		__cacheline_aligned;
208 static kcondvar_t	vdrain_cv;
209 static int		vdrain_gen;
210 static kcondvar_t	vdrain_gen_cv;
211 static bool		vdrain_retry;
212 static lwp_t *		vdrain_lwp;
213 SLIST_HEAD(hashhead, vnode_impl);
214 static kmutex_t		vcache_lock		__cacheline_aligned;
215 static kcondvar_t	vcache_cv;
216 static u_int		vcache_hashsize;
217 static u_long		vcache_hashmask;
218 static struct hashhead	*vcache_hashtab;
219 static pool_cache_t	vcache_pool;
220 static void		lru_requeue(vnode_t *, vnodelst_t *);
221 static vnodelst_t *	lru_which(vnode_t *);
222 static vnode_impl_t *	vcache_alloc(void);
223 static void		vcache_dealloc(vnode_impl_t *);
224 static void		vcache_free(vnode_impl_t *);
225 static void		vcache_init(void);
226 static void		vcache_reinit(void);
227 static void		vcache_reclaim(vnode_t *);
228 static void		vrelel(vnode_t *, int, int);
229 static void		vdrain_thread(void *);
230 static void		vnpanic(vnode_t *, const char *, ...)
231     __printflike(2, 3);
232 
233 /* Routines having to do with the management of the vnode table. */
234 extern struct mount	*dead_rootmount;
235 extern int		(**dead_vnodeop_p)(void *);
236 extern int		(**spec_vnodeop_p)(void *);
237 extern struct vfsops	dead_vfsops;
238 
239 /* Vnode state operations and diagnostics. */
240 
241 #if defined(DIAGNOSTIC)
242 
243 #define VSTATE_VALID(state) \
244 	((state) != VS_ACTIVE && (state) != VS_MARKER)
245 #define VSTATE_GET(vp) \
246 	vstate_assert_get((vp), __func__, __LINE__)
247 #define VSTATE_CHANGE(vp, from, to) \
248 	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
249 #define VSTATE_WAIT_STABLE(vp) \
250 	vstate_assert_wait_stable((vp), __func__, __LINE__)
251 
252 void
253 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
254     bool has_lock)
255 {
256 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
257 
258 	if (!has_lock) {
259 		/*
260 		 * Prevent predictive loads from the CPU, but check the state
261 		 * without locking first.
262 		 */
263 		membar_enter();
264 		if (state == VS_ACTIVE && vp->v_usecount > 0 &&
265 		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
266 			return;
267 		if (vip->vi_state == state)
268 			return;
269 		mutex_enter((vp)->v_interlock);
270 	}
271 
272 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
273 
274 	if ((state == VS_ACTIVE && vp->v_usecount > 0 &&
275 	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
276 	    vip->vi_state == state) {
277 		if (!has_lock)
278 			mutex_exit((vp)->v_interlock);
279 		return;
280 	}
281 	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
282 	    vstate_name(vip->vi_state), vp->v_usecount,
283 	    vstate_name(state), func, line);
284 }
285 
286 static enum vnode_state
287 vstate_assert_get(vnode_t *vp, const char *func, int line)
288 {
289 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
290 
291 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
292 	if (! VSTATE_VALID(vip->vi_state))
293 		vnpanic(vp, "state is %s at %s:%d",
294 		    vstate_name(vip->vi_state), func, line);
295 
296 	return vip->vi_state;
297 }
298 
299 static void
300 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
301 {
302 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
303 
304 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
305 	if (! VSTATE_VALID(vip->vi_state))
306 		vnpanic(vp, "state is %s at %s:%d",
307 		    vstate_name(vip->vi_state), func, line);
308 
309 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
310 		cv_wait(&vp->v_cv, vp->v_interlock);
311 
312 	if (! VSTATE_VALID(vip->vi_state))
313 		vnpanic(vp, "state is %s at %s:%d",
314 		    vstate_name(vip->vi_state), func, line);
315 }
316 
317 static void
318 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
319     const char *func, int line)
320 {
321 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
322 
323 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
324 	if (from == VS_LOADING)
325 		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
326 
327 	if (! VSTATE_VALID(from))
328 		vnpanic(vp, "from is %s at %s:%d",
329 		    vstate_name(from), func, line);
330 	if (! VSTATE_VALID(to))
331 		vnpanic(vp, "to is %s at %s:%d",
332 		    vstate_name(to), func, line);
333 	if (vip->vi_state != from)
334 		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
335 		    vstate_name(vip->vi_state), vstate_name(from), func, line);
336 	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
337 		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
338 		    vstate_name(from), vstate_name(to), vp->v_usecount,
339 		    func, line);
340 
341 	vip->vi_state = to;
342 	if (from == VS_LOADING)
343 		cv_broadcast(&vcache_cv);
344 	if (to == VS_LOADED || to == VS_RECLAIMED)
345 		cv_broadcast(&vp->v_cv);
346 }
347 
348 #else /* defined(DIAGNOSTIC) */
349 
350 #define VSTATE_GET(vp) \
351 	(VNODE_TO_VIMPL((vp))->vi_state)
352 #define VSTATE_CHANGE(vp, from, to) \
353 	vstate_change((vp), (from), (to))
354 #define VSTATE_WAIT_STABLE(vp) \
355 	vstate_wait_stable((vp))
356 void
357 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
358     bool has_lock)
359 {
360 
361 }
362 
363 static void
364 vstate_wait_stable(vnode_t *vp)
365 {
366 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
367 
368 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
369 		cv_wait(&vp->v_cv, vp->v_interlock);
370 }
371 
372 static void
373 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
374 {
375 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
376 
377 	vip->vi_state = to;
378 	if (from == VS_LOADING)
379 		cv_broadcast(&vcache_cv);
380 	if (to == VS_LOADED || to == VS_RECLAIMED)
381 		cv_broadcast(&vp->v_cv);
382 }
383 
384 #endif /* defined(DIAGNOSTIC) */
385 
386 void
387 vfs_vnode_sysinit(void)
388 {
389 	int error __diagused, i;
390 
391 	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
392 	KASSERT(dead_rootmount != NULL);
393 	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;
394 
395 	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
396 	for (i = 0; i < LRU_COUNT; i++) {
397 		TAILQ_INIT(&lru_list[i]);
398 	}
399 	vcache_init();
400 
401 	cv_init(&vdrain_cv, "vdrain");
402 	cv_init(&vdrain_gen_cv, "vdrainwt");
403 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
404 	    NULL, &vdrain_lwp, "vdrain");
405 	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
406 }
407 
408 /*
409  * Allocate a new marker vnode.
410  */
411 vnode_t *
412 vnalloc_marker(struct mount *mp)
413 {
414 	vnode_impl_t *vip;
415 	vnode_t *vp;
416 
417 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
418 	memset(vip, 0, sizeof(*vip));
419 	vp = VIMPL_TO_VNODE(vip);
420 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
421 	vp->v_mount = mp;
422 	vp->v_type = VBAD;
423 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
424 	vip->vi_state = VS_MARKER;
425 
426 	return vp;
427 }
428 
429 /*
430  * Free a marker vnode.
431  */
432 void
433 vnfree_marker(vnode_t *vp)
434 {
435 	vnode_impl_t *vip;
436 
437 	vip = VNODE_TO_VIMPL(vp);
438 	KASSERT(vip->vi_state == VS_MARKER);
439 	mutex_obj_free(vp->v_interlock);
440 	uvm_obj_destroy(&vp->v_uobj, true);
441 	pool_cache_put(vcache_pool, vip);
442 }
443 
444 /*
445  * Test a vnode for being a marker vnode.
446  */
447 bool
448 vnis_marker(vnode_t *vp)
449 {
450 
451 	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
452 }
453 
454 /*
455  * Return the lru list this node should be on.
456  */
457 static vnodelst_t *
458 lru_which(vnode_t *vp)
459 {
460 
461 	KASSERT(mutex_owned(vp->v_interlock));
462 
463 	if (vp->v_holdcnt > 0)
464 		return &lru_list[LRU_HOLD];
465 	else
466 		return &lru_list[LRU_FREE];
467 }
468 
469 /*
470  * Put the vnode at the end of the given list.
471  * Both the current and the new list may be NULL (used on vnode alloc/free).
472  * Adjust numvnodes and signal vdrain thread if there is work.
473  */
474 static void
475 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
476 {
477 	vnode_impl_t *vip;
478 	int d;
479 
480 	/*
481 	 * If the vnode is on the correct list, and was put there recently,
482 	 * then leave it be, thus avoiding huge cache and lock contention.
483 	 */
484 	vip = VNODE_TO_VIMPL(vp);
485 	if (listhd == vip->vi_lrulisthd &&
486 	    (hardclock_ticks - vip->vi_lrulisttm) < hz) {
487 		return;
488 	}
489 
490 	mutex_enter(&vdrain_lock);
491 	d = 0;
492 	if (vip->vi_lrulisthd != NULL)
493 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
494 	else
495 		d++;
496 	vip->vi_lrulisthd = listhd;
497 	vip->vi_lrulisttm = hardclock_ticks;
498 	if (vip->vi_lrulisthd != NULL)
499 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
500 	else
501 		d--;
502 	if (d != 0) {
503 		/*
504 		 * Looks strange?  This is not a bug.  Don't store
505 		 * numvnodes unless there is a change - avoid false
506 		 * sharing on MP.
507 		 */
508 		numvnodes += d;
509 	}
510 	if (numvnodes > desiredvnodes || listhd == &lru_list[LRU_VRELE])
511 		cv_broadcast(&vdrain_cv);
512 	mutex_exit(&vdrain_lock);
513 }
514 
515 /*
516  * Release deferred vrele vnodes for this mount.
517  * Called with file system suspended.
518  */
519 void
520 vrele_flush(struct mount *mp)
521 {
522 	vnode_impl_t *vip, *marker;
523 	vnode_t *vp;
524 
525 	KASSERT(fstrans_is_owner(mp));
526 
527 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
528 
529 	mutex_enter(&vdrain_lock);
530 	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);
531 
532 	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
533 		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
534 		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
535 		    vi_lrulist);
536 		vp = VIMPL_TO_VNODE(vip);
537 		if (vnis_marker(vp))
538 			continue;
539 
540 		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
541 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
542 		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
543 		vip->vi_lrulisttm = hardclock_ticks;
544 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
545 		mutex_exit(&vdrain_lock);
546 
547 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
548 		mutex_enter(vp->v_interlock);
549 		vrelel(vp, 0, LK_EXCLUSIVE);
550 
551 		mutex_enter(&vdrain_lock);
552 	}
553 
554 	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
555 	mutex_exit(&vdrain_lock);
556 
557 	vnfree_marker(VIMPL_TO_VNODE(marker));
558 }
559 
560 /*
561  * Reclaim a cached vnode.  Used from vdrain_thread only.
562  */
563 static __inline void
564 vdrain_remove(vnode_t *vp)
565 {
566 	struct mount *mp;
567 
568 	KASSERT(mutex_owned(&vdrain_lock));
569 
570 	/* Probe usecount (unlocked). */
571 	if (vp->v_usecount > 0)
572 		return;
573 	/* Try v_interlock -- we lock the wrong direction! */
574 	if (!mutex_tryenter(vp->v_interlock))
575 		return;
576 	/* Probe usecount and state. */
577 	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_LOADED) {
578 		mutex_exit(vp->v_interlock);
579 		return;
580 	}
581 	mp = vp->v_mount;
582 	if (fstrans_start_nowait(mp) != 0) {
583 		mutex_exit(vp->v_interlock);
584 		return;
585 	}
586 	vdrain_retry = true;
587 	mutex_exit(&vdrain_lock);
588 
589 	if (vcache_vget(vp) == 0) {
590 		if (!vrecycle(vp)) {
591 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
592 			mutex_enter(vp->v_interlock);
593 			vrelel(vp, 0, LK_EXCLUSIVE);
594 		}
595 	}
596 	fstrans_done(mp);
597 
598 	mutex_enter(&vdrain_lock);
599 }
600 
601 /*
602  * Release a cached vnode.  Used from vdrain_thread only.
603  */
604 static __inline void
605 vdrain_vrele(vnode_t *vp)
606 {
607 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
608 	struct mount *mp;
609 
610 	KASSERT(mutex_owned(&vdrain_lock));
611 
612 	mp = vp->v_mount;
613 	if (fstrans_start_nowait(mp) != 0)
614 		return;
615 
616 	/*
617 	 * First remove the vnode from the vrele list.
618 	 * Put it on the last lru list; the last vrele()
619 	 * will put it back onto the right list before
620 	 * its v_usecount reaches zero.
621 	 */
622 	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
623 	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
624 	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
625 	vip->vi_lrulisttm = hardclock_ticks;
626 	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
627 
628 	vdrain_retry = true;
629 	mutex_exit(&vdrain_lock);
630 
631 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
632 	mutex_enter(vp->v_interlock);
633 	vrelel(vp, 0, LK_EXCLUSIVE);
634 	fstrans_done(mp);
635 
636 	mutex_enter(&vdrain_lock);
637 }
638 
639 /*
640  * Helper thread to keep the number of vnodes below desiredvnodes
641  * and release vnodes from asynchronous vrele.
642  */
643 static void
644 vdrain_thread(void *cookie)
645 {
646 	int i;
647 	u_int target;
648 	vnode_impl_t *vip, *marker;
649 
650 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
651 
652 	mutex_enter(&vdrain_lock);
653 
654 	for (;;) {
655 		vdrain_retry = false;
656 		target = desiredvnodes - desiredvnodes/10;
657 
658 		for (i = 0; i < LRU_COUNT; i++) {
659 			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
660 			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
661 				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
662 				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
663 				    vi_lrulist);
664 				if (vnis_marker(VIMPL_TO_VNODE(vip)))
665 					continue;
666 				if (i == LRU_VRELE)
667 					vdrain_vrele(VIMPL_TO_VNODE(vip));
668 				else if (numvnodes < target)
669 					break;
670 				else
671 					vdrain_remove(VIMPL_TO_VNODE(vip));
672 			}
673 			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
674 		}
675 
676 		if (vdrain_retry) {
677 			mutex_exit(&vdrain_lock);
678 			yield();
679 			mutex_enter(&vdrain_lock);
680 		} else {
681 			vdrain_gen++;
682 			cv_broadcast(&vdrain_gen_cv);
683 			cv_wait(&vdrain_cv, &vdrain_lock);
684 		}
685 	}
686 }
687 
688 /*
689  * Try to drop reference on a vnode.  Abort if we are releasing the
690  * last reference.  Note: this _must_ succeed if not the last reference.
691  */
692 static bool
693 vtryrele(vnode_t *vp)
694 {
695 	u_int use, next;
696 
697 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
698 		if (__predict_false(use == 1)) {
699 			return false;
700 		}
701 		KASSERT(use > 1);
702 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
703 		if (__predict_true(next == use)) {
704 			return true;
705 		}
706 	}
707 }
708 
709 /*
710  * vput: unlock and release the reference.
711  */
712 void
713 vput(vnode_t *vp)
714 {
715 	int lktype;
716 
717 	/*
718 	 * Do an unlocked check of v_usecount.  If it looks like we're not
719 	 * about to drop the last reference, then unlock the vnode and try
720 	 * to drop the reference.  If it ends up being the last reference
721 	 * after all, vrelel() can fix it all up.  Most of the time this
722 	 * will all go to plan.
723 	 */
724 	if (atomic_load_relaxed(&vp->v_usecount) > 1) {
725 		VOP_UNLOCK(vp);
726 		if (vtryrele(vp)) {
727 			return;
728 		}
729 		lktype = LK_NONE;
730 	} else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
731 		lktype = LK_EXCLUSIVE;
732 	} else {
733 		lktype = VOP_ISLOCKED(vp);
734 		KASSERT(lktype != LK_NONE);
735 	}
736 	mutex_enter(vp->v_interlock);
737 	vrelel(vp, 0, lktype);
738 }
739 
740 /*
741  * Vnode release.  If the reference count drops to zero, call the inactive
742  * routine and either return the vnode to the freelist or free it to the pool.
743  */
744 static void
745 vrelel(vnode_t *vp, int flags, int lktype)
746 {
747 	const bool async = ((flags & VRELEL_ASYNC) != 0);
748 	bool recycle, defer;
749 	int error;
750 
751 	KASSERT(mutex_owned(vp->v_interlock));
752 
753 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
754 	    VSTATE_GET(vp) != VS_RECLAIMED)) {
755 		vnpanic(vp, "dead but not clean");
756 	}
757 
758 	/*
759 	 * If not the last reference, just drop the reference count and
760 	 * unlock.  VOP_UNLOCK() is called here without a vnode reference
761 	 * held, but is ok as the hold of v_interlock will stop the vnode
762 	 * held, but that is OK as holding v_interlock will stop the vnode
763 	 */
764 	if (vtryrele(vp)) {
765 		if (lktype != LK_NONE) {
766 			VOP_UNLOCK(vp);
767 		}
768 		mutex_exit(vp->v_interlock);
769 		return;
770 	}
771 	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
772 		vnpanic(vp, "%s: bad ref count", __func__);
773 	}
774 
775 #ifdef DIAGNOSTIC
776 	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
777 	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
778 		vprint("vrelel: missing VOP_CLOSE()", vp);
779 	}
780 #endif
781 
782 	/*
783 	 * First try to get the vnode locked for VOP_INACTIVE().
784 	 * Defer vnode release to vdrain_thread if the caller requests
785 	 * it explicitly, is the pagedaemon, or if the lock attempt failed.
786 	 */
787 	defer = false;
788 	if ((curlwp == uvm.pagedaemon_lwp) || async) {
789 		defer = true;
790 	} else if (lktype == LK_SHARED) {
791 		/* Excellent chance of getting the lock if this is the last ref. */
792 		error = vn_lock(vp, LK_UPGRADE | LK_RETRY |
793 		    LK_NOWAIT);
794 		if (error != 0) {
795 			defer = true;
796 		} else {
797 			lktype = LK_EXCLUSIVE;
798 		}
799 	} else if (lktype == LK_NONE) {
800 		/* Excellent chance of getting the lock if this is the last ref. */
801 		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY |
802 		    LK_NOWAIT);
803 		if (error != 0) {
804 			defer = true;
805 		} else {
806 			lktype = LK_EXCLUSIVE;
807 		}
808 	}
809 	KASSERT(mutex_owned(vp->v_interlock));
810 	if (defer) {
811 		/*
812 		 * Defer reclaim to the kthread; it's not safe to
813 		 * clean it here.  We donate it our last reference.
814 		 */
815 		if (lktype != LK_NONE) {
816 			VOP_UNLOCK(vp);
817 		}
818 		lru_requeue(vp, &lru_list[LRU_VRELE]);
819 		mutex_exit(vp->v_interlock);
820 		return;
821 	}
822 	KASSERT(lktype == LK_EXCLUSIVE);
823 
824 	/*
825 	 * If not clean, deactivate the vnode, but preserve
826 	 * our reference across the call to VOP_INACTIVE().
827 	 */
828 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
829 		VOP_UNLOCK(vp);
830 	} else {
831 		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
832 		mutex_exit(vp->v_interlock);
833 
834 		/*
835 		 * The vnode must not gain another reference while being
836 		 * deactivated.  If VOP_INACTIVE() indicates that
837 		 * the described file has been deleted, then recycle
838 		 * the vnode.
839 		 *
840 		 * Note that VOP_INACTIVE() will not drop the vnode lock.
841 		 */
842 		recycle = false;
843 		VOP_INACTIVE(vp, &recycle);
844 		if (!recycle)
845 			VOP_UNLOCK(vp);
846 		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
847 		mutex_enter(vp->v_interlock);
848 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
849 		if (!recycle) {
850 			if (vtryrele(vp)) {
851 				mutex_exit(vp->v_interlock);
852 				rw_exit(vp->v_uobj.vmobjlock);
853 				return;
854 			}
855 		}
856 
857 		/* Take care of space accounting. */
858 		if ((vp->v_iflag & VI_EXECMAP) != 0 &&
859 		    vp->v_uobj.uo_npages != 0) {
860 			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
861 			cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
862 		}
863 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
864 		vp->v_vflag &= ~VV_MAPPED;
865 		rw_exit(vp->v_uobj.vmobjlock);
866 
867 		/*
868 		 * Recycle the vnode if the file is now unused (unlinked),
869 		 * otherwise just free it.
870 		 */
871 		if (recycle) {
872 			VSTATE_ASSERT(vp, VS_LOADED);
873 			/* vcache_reclaim drops the lock. */
874 			vcache_reclaim(vp);
875 		}
876 		KASSERT(vp->v_usecount > 0);
877 	}
878 
879 	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
880 		/* Gained another reference while being reclaimed. */
881 		mutex_exit(vp->v_interlock);
882 		return;
883 	}
884 
885 	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
886 		/*
887 		 * It's clean so destroy it.  It isn't referenced
888 		 * anywhere since it has been reclaimed.
889 		 */
890 		vcache_free(VNODE_TO_VIMPL(vp));
891 	} else {
892 		/*
893 		 * Otherwise, put it back onto the freelist.  It
894 		 * can't be destroyed while still associated with
895 		 * a file system.
896 		 */
897 		lru_requeue(vp, lru_which(vp));
898 		mutex_exit(vp->v_interlock);
899 	}
900 }
901 
902 void
903 vrele(vnode_t *vp)
904 {
905 
906 	if (vtryrele(vp)) {
907 		return;
908 	}
909 	mutex_enter(vp->v_interlock);
910 	vrelel(vp, 0, LK_NONE);
911 }
912 
913 /*
914  * Asynchronous vnode release: the vnode is released in a different context.
915  */
916 void
917 vrele_async(vnode_t *vp)
918 {
919 
920 	if (vtryrele(vp)) {
921 		return;
922 	}
923 	mutex_enter(vp->v_interlock);
924 	vrelel(vp, VRELEL_ASYNC, LK_NONE);
925 }
926 
927 /*
928  * Vnode reference, where a reference is already held by some other
929  * object (for example, a file structure).
930  *
931  * NB: we have lockless code sequences that rely on this not blocking.
932  */
933 void
934 vref(vnode_t *vp)
935 {
936 
937 	KASSERT(atomic_load_relaxed(&vp->v_usecount) != 0);
938 
939 	atomic_inc_uint(&vp->v_usecount);
940 }
941 
942 /*
943  * Page or buffer structure gets a reference.
944  * Called with v_interlock held.
945  */
946 void
947 vholdl(vnode_t *vp)
948 {
949 
950 	KASSERT(mutex_owned(vp->v_interlock));
951 
952 	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
953 		lru_requeue(vp, lru_which(vp));
954 }
955 
956 /*
957  * Page or buffer structure gets a reference.
958  */
959 void
960 vhold(vnode_t *vp)
961 {
962 
963 	mutex_enter(vp->v_interlock);
964 	vholdl(vp);
965 	mutex_exit(vp->v_interlock);
966 }
967 
968 /*
969  * Page or buffer structure frees a reference.
970  * Called with v_interlock held.
971  */
972 void
973 holdrelel(vnode_t *vp)
974 {
975 
976 	KASSERT(mutex_owned(vp->v_interlock));
977 
978 	if (vp->v_holdcnt <= 0) {
979 		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
980 	}
981 
982 	vp->v_holdcnt--;
983 	if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
984 		lru_requeue(vp, lru_which(vp));
985 }
986 
987 /*
988  * Page or buffer structure frees a reference.
989  */
990 void
991 holdrele(vnode_t *vp)
992 {
993 
994 	mutex_enter(vp->v_interlock);
995 	holdrelel(vp);
996 	mutex_exit(vp->v_interlock);
997 }
998 
999 /*
1000  * Recycle an unused vnode if the caller holds the last reference.
1001  */
1002 bool
1003 vrecycle(vnode_t *vp)
1004 {
1005 	int error __diagused;
1006 
1007 	mutex_enter(vp->v_interlock);
1008 
1009 	/* Make sure we hold the last reference. */
1010 	VSTATE_WAIT_STABLE(vp);
1011 	if (vp->v_usecount != 1) {
1012 		mutex_exit(vp->v_interlock);
1013 		return false;
1014 	}
1015 
1016 	/* If the vnode is already clean we're done. */
1017 	if (VSTATE_GET(vp) != VS_LOADED) {
1018 		VSTATE_ASSERT(vp, VS_RECLAIMED);
1019 		vrelel(vp, 0, LK_NONE);
1020 		return true;
1021 	}
1022 
1023 	/* Prevent further references until the vnode is locked. */
1024 	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1025 	mutex_exit(vp->v_interlock);
1026 
1027 	/*
1028 	 * On a leaf file system this lock will always succeed as we hold
1029 	 * the last reference and prevent further references.
1030 	 * On layered file systems waiting for the lock would open a can of
1031 	 * deadlocks as the lower vnodes may have other active references.
1032 	 */
1033 	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
1034 
1035 	mutex_enter(vp->v_interlock);
1036 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1037 
1038 	if (error) {
1039 		mutex_exit(vp->v_interlock);
1040 		return false;
1041 	}
1042 
1043 	KASSERT(vp->v_usecount == 1);
1044 	vcache_reclaim(vp);
1045 	vrelel(vp, 0, LK_NONE);
1046 
1047 	return true;
1048 }
1049 
1050 /*
1051  * Helper for vrevoke() to propagate suspension from lastmp
1052  * to thismp.  Both args may be NULL.
1053  * Returns the currently suspended file system or NULL.
1054  */
1055 static struct mount *
1056 vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
1057 {
1058 	int error;
1059 
1060 	if (lastmp == thismp)
1061 		return thismp;
1062 
1063 	if (lastmp != NULL)
1064 		vfs_resume(lastmp);
1065 
1066 	if (thismp == NULL)
1067 		return NULL;
1068 
1069 	do {
1070 		error = vfs_suspend(thismp, 0);
1071 	} while (error == EINTR || error == ERESTART);
1072 
1073 	if (error == 0)
1074 		return thismp;
1075 
1076 	KASSERT(error == EOPNOTSUPP);
1077 	return NULL;
1078 }
1079 
1080 /*
1081  * Eliminate all activity associated with the requested vnode
1082  * and with all vnodes aliased to the requested vnode.
1083  */
1084 void
1085 vrevoke(vnode_t *vp)
1086 {
1087 	struct mount *mp;
1088 	vnode_t *vq;
1089 	enum vtype type;
1090 	dev_t dev;
1091 
1092 	KASSERT(vp->v_usecount > 0);
1093 
1094 	mp = vrevoke_suspend_next(NULL, vp->v_mount);
1095 
1096 	mutex_enter(vp->v_interlock);
1097 	VSTATE_WAIT_STABLE(vp);
1098 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
1099 		mutex_exit(vp->v_interlock);
1100 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
1101 		atomic_inc_uint(&vp->v_usecount);
1102 		mutex_exit(vp->v_interlock);
1103 		vgone(vp);
1104 	} else {
1105 		dev = vp->v_rdev;
1106 		type = vp->v_type;
1107 		mutex_exit(vp->v_interlock);
1108 
1109 		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
1110 			mp = vrevoke_suspend_next(mp, vq->v_mount);
1111 			vgone(vq);
1112 		}
1113 	}
1114 	vrevoke_suspend_next(mp, NULL);
1115 }
1116 
1117 /*
1118  * Eliminate all activity associated with a vnode in preparation for
1119  * reuse.  Drops a reference from the vnode.
1120  */
1121 void
1122 vgone(vnode_t *vp)
1123 {
1124 	int lktype;
1125 
1126 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1127 
1128 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1129 	lktype = LK_EXCLUSIVE;
1130 	mutex_enter(vp->v_interlock);
1131 	VSTATE_WAIT_STABLE(vp);
1132 	if (VSTATE_GET(vp) == VS_LOADED) {
1133 		vcache_reclaim(vp);
1134 		lktype = LK_NONE;
1135 	}
1136 	VSTATE_ASSERT(vp, VS_RECLAIMED);
1137 	vrelel(vp, 0, lktype);
1138 }
1139 
1140 static inline uint32_t
1141 vcache_hash(const struct vcache_key *key)
1142 {
1143 	uint32_t hash = HASH32_BUF_INIT;
1144 
1145 	KASSERT(key->vk_key_len > 0);
1146 
1147 	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
1148 	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
1149 	return hash;
1150 }
1151 
1152 static void
1153 vcache_init(void)
1154 {
1155 
1156 	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
1157 	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
1158 	KASSERT(vcache_pool != NULL);
1159 	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
1160 	cv_init(&vcache_cv, "vcache");
1161 	vcache_hashsize = desiredvnodes;
1162 	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
1163 	    &vcache_hashmask);
1164 }
1165 
1166 static void
1167 vcache_reinit(void)
1168 {
1169 	int i;
1170 	uint32_t hash;
1171 	u_long oldmask, newmask;
1172 	struct hashhead *oldtab, *newtab;
1173 	vnode_impl_t *vip;
1174 
1175 	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
1176 	mutex_enter(&vcache_lock);
1177 	oldtab = vcache_hashtab;
1178 	oldmask = vcache_hashmask;
1179 	vcache_hashsize = desiredvnodes;
1180 	vcache_hashtab = newtab;
1181 	vcache_hashmask = newmask;
1182 	for (i = 0; i <= oldmask; i++) {
1183 		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
1184 			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
1185 			hash = vcache_hash(&vip->vi_key);
1186 			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
1187 			    vip, vi_hash);
1188 		}
1189 	}
1190 	mutex_exit(&vcache_lock);
1191 	hashdone(oldtab, HASH_SLIST, oldmask);
1192 }
1193 
1194 static inline vnode_impl_t *
1195 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
1196 {
1197 	struct hashhead *hashp;
1198 	vnode_impl_t *vip;
1199 
1200 	KASSERT(mutex_owned(&vcache_lock));
1201 
1202 	hashp = &vcache_hashtab[hash & vcache_hashmask];
1203 	SLIST_FOREACH(vip, hashp, vi_hash) {
1204 		if (key->vk_mount != vip->vi_key.vk_mount)
1205 			continue;
1206 		if (key->vk_key_len != vip->vi_key.vk_key_len)
1207 			continue;
1208 		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
1209 			continue;
1210 		return vip;
1211 	}
1212 	return NULL;
1213 }
1214 
1215 /*
1216  * Allocate a new, uninitialized vcache node.
1217  */
1218 static vnode_impl_t *
1219 vcache_alloc(void)
1220 {
1221 	vnode_impl_t *vip;
1222 	vnode_t *vp;
1223 
1224 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
1225 	vp = VIMPL_TO_VNODE(vip);
1226 	memset(vip, 0, sizeof(*vip));
1227 
1228 	rw_init(&vip->vi_lock);
1229 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
1230 
1231 	/* SLIST_INIT(&vip->vi_hash); */
1232 	TAILQ_INIT(&vip->vi_nclist);
1233 	/* LIST_INIT(&vip->vi_dnclist); */
1234 
1235 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
1236 	cv_init(&vp->v_cv, "vnode");
1237 
1238 	vp->v_usecount = 1;
1239 	vp->v_type = VNON;
1240 	vp->v_size = vp->v_writesize = VSIZENOTSET;
1241 
1242 	vip->vi_state = VS_LOADING;
1243 
1244 	lru_requeue(vp, &lru_list[LRU_FREE]);
1245 
1246 	return vip;
1247 }
1248 
1249 /*
1250  * Deallocate a vcache node in state VS_LOADING.
1251  *
1252  * vcache_lock held on entry and released on return.
1253  */
1254 static void
1255 vcache_dealloc(vnode_impl_t *vip)
1256 {
1257 	vnode_t *vp;
1258 
1259 	KASSERT(mutex_owned(&vcache_lock));
1260 
1261 	vp = VIMPL_TO_VNODE(vip);
1262 	vfs_ref(dead_rootmount);
1263 	vfs_insmntque(vp, dead_rootmount);
1264 	mutex_enter(vp->v_interlock);
1265 	vp->v_op = dead_vnodeop_p;
1266 	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1267 	mutex_exit(&vcache_lock);
1268 	vrelel(vp, 0, LK_NONE);
1269 }
1270 
1271 /*
1272  * Free an unused, unreferenced vcache node.
1273  * v_interlock locked on entry.
1274  */
1275 static void
1276 vcache_free(vnode_impl_t *vip)
1277 {
1278 	vnode_t *vp;
1279 
1280 	vp = VIMPL_TO_VNODE(vip);
1281 	KASSERT(mutex_owned(vp->v_interlock));
1282 
1283 	KASSERT(vp->v_usecount == 0);
1284 	KASSERT(vp->v_holdcnt == 0);
1285 	KASSERT(vp->v_writecount == 0);
1286 	lru_requeue(vp, NULL);
1287 	mutex_exit(vp->v_interlock);
1288 
1289 	vfs_insmntque(vp, NULL);
1290 	if (vp->v_type == VBLK || vp->v_type == VCHR)
1291 		spec_node_destroy(vp);
1292 
1293 	mutex_obj_free(vp->v_interlock);
1294 	rw_destroy(&vip->vi_lock);
1295 	uvm_obj_destroy(&vp->v_uobj, true);
1296 	cv_destroy(&vp->v_cv);
1297 	pool_cache_put(vcache_pool, vip);
1298 }
1299 
1300 /*
1301  * Try to get an initial reference on this cached vnode.
1302  * Returns zero on success, ENOENT if the vnode has been reclaimed, and
1303  * EBUSY if the vnode state is unstable.
1304  *
1305  * v_interlock locked on entry and unlocked on exit.
1306  */
1307 int
1308 vcache_tryvget(vnode_t *vp)
1309 {
1310 	int error = 0;
1311 
1312 	KASSERT(mutex_owned(vp->v_interlock));
1313 
1314 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
1315 		error = ENOENT;
1316 	else if (__predict_false(VSTATE_GET(vp) != VS_LOADED))
1317 		error = EBUSY;
1318 	else if (vp->v_usecount == 0)
1319 		vp->v_usecount = 1;
1320 	else
1321 		atomic_inc_uint(&vp->v_usecount);
1322 
1323 	mutex_exit(vp->v_interlock);
1324 
1325 	return error;
1326 }
1327 
1328 /*
1329  * Try to get an initial reference on this cached vnode.
1330  * Returns zero on success and ENOENT if the vnode has been reclaimed.
1331  * Will wait for the vnode state to be stable.
1332  *
1333  * v_interlock locked on entry and unlocked on exit.
1334  */
1335 int
1336 vcache_vget(vnode_t *vp)
1337 {
1338 
1339 	KASSERT(mutex_owned(vp->v_interlock));
1340 
1341 	/* Increment hold count to prevent vnode from disappearing. */
1342 	vp->v_holdcnt++;
1343 	VSTATE_WAIT_STABLE(vp);
1344 	vp->v_holdcnt--;
1345 
1346 	/* If this was the last reference to a reclaimed vnode free it now. */
1347 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
1348 		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
1349 			vcache_free(VNODE_TO_VIMPL(vp));
1350 		else
1351 			mutex_exit(vp->v_interlock);
1352 		return ENOENT;
1353 	}
1354 	VSTATE_ASSERT(vp, VS_LOADED);
1355 	if (vp->v_usecount == 0)
1356 		vp->v_usecount = 1;
1357 	else
1358 		atomic_inc_uint(&vp->v_usecount);
1359 	mutex_exit(vp->v_interlock);
1360 
1361 	return 0;
1362 }
1363 
1364 /*
1365  * Get a vnode / fs node pair by key and return it referenced through vpp.
1366  */
1367 int
1368 vcache_get(struct mount *mp, const void *key, size_t key_len,
1369     struct vnode **vpp)
1370 {
1371 	int error;
1372 	uint32_t hash;
1373 	const void *new_key;
1374 	struct vnode *vp;
1375 	struct vcache_key vcache_key;
1376 	vnode_impl_t *vip, *new_vip;
1377 
1378 	new_key = NULL;
1379 	*vpp = NULL;
1380 
1381 	vcache_key.vk_mount = mp;
1382 	vcache_key.vk_key = key;
1383 	vcache_key.vk_key_len = key_len;
1384 	hash = vcache_hash(&vcache_key);
1385 
1386 again:
1387 	mutex_enter(&vcache_lock);
1388 	vip = vcache_hash_lookup(&vcache_key, hash);
1389 
1390 	/* If found, take a reference or retry. */
1391 	if (__predict_true(vip != NULL)) {
1392 		/*
1393 		 * If the vnode is loading we cannot take the v_interlock
1394 		 * here as it might change during load (see uvm_obj_setlock()).
1395 		 * As changing state from VS_LOADING requires both vcache_lock
1396 		 * and v_interlock it is safe to test with vcache_lock held.
1397 		 *
1398 		 * Wait for vnodes changing state from VS_LOADING and retry.
1399 		 */
1400 		if (__predict_false(vip->vi_state == VS_LOADING)) {
1401 			cv_wait(&vcache_cv, &vcache_lock);
1402 			mutex_exit(&vcache_lock);
1403 			goto again;
1404 		}
1405 		vp = VIMPL_TO_VNODE(vip);
1406 		mutex_enter(vp->v_interlock);
1407 		mutex_exit(&vcache_lock);
1408 		error = vcache_vget(vp);
1409 		if (error == ENOENT)
1410 			goto again;
1411 		if (error == 0)
1412 			*vpp = vp;
1413 		KASSERT((error != 0) == (*vpp == NULL));
1414 		return error;
1415 	}
1416 	mutex_exit(&vcache_lock);
1417 
1418 	/* Allocate and initialize a new vcache / vnode pair. */
1419 	error = vfs_busy(mp);
1420 	if (error)
1421 		return error;
1422 	new_vip = vcache_alloc();
1423 	new_vip->vi_key = vcache_key;
1424 	vp = VIMPL_TO_VNODE(new_vip);
1425 	mutex_enter(&vcache_lock);
1426 	vip = vcache_hash_lookup(&vcache_key, hash);
1427 	if (vip == NULL) {
1428 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1429 		    new_vip, vi_hash);
1430 		vip = new_vip;
1431 	}
1432 
1433 	/* If another thread beat us inserting this node, retry. */
1434 	if (vip != new_vip) {
1435 		vcache_dealloc(new_vip);
1436 		vfs_unbusy(mp);
1437 		goto again;
1438 	}
1439 	mutex_exit(&vcache_lock);
1440 
1441 	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
1442 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
1443 	if (error) {
1444 		mutex_enter(&vcache_lock);
1445 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1446 		    new_vip, vnode_impl, vi_hash);
1447 		vcache_dealloc(new_vip);
1448 		vfs_unbusy(mp);
1449 		KASSERT(*vpp == NULL);
1450 		return error;
1451 	}
1452 	KASSERT(new_key != NULL);
1453 	KASSERT(memcmp(key, new_key, key_len) == 0);
1454 	KASSERT(vp->v_op != NULL);
1455 	vfs_insmntque(vp, mp);
1456 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1457 		vp->v_vflag |= VV_MPSAFE;
1458 	vfs_ref(mp);
1459 	vfs_unbusy(mp);
1460 
1461 	/* Finished loading, finalize node. */
1462 	mutex_enter(&vcache_lock);
1463 	new_vip->vi_key.vk_key = new_key;
1464 	mutex_enter(vp->v_interlock);
1465 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1466 	mutex_exit(vp->v_interlock);
1467 	mutex_exit(&vcache_lock);
1468 	*vpp = vp;
1469 	return 0;
1470 }
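
/*
 * Illustrative sketch (not part of the original file): how a file
 * system typically implements its vget operation on top of
 * vcache_get(), keyed on the inode number.  The function name and
 * guard macro are hypothetical; the key is an opaque byte string to
 * the cache, and returning the vnode locked is the caller's choice.
 */
#ifdef VCACHE_GET_EXAMPLE
static int
example_fs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	int error;

	/* Look up or create the vnode / fs node pair for this inode. */
	error = vcache_get(mp, &ino, sizeof(ino), vpp);
	if (error)
		return error;

	/* Return the vnode locked, as many callers expect. */
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}
	return 0;
}
#endif /* VCACHE_GET_EXAMPLE */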
1471 
1472 /*
1473  * Create a new vnode / fs node pair and return it referenced through vpp.
1474  */
1475 int
1476 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
1477     kauth_cred_t cred, void *extra, struct vnode **vpp)
1478 {
1479 	int error;
1480 	uint32_t hash;
1481 	struct vnode *vp, *ovp;
1482 	vnode_impl_t *vip, *ovip;
1483 
1484 	*vpp = NULL;
1485 
1486 	/* Allocate and initialize a new vcache / vnode pair. */
1487 	error = vfs_busy(mp);
1488 	if (error)
1489 		return error;
1490 	vip = vcache_alloc();
1491 	vip->vi_key.vk_mount = mp;
1492 	vp = VIMPL_TO_VNODE(vip);
1493 
1494 	/* Create and load the fs node. */
1495 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
1496 	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
1497 	if (error) {
1498 		mutex_enter(&vcache_lock);
1499 		vcache_dealloc(vip);
1500 		vfs_unbusy(mp);
1501 		KASSERT(*vpp == NULL);
1502 		return error;
1503 	}
1504 	KASSERT(vp->v_op != NULL);
1505 	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
1506 	if (vip->vi_key.vk_key_len > 0) {
1507 		KASSERT(vip->vi_key.vk_key != NULL);
1508 		hash = vcache_hash(&vip->vi_key);
1509 
1510 		/*
1511 		 * Wait for previous instance to be reclaimed,
1512 		 * then insert new node.
1513 		 */
1514 		mutex_enter(&vcache_lock);
1515 		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
1516 			ovp = VIMPL_TO_VNODE(ovip);
1517 			mutex_enter(ovp->v_interlock);
1518 			mutex_exit(&vcache_lock);
1519 			error = vcache_vget(ovp);
1520 			KASSERT(error == ENOENT);
1521 			mutex_enter(&vcache_lock);
1522 		}
1523 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1524 		    vip, vi_hash);
1525 		mutex_exit(&vcache_lock);
1526 	}
1527 	vfs_insmntque(vp, mp);
1528 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1529 		vp->v_vflag |= VV_MPSAFE;
1530 	vfs_ref(mp);
1531 	vfs_unbusy(mp);
1532 
1533 	/* Finished loading, finalize node. */
1534 	mutex_enter(&vcache_lock);
1535 	mutex_enter(vp->v_interlock);
1536 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1537 	mutex_exit(&vcache_lock);
1538 	mutex_exit(vp->v_interlock);
1539 	*vpp = vp;
1540 	return 0;
1541 }
1542 
1543 /*
1544  * Prepare key change: update the old node's key and lock the new node.
1545  * Return an error if the new node already exists.
1546  */
1547 int
1548 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1549     const void *old_key, size_t old_key_len,
1550     const void *new_key, size_t new_key_len)
1551 {
1552 	uint32_t old_hash, new_hash;
1553 	struct vcache_key old_vcache_key, new_vcache_key;
1554 	vnode_impl_t *vip, *new_vip;
1555 
1556 	old_vcache_key.vk_mount = mp;
1557 	old_vcache_key.vk_key = old_key;
1558 	old_vcache_key.vk_key_len = old_key_len;
1559 	old_hash = vcache_hash(&old_vcache_key);
1560 
1561 	new_vcache_key.vk_mount = mp;
1562 	new_vcache_key.vk_key = new_key;
1563 	new_vcache_key.vk_key_len = new_key_len;
1564 	new_hash = vcache_hash(&new_vcache_key);
1565 
1566 	new_vip = vcache_alloc();
1567 	new_vip->vi_key = new_vcache_key;
1568 
1569 	/* Insert locked new node used as placeholder. */
1570 	mutex_enter(&vcache_lock);
1571 	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1572 	if (vip != NULL) {
1573 		vcache_dealloc(new_vip);
1574 		return EEXIST;
1575 	}
1576 	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1577 	    new_vip, vi_hash);
1578 
1579 	/* Replace the old node's key with the temporary copy. */
1580 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1581 	KASSERT(vip != NULL);
1582 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
1583 	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
1584 	vip->vi_key = old_vcache_key;
1585 	mutex_exit(&vcache_lock);
1586 	return 0;
1587 }
1588 
1589 /*
1590  * Key change complete: update old node and remove placeholder.
1591  */
1592 void
1593 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1594     const void *old_key, size_t old_key_len,
1595     const void *new_key, size_t new_key_len)
1596 {
1597 	uint32_t old_hash, new_hash;
1598 	struct vcache_key old_vcache_key, new_vcache_key;
1599 	vnode_impl_t *vip, *new_vip;
1600 	struct vnode *new_vp;
1601 
1602 	old_vcache_key.vk_mount = mp;
1603 	old_vcache_key.vk_key = old_key;
1604 	old_vcache_key.vk_key_len = old_key_len;
1605 	old_hash = vcache_hash(&old_vcache_key);
1606 
1607 	new_vcache_key.vk_mount = mp;
1608 	new_vcache_key.vk_key = new_key;
1609 	new_vcache_key.vk_key_len = new_key_len;
1610 	new_hash = vcache_hash(&new_vcache_key);
1611 
1612 	mutex_enter(&vcache_lock);
1613 
1614 	/* Lookup old and new node. */
1615 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1616 	KASSERT(vip != NULL);
1617 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
1618 
1619 	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1620 	KASSERT(new_vip != NULL);
1621 	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
1622 	new_vp = VIMPL_TO_VNODE(new_vip);
1623 	mutex_enter(new_vp->v_interlock);
1624 	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
1625 	mutex_exit(new_vp->v_interlock);
1626 
1627 	/* Rekey old node and put it onto its new hashlist. */
1628 	vip->vi_key = new_vcache_key;
1629 	if (old_hash != new_hash) {
1630 		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
1631 		    vip, vnode_impl, vi_hash);
1632 		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1633 		    vip, vi_hash);
1634 	}
1635 
1636 	/* Remove new node used as placeholder. */
1637 	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
1638 	    new_vip, vnode_impl, vi_hash);
1639 	vcache_dealloc(new_vip);
1640 }
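
/*
 * Illustrative sketch (not part of the original file): the intended
 * two-phase use of vcache_rekey_enter() / vcache_rekey_exit() by a
 * file system whose vnode key changes, e.g. on rename.  While the
 * rekey is in progress, the placeholder node held in state VS_LOADING
 * makes concurrent vcache_get() calls on the new key wait.  The
 * function name and guard macro are hypothetical.
 */
#ifdef VCACHE_REKEY_EXAMPLE
static int
example_rekey(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_len,
    const void *new_key, size_t new_len)
{
	int error;

	error = vcache_rekey_enter(mp, vp, old_key, old_len,
	    new_key, new_len);
	if (error)
		return error;	/* EEXIST: new key already cached */

	/* ... update the file system's private copy of the key ... */

	vcache_rekey_exit(mp, vp, old_key, old_len, new_key, new_len);
	return 0;
}
#endif /* VCACHE_REKEY_EXAMPLE */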
1641 
1642 /*
1643  * Disassociate the underlying file system from a vnode.
1644  *
1645  * Must be called with vnode locked and will return unlocked.
1646  * Must be called with the interlock held, and will return with it held.
1647  */
1648 static void
1649 vcache_reclaim(vnode_t *vp)
1650 {
1651 	lwp_t *l = curlwp;
1652 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1653 	struct mount *mp = vp->v_mount;
1654 	uint32_t hash;
1655 	uint8_t temp_buf[64], *temp_key;
1656 	size_t temp_key_len;
1657 	bool recycle, active;
1658 	int error;
1659 
1660 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1661 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1662 	KASSERT(mutex_owned(vp->v_interlock));
1663 	KASSERT(vp->v_usecount != 0);
1664 
1665 	active = (vp->v_usecount > 1);
1666 	temp_key_len = vip->vi_key.vk_key_len;
1667 	/*
1668 	 * Prevent the vnode from being recycled or brought into use
1669 	 * while we clean it out.
1670 	 */
1671 	VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
1672 	mutex_exit(vp->v_interlock);
1673 
1674 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1675 	mutex_enter(vp->v_interlock);
1676 	if ((vp->v_iflag & VI_EXECMAP) != 0 && vp->v_uobj.uo_npages != 0) {
1677 		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
1678 		cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
1679 	}
1680 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1681 	mutex_exit(vp->v_interlock);
1682 	rw_exit(vp->v_uobj.vmobjlock);
1683 
1684 	/* Replace the vnode key with a temporary copy. */
1685 	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
1686 		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
1687 	} else {
1688 		temp_key = temp_buf;
1689 	}
1690 	if (vip->vi_key.vk_key_len > 0) {
1691 		mutex_enter(&vcache_lock);
1692 		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
1693 		vip->vi_key.vk_key = temp_key;
1694 		mutex_exit(&vcache_lock);
1695 	}
1696 
1697 	fstrans_start(mp);
1698 
1699 	/*
1700 	 * Clean out any cached data associated with the vnode.
1701 	 * If purging an active vnode, it must be closed and
1702 	 * deactivated before being reclaimed.
1703 	 */
1704 	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
1705 	if (error != 0) {
1706 		if (wapbl_vphaswapbl(vp))
1707 			WAPBL_DISCARD(wapbl_vptomp(vp));
1708 		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1709 	}
1710 	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
1711 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1712 	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
1713 		 spec_node_revoke(vp);
1714 	}
1715 
1716 	/*
1717 	 * Disassociate the underlying file system from the vnode.
1718 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1719 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1720 	 * would no longer function.
1721 	 */
1722 	VOP_INACTIVE(vp, &recycle);
1723 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1724 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1725 	if (VOP_RECLAIM(vp)) {
1726 		vnpanic(vp, "%s: cannot reclaim", __func__);
1727 	}
1728 
1729 	KASSERT(vp->v_data == NULL);
1730 	KASSERT((vp->v_iflag & VI_PAGES) == 0);
1731 
1732 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
1733 		uvm_ra_freectx(vp->v_ractx);
1734 		vp->v_ractx = NULL;
1735 	}
1736 
1737 	/* Purge name cache. */
1738 	cache_purge(vp);
1739 
1740 	if (vip->vi_key.vk_key_len > 0) {
1741 		/* Remove from vnode cache. */
1742 		hash = vcache_hash(&vip->vi_key);
1743 		mutex_enter(&vcache_lock);
1744 		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1745 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1746 		    vip, vnode_impl, vi_hash);
1747 		mutex_exit(&vcache_lock);
1748 	}
1749 	if (temp_key != temp_buf)
1750 		kmem_free(temp_key, temp_key_len);
1751 
1752 	/* Done with purge, notify sleepers of the grim news. */
1753 	mutex_enter(vp->v_interlock);
1754 	vp->v_op = dead_vnodeop_p;
1755 	vp->v_vflag |= VV_LOCKSWORK;
1756 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
1757 	vp->v_tag = VT_NON;
1758 	KNOTE(&vp->v_klist, NOTE_REVOKE);
1759 	mutex_exit(vp->v_interlock);
1760 
1761 	/*
1762 	 * Move to dead mount.  Must be after changing the operations
1763 	 * vector as vnode operations enter the mount before using the
1764 	 * operations vector.  See sys/kern/vnode_if.c.
1765 	 */
1766 	vp->v_vflag &= ~VV_ROOT;
1767 	vfs_ref(dead_rootmount);
1768 	vfs_insmntque(vp, dead_rootmount);
1769 
1770 #ifdef PAX_SEGVGUARD
1771 	pax_segvguard_cleanup(vp);
1772 #endif /* PAX_SEGVGUARD */
1773 
1774 	mutex_enter(vp->v_interlock);
1775 	fstrans_done(mp);
1776 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1777 }
1778 
1779 /*
1780  * Disassociate the underlying file system from an open device vnode
1781  * and make it anonymous.
1782  *
1783  * Vnode unlocked on entry, drops a reference to the vnode.
1784  */
1785 void
1786 vcache_make_anon(vnode_t *vp)
1787 {
1788 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1789 	uint32_t hash;
1790 	bool recycle;
1791 
1792 	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
1793 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1794 	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
1795 
1796 	/* Remove from vnode cache. */
1797 	hash = vcache_hash(&vip->vi_key);
1798 	mutex_enter(&vcache_lock);
1799 	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1800 	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1801 	    vip, vnode_impl, vi_hash);
1802 	vip->vi_key.vk_mount = dead_rootmount;
1803 	vip->vi_key.vk_key_len = 0;
1804 	vip->vi_key.vk_key = NULL;
1805 	mutex_exit(&vcache_lock);
1806 
1807 	/*
1808 	 * Disassociate the underlying file system from the vnode.
1809 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1810 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1811 	 * would no longer function.
1812 	 */
1813 	if (vn_lock(vp, LK_EXCLUSIVE)) {
1814 		vnpanic(vp, "%s: cannot lock", __func__);
1815 	}
1816 	VOP_INACTIVE(vp, &recycle);
1817 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1818 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1819 	if (VOP_RECLAIM(vp)) {
1820 		vnpanic(vp, "%s: cannot reclaim", __func__);
1821 	}
1822 
1823 	/* Purge name cache. */
1824 	cache_purge(vp);
1825 
1826 	/* Done with purge, change operations vector. */
1827 	mutex_enter(vp->v_interlock);
1828 	vp->v_op = spec_vnodeop_p;
1829 	vp->v_vflag |= VV_MPSAFE;
1830 	vp->v_vflag &= ~VV_LOCKSWORK;
1831 	mutex_exit(vp->v_interlock);
1832 
1833 	/*
1834 	 * Move to dead mount.  Must be after changing the operations
1835 	 * vector as vnode operations enter the mount before using the
1836 	 * operations vector.  See sys/kern/vnode_if.c.
1837 	 */
1838 	vfs_ref(dead_rootmount);
1839 	vfs_insmntque(vp, dead_rootmount);
1840 
1841 	vrele(vp);
1842 }
1843 
1844 /*
1845  * Update outstanding I/O count and do wakeup if requested.
1846  */
1847 void
1848 vwakeup(struct buf *bp)
1849 {
1850 	vnode_t *vp;
1851 
1852 	if ((vp = bp->b_vp) == NULL)
1853 		return;
1854 
1855 	KASSERT(bp->b_objlock == vp->v_interlock);
1856 	KASSERT(mutex_owned(bp->b_objlock));
1857 
1858 	if (--vp->v_numoutput < 0)
1859 		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
1860 	if (vp->v_numoutput == 0)
1861 		cv_broadcast(&vp->v_cv);
1862 }
1863 
1864 /*
1865  * Test a vnode for being or becoming dead.  Returns one of:
1866  * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
1867  * ENOENT: vnode is dead.
1868  * 0:      otherwise.
1869  *
1870  * Whenever this function returns a non-zero value all future
1871  * calls will also return a non-zero value.
1872  */
1873 int
1874 vdead_check(struct vnode *vp, int flags)
1875 {
1876 
1877 	KASSERT(mutex_owned(vp->v_interlock));
1878 
1879 	if (! ISSET(flags, VDEAD_NOWAIT))
1880 		VSTATE_WAIT_STABLE(vp);
1881 
1882 	if (VSTATE_GET(vp) == VS_RECLAIMING) {
1883 		KASSERT(ISSET(flags, VDEAD_NOWAIT));
1884 		return EBUSY;
1885 	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
1886 		return ENOENT;
1887 	}
1888 
1889 	return 0;
1890 }
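
/*
 * Illustrative sketch (not part of the original file): the usual
 * calling pattern for vdead_check() from code that must not sleep.
 * The function name and guard macro are hypothetical.
 */
#ifdef VDEAD_CHECK_EXAMPLE
static int
example_check_dead(struct vnode *vp)
{
	int error;

	mutex_enter(vp->v_interlock);
	error = vdead_check(vp, VDEAD_NOWAIT);
	mutex_exit(vp->v_interlock);
	if (error != 0)
		return error;	/* EBUSY: becoming dead, ENOENT: dead */

	/* ... the vnode was not dead at the time of the check ... */
	return 0;
}
#endif /* VDEAD_CHECK_EXAMPLE */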
1891 
1892 int
1893 vfs_drainvnodes(void)
1894 {
1895 	int i, gen;
1896 
1897 	mutex_enter(&vdrain_lock);
1898 	for (i = 0; i < 2; i++) {
1899 		gen = vdrain_gen;
1900 		while (gen == vdrain_gen) {
1901 			cv_broadcast(&vdrain_cv);
1902 			cv_wait(&vdrain_gen_cv, &vdrain_lock);
1903 		}
1904 	}
1905 	mutex_exit(&vdrain_lock);
1906 
1907 	if (numvnodes >= desiredvnodes)
1908 		return EBUSY;
1909 
1910 	if (vcache_hashsize != desiredvnodes)
1911 		vcache_reinit();
1912 
1913 	return 0;
1914 }
1915 
1916 void
1917 vnpanic(vnode_t *vp, const char *fmt, ...)
1918 {
1919 	va_list ap;
1920 
1921 #ifdef DIAGNOSTIC
1922 	vprint(NULL, vp);
1923 #endif
1924 	va_start(ap, fmt);
1925 	vpanic(fmt, ap);
1926 	va_end(ap);
1927 }
1928 
1929 void
1930 vshareilock(vnode_t *tvp, vnode_t *fvp)
1931 {
1932 	kmutex_t *oldlock;
1933 
1934 	oldlock = tvp->v_interlock;
1935 	mutex_obj_hold(fvp->v_interlock);
1936 	tvp->v_interlock = fvp->v_interlock;
1937 	mutex_obj_free(oldlock);
1938 }
1939