1 /*	$NetBSD: vfs_vnode.c,v 1.145 2022/08/05 05:20:39 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1989, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  * (c) UNIX System Laboratories, Inc.
37  * All or some portions of this file are derived from material licensed
38  * to the University of California by American Telephone and Telegraph
39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40  * the permission of UNIX System Laboratories, Inc.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
67  */
68 
69 /*
70  * The vnode cache subsystem.
71  *
72  * Life-cycle
73  *
74  *	Normally, there are two points where new vnodes are created:
75  *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
76  *	starts in one of the following ways:
77  *
78  *	- Allocation, via vcache_get(9) or vcache_new(9).
79  *	- Reclamation of inactive vnode, via vcache_vget(9).
80  *
81  *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
82  *	was another, traditional way.  Currently, only the draining thread
83  *	recycles the vnodes.  This behaviour might be revisited.
84  *
85  *	The life-cycle ends when the last reference is dropped, usually
86  *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
87  *	the file system that the vnode is inactive.  Via this call, the file
88  *	system indicates whether the vnode can be recycled (usually it checks
89  *	its own references, e.g. the link count, or whether the file was removed).
90  *
91  *	Depending on the indication, the vnode can be put onto a free list
92  *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9) to
93  *	disassociate the underlying file system from the vnode, and finally
94  *	destroyed.
95  *
96  * Vnode state
97  *
98  *	Vnode is always in one of six states:
99  *	- MARKER	This is a marker vnode to help list traversal.  It
100  *			will never change its state.
101  *	- LOADING	Vnode is associating with the underlying file system
102  *			and is not yet ready to use.
103  *	- LOADED	Vnode is associated with the underlying file system
104  *			and is ready to use.
105  *	- BLOCKED	Vnode is active but cannot get new references.
106  *	- RECLAIMING	Vnode is disassociating from the underlying file
107  *			system.
108  *	- RECLAIMED	Vnode has disassociated from the underlying file
109  *			system and is dead.
110  *
111  *	Valid state changes are:
112  *	LOADING -> LOADED
113  *			Vnode has been initialised in vcache_get() or
114  *			vcache_new() and is ready to use.
115  *	BLOCKED -> RECLAIMING
116  *			Vnode starts disassociation from underlying file
117  *			system in vcache_reclaim().
118  *	RECLAIMING -> RECLAIMED
119  *			Vnode finished disassociation from underlying file
120  *			system in vcache_reclaim().
121  *	LOADED -> BLOCKED
122  *			Either vcache_rekey*() is changing the vnode key or
123  *			vrelel() is about to call VOP_INACTIVE().
124  *	BLOCKED -> LOADED
125  *			The block condition is over.
126  *	LOADING -> RECLAIMED
127  *			Either vcache_get() or vcache_new() failed to
128  *			associate the underlying file system or vcache_rekey*()
129  *			drops a vnode used as placeholder.
130  *
131  *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate
132  *	and it is possible to wait for a state change.
133  *
134  *	State is protected with v_interlock with one exception:
135  *	to change from LOADING both v_interlock and vcache_lock must be held
136  *	so it is possible to check "state == LOADING" without holding
137  *	v_interlock.  See vcache_get() for details.
138  *
139  * Reference counting
140  *
141  *	A vnode is considered active if its reference count
142  *	(vnode_t::v_usecount) is non-zero.  It is maintained with the vref(9)
143  *	and vrele(9) routines, as well as vput(9).  Common holders of
144  *	references are e.g. open files, working directories and mount points.
145  *
146  *	v_usecount is adjusted with atomic operations, however to change
147  *	from a non-zero value to zero the interlock must also be held.
148  */
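
/*
 * A minimal sketch of this life-cycle from a consumer's point of view.
 * The inode-number key and the surrounding code are assumptions used
 * purely for illustration, not a fixed interface:
 *
 *	struct vnode *vp;
 *	ino_t ino = ...;				(file system specific key)
 *	int error;
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);	(take a reference)
 *	if (error)
 *		return error;
 *	vn_lock(vp, LK_SHARED | LK_RETRY);		(lock for VOP_*)
 *	... VOP_GETATTR(vp, ...) ...
 *	vput(vp);					(unlock and release)
 */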
149 
150 #include <sys/cdefs.h>
151 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.145 2022/08/05 05:20:39 thorpej Exp $");
152 
153 #ifdef _KERNEL_OPT
154 #include "opt_pax.h"
155 #endif
156 
157 #include <sys/param.h>
158 #include <sys/kernel.h>
159 
160 #include <sys/atomic.h>
161 #include <sys/buf.h>
162 #include <sys/conf.h>
163 #include <sys/device.h>
164 #include <sys/hash.h>
165 #include <sys/kauth.h>
166 #include <sys/kmem.h>
167 #include <sys/kthread.h>
168 #include <sys/module.h>
169 #include <sys/mount.h>
170 #include <sys/namei.h>
171 #include <sys/pax.h>
172 #include <sys/syscallargs.h>
173 #include <sys/sysctl.h>
174 #include <sys/systm.h>
175 #include <sys/vnode_impl.h>
176 #include <sys/wapbl.h>
177 #include <sys/fstrans.h>
178 
179 #include <uvm/uvm.h>
180 #include <uvm/uvm_readahead.h>
181 #include <uvm/uvm_stat.h>
182 
183 /* Flags to vrelel. */
184 #define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */
185 
186 #define	LRU_VRELE	0
187 #define	LRU_FREE	1
188 #define	LRU_HOLD	2
189 #define	LRU_COUNT	3
190 
191 /*
192  * There are three lru lists: one holds vnodes waiting for async release,
193  * one is for vnodes which have no buffer/page references and one for those
194  * which do (i.e.  v_holdcnt is non-zero).  We put the lists into a single,
195  * private cache line as vnodes migrate between them while under the same
196  * lock (vdrain_lock).
197  */
198 u_int			numvnodes		__cacheline_aligned;
199 static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
200 static kmutex_t		vdrain_lock		__cacheline_aligned;
201 static kcondvar_t	vdrain_cv;
202 static int		vdrain_gen;
203 static kcondvar_t	vdrain_gen_cv;
204 static bool		vdrain_retry;
205 static lwp_t *		vdrain_lwp;
206 SLIST_HEAD(hashhead, vnode_impl);
207 static kmutex_t		vcache_lock		__cacheline_aligned;
208 static kcondvar_t	vcache_cv;
209 static u_int		vcache_hashsize;
210 static u_long		vcache_hashmask;
211 static struct hashhead	*vcache_hashtab;
212 static pool_cache_t	vcache_pool;
213 static void		lru_requeue(vnode_t *, vnodelst_t *);
214 static vnodelst_t *	lru_which(vnode_t *);
215 static vnode_impl_t *	vcache_alloc(void);
216 static void		vcache_dealloc(vnode_impl_t *);
217 static void		vcache_free(vnode_impl_t *);
218 static void		vcache_init(void);
219 static void		vcache_reinit(void);
220 static void		vcache_reclaim(vnode_t *);
221 static void		vrelel(vnode_t *, int, int);
222 static void		vdrain_thread(void *);
223 static void		vnpanic(vnode_t *, const char *, ...)
224     __printflike(2, 3);
225 
226 /* Routines having to do with the management of the vnode table. */
227 extern struct mount	*dead_rootmount;
228 extern int		(**dead_vnodeop_p)(void *);
229 extern int		(**spec_vnodeop_p)(void *);
230 extern struct vfsops	dead_vfsops;
231 
232 /*
233  * The high bit of v_usecount is a gate for vcache_tryvget().  It's set
234  * only when the vnode state is LOADED.
235  * The next bit of v_usecount is a flag for vrelel().  It's set
236  * from vcache_vget() and vcache_tryvget() whenever the operation succeeds.
237  */
238 #define	VUSECOUNT_MASK	0x3fffffff
239 #define	VUSECOUNT_GATE	0x80000000
240 #define	VUSECOUNT_VGET	0x40000000
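
/*
 * A worked example of the layout above (values chosen only for
 * illustration): v_usecount == 0x80000003 means the gate is open (state is
 * LOADED), the VGET flag is clear and vrefcnt() == 3; v_usecount ==
 * 0xc0000001 means the gate is open, the last vcache_vget() or
 * vcache_tryvget() set the VGET flag and exactly one reference remains.
 */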
241 
242 /*
243  * Return the current usecount of a vnode.
244  */
245 inline int
246 vrefcnt(struct vnode *vp)
247 {
248 
249 	return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
250 }
251 
252 /* Vnode state operations and diagnostics. */
253 
254 #if defined(DIAGNOSTIC)
255 
256 #define VSTATE_VALID(state) \
257 	((state) != VS_ACTIVE && (state) != VS_MARKER)
258 #define VSTATE_GET(vp) \
259 	vstate_assert_get((vp), __func__, __LINE__)
260 #define VSTATE_CHANGE(vp, from, to) \
261 	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
262 #define VSTATE_WAIT_STABLE(vp) \
263 	vstate_assert_wait_stable((vp), __func__, __LINE__)
264 
265 void
266 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
267     bool has_lock)
268 {
269 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
270 	int refcnt = vrefcnt(vp);
271 
272 	if (!has_lock) {
273 		/*
274 		 * Prevent predictive loads from the CPU, but check the state
275 		 * without locking first.
276 		 *
277 		 * XXX what does this pair with?
278 		 */
279 		membar_enter();
280 		if (state == VS_ACTIVE && refcnt > 0 &&
281 		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
282 			return;
283 		if (vip->vi_state == state)
284 			return;
285 		mutex_enter((vp)->v_interlock);
286 	}
287 
288 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
289 
290 	if ((state == VS_ACTIVE && refcnt > 0 &&
291 	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
292 	    vip->vi_state == state) {
293 		if (!has_lock)
294 			mutex_exit((vp)->v_interlock);
295 		return;
296 	}
297 	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
298 	    vstate_name(vip->vi_state), refcnt,
299 	    vstate_name(state), func, line);
300 }
301 
302 static enum vnode_state
303 vstate_assert_get(vnode_t *vp, const char *func, int line)
304 {
305 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
306 
307 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
308 	if (! VSTATE_VALID(vip->vi_state))
309 		vnpanic(vp, "state is %s at %s:%d",
310 		    vstate_name(vip->vi_state), func, line);
311 
312 	return vip->vi_state;
313 }
314 
315 static void
316 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
317 {
318 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
319 
320 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
321 	if (! VSTATE_VALID(vip->vi_state))
322 		vnpanic(vp, "state is %s at %s:%d",
323 		    vstate_name(vip->vi_state), func, line);
324 
325 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
326 		cv_wait(&vp->v_cv, vp->v_interlock);
327 
328 	if (! VSTATE_VALID(vip->vi_state))
329 		vnpanic(vp, "state is %s at %s:%d",
330 		    vstate_name(vip->vi_state), func, line);
331 }
332 
333 static void
334 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
335     const char *func, int line)
336 {
337 	bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
338 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
339 
340 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
341 	if (from == VS_LOADING)
342 		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
343 
344 	if (! VSTATE_VALID(from))
345 		vnpanic(vp, "from is %s at %s:%d",
346 		    vstate_name(from), func, line);
347 	if (! VSTATE_VALID(to))
348 		vnpanic(vp, "to is %s at %s:%d",
349 		    vstate_name(to), func, line);
350 	if (vip->vi_state != from)
351 		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
352 		    vstate_name(vip->vi_state), vstate_name(from), func, line);
353 	if ((from == VS_LOADED) != gated)
354 		vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
355 		    vstate_name(vip->vi_state), gated, func, line);
356 
357 	/* Open/close the gate for vcache_tryvget(). */
358 	if (to == VS_LOADED) {
359 #ifndef __HAVE_ATOMIC_AS_MEMBAR
360 		membar_release();
361 #endif
362 		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
363 	} else {
364 		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
365 	}
366 
367 	vip->vi_state = to;
368 	if (from == VS_LOADING)
369 		cv_broadcast(&vcache_cv);
370 	if (to == VS_LOADED || to == VS_RECLAIMED)
371 		cv_broadcast(&vp->v_cv);
372 }
373 
374 #else /* defined(DIAGNOSTIC) */
375 
376 #define VSTATE_GET(vp) \
377 	(VNODE_TO_VIMPL((vp))->vi_state)
378 #define VSTATE_CHANGE(vp, from, to) \
379 	vstate_change((vp), (from), (to))
380 #define VSTATE_WAIT_STABLE(vp) \
381 	vstate_wait_stable((vp))
382 void
383 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
384     bool has_lock)
385 {
386 
387 }
388 
389 static void
390 vstate_wait_stable(vnode_t *vp)
391 {
392 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
393 
394 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
395 		cv_wait(&vp->v_cv, vp->v_interlock);
396 }
397 
398 static void
399 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
400 {
401 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
402 
403 	/* Open/close the gate for vcache_tryvget(). */
404 	if (to == VS_LOADED) {
405 #ifndef __HAVE_ATOMIC_AS_MEMBAR
406 		membar_release();
407 #endif
408 		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
409 	} else {
410 		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
411 	}
412 
413 	vip->vi_state = to;
414 	if (from == VS_LOADING)
415 		cv_broadcast(&vcache_cv);
416 	if (to == VS_LOADED || to == VS_RECLAIMED)
417 		cv_broadcast(&vp->v_cv);
418 }
419 
420 #endif /* defined(DIAGNOSTIC) */
421 
422 void
423 vfs_vnode_sysinit(void)
424 {
425 	int error __diagused, i;
426 
427 	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
428 	KASSERT(dead_rootmount != NULL);
429 	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;
430 
431 	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
432 	for (i = 0; i < LRU_COUNT; i++) {
433 		TAILQ_INIT(&lru_list[i]);
434 	}
435 	vcache_init();
436 
437 	cv_init(&vdrain_cv, "vdrain");
438 	cv_init(&vdrain_gen_cv, "vdrainwt");
439 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
440 	    NULL, &vdrain_lwp, "vdrain");
441 	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
442 }
443 
444 /*
445  * Allocate a new marker vnode.
446  */
447 vnode_t *
448 vnalloc_marker(struct mount *mp)
449 {
450 	vnode_impl_t *vip;
451 	vnode_t *vp;
452 
453 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
454 	memset(vip, 0, sizeof(*vip));
455 	vp = VIMPL_TO_VNODE(vip);
456 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
457 	vp->v_mount = mp;
458 	vp->v_type = VBAD;
459 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
460 	klist_init(&vip->vi_klist.vk_klist);
461 	vp->v_klist = &vip->vi_klist;
462 	vip->vi_state = VS_MARKER;
463 
464 	return vp;
465 }
466 
467 /*
468  * Free a marker vnode.
469  */
470 void
471 vnfree_marker(vnode_t *vp)
472 {
473 	vnode_impl_t *vip;
474 
475 	vip = VNODE_TO_VIMPL(vp);
476 	KASSERT(vip->vi_state == VS_MARKER);
477 	mutex_obj_free(vp->v_interlock);
478 	uvm_obj_destroy(&vp->v_uobj, true);
479 	klist_fini(&vip->vi_klist.vk_klist);
480 	pool_cache_put(vcache_pool, vip);
481 }
482 
483 /*
484  * Test a vnode for being a marker vnode.
485  */
486 bool
487 vnis_marker(vnode_t *vp)
488 {
489 
490 	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
491 }
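
/*
 * Marker vnodes make it possible to traverse an LRU list while dropping
 * and re-taking vdrain_lock in the middle of the walk.  A sketch of the
 * idiom used by vrele_flush() and vdrain_thread() below ("list" and the
 * per-vnode work are placeholders):
 *
 *	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
 *	mutex_enter(&vdrain_lock);
 *	TAILQ_INSERT_HEAD(list, marker, vi_lrulist);
 *	while ((vip = TAILQ_NEXT(marker, vi_lrulist)) != NULL) {
 *		TAILQ_REMOVE(list, marker, vi_lrulist);
 *		TAILQ_INSERT_AFTER(list, vip, marker, vi_lrulist);
 *		if (vnis_marker(VIMPL_TO_VNODE(vip)))
 *			continue;
 *		... process vip, possibly dropping vdrain_lock ...
 *	}
 *	TAILQ_REMOVE(list, marker, vi_lrulist);
 *	mutex_exit(&vdrain_lock);
 *	vnfree_marker(VIMPL_TO_VNODE(marker));
 */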
492 
493 /*
494  * Return the lru list this node should be on.
495  */
496 static vnodelst_t *
497 lru_which(vnode_t *vp)
498 {
499 
500 	KASSERT(mutex_owned(vp->v_interlock));
501 
502 	if (vp->v_holdcnt > 0)
503 		return &lru_list[LRU_HOLD];
504 	else
505 		return &lru_list[LRU_FREE];
506 }
507 
508 /*
509  * Put vnode to end of given list.
510  * Both the current and the new list may be NULL, used on vnode alloc/free.
511  * Adjust numvnodes and signal vdrain thread if there is work.
512  */
513 static void
514 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
515 {
516 	vnode_impl_t *vip;
517 	int d;
518 
519 	/*
520 	 * If the vnode is on the correct list, and was put there recently,
521 	 * then leave it be, thus avoiding huge cache and lock contention.
522 	 */
523 	vip = VNODE_TO_VIMPL(vp);
524 	if (listhd == vip->vi_lrulisthd &&
525 	    (getticks() - vip->vi_lrulisttm) < hz) {
526 		return;
527 	}
528 
529 	mutex_enter(&vdrain_lock);
530 	d = 0;
531 	if (vip->vi_lrulisthd != NULL)
532 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
533 	else
534 		d++;
535 	vip->vi_lrulisthd = listhd;
536 	vip->vi_lrulisttm = getticks();
537 	if (vip->vi_lrulisthd != NULL)
538 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
539 	else
540 		d--;
541 	if (d != 0) {
542 		/*
543 		 * Looks strange?  This is not a bug.  Don't store
544 		 * numvnodes unless there is a change - avoid false
545 		 * sharing on MP.
546 		 */
547 		numvnodes += d;
548 	}
549 	if ((d > 0 && numvnodes > desiredvnodes) ||
550 	    listhd == &lru_list[LRU_VRELE])
551 		cv_signal(&vdrain_cv);
552 	mutex_exit(&vdrain_lock);
553 }
554 
555 /*
556  * Release deferred vrele vnodes for this mount.
557  * Called with file system suspended.
558  */
559 void
560 vrele_flush(struct mount *mp)
561 {
562 	vnode_impl_t *vip, *marker;
563 	vnode_t *vp;
564 	int when = 0;
565 
566 	KASSERT(fstrans_is_owner(mp));
567 
568 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
569 
570 	mutex_enter(&vdrain_lock);
571 	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);
572 
573 	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
574 		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
575 		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
576 		    vi_lrulist);
577 		vp = VIMPL_TO_VNODE(vip);
578 		if (vnis_marker(vp))
579 			continue;
580 
581 		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
582 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
583 		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
584 		vip->vi_lrulisttm = getticks();
585 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
586 		mutex_exit(&vdrain_lock);
587 
588 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
589 		mutex_enter(vp->v_interlock);
590 		vrelel(vp, 0, LK_EXCLUSIVE);
591 
592 		if (getticks() > when) {
593 			yield();
594 			when = getticks() + hz / 10;
595 		}
596 
597 		mutex_enter(&vdrain_lock);
598 	}
599 
600 	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
601 	mutex_exit(&vdrain_lock);
602 
603 	vnfree_marker(VIMPL_TO_VNODE(marker));
604 }
605 
606 /*
607  * Reclaim a cached vnode.  Used from vdrain_thread only.
608  */
609 static __inline void
610 vdrain_remove(vnode_t *vp)
611 {
612 	struct mount *mp;
613 
614 	KASSERT(mutex_owned(&vdrain_lock));
615 
616 	/* Probe usecount (unlocked). */
617 	if (vrefcnt(vp) > 0)
618 		return;
619 	/* Try v_interlock -- we lock the wrong direction! */
620 	if (!mutex_tryenter(vp->v_interlock))
621 		return;
622 	/* Probe usecount and state. */
623 	if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
624 		mutex_exit(vp->v_interlock);
625 		return;
626 	}
627 	mp = vp->v_mount;
628 	if (fstrans_start_nowait(mp) != 0) {
629 		mutex_exit(vp->v_interlock);
630 		return;
631 	}
632 	vdrain_retry = true;
633 	mutex_exit(&vdrain_lock);
634 
635 	if (vcache_vget(vp) == 0) {
636 		if (!vrecycle(vp)) {
637 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
638 			mutex_enter(vp->v_interlock);
639 			vrelel(vp, 0, LK_EXCLUSIVE);
640 		}
641 	}
642 	fstrans_done(mp);
643 
644 	mutex_enter(&vdrain_lock);
645 }
646 
647 /*
648  * Release a cached vnode.  Used from vdrain_thread only.
649  */
650 static __inline void
651 vdrain_vrele(vnode_t *vp)
652 {
653 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
654 	struct mount *mp;
655 
656 	KASSERT(mutex_owned(&vdrain_lock));
657 
658 	mp = vp->v_mount;
659 	if (fstrans_start_nowait(mp) != 0)
660 		return;
661 
662 	/*
663 	 * First remove the vnode from the vrele list.
664 	 * Put it on the last lru list; the last vrele()
665 	 * will put it back onto the right list before
666 	 * its usecount reaches zero.
667 	 */
668 	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
669 	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
670 	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
671 	vip->vi_lrulisttm = getticks();
672 	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
673 
674 	vdrain_retry = true;
675 	mutex_exit(&vdrain_lock);
676 
677 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
678 	mutex_enter(vp->v_interlock);
679 	vrelel(vp, 0, LK_EXCLUSIVE);
680 	fstrans_done(mp);
681 
682 	mutex_enter(&vdrain_lock);
683 }
684 
685 /*
686  * Helper thread to keep the number of vnodes below desiredvnodes
687  * and release vnodes from asynchronous vrele.
688  */
689 static void
690 vdrain_thread(void *cookie)
691 {
692 	int i;
693 	u_int target;
694 	vnode_impl_t *vip, *marker;
695 
696 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
697 
698 	mutex_enter(&vdrain_lock);
699 
700 	for (;;) {
701 		vdrain_retry = false;
702 		target = desiredvnodes - desiredvnodes/10;
703 
704 		for (i = 0; i < LRU_COUNT; i++) {
705 			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
706 			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
707 				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
708 				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
709 				    vi_lrulist);
710 				if (vnis_marker(VIMPL_TO_VNODE(vip)))
711 					continue;
712 				if (i == LRU_VRELE)
713 					vdrain_vrele(VIMPL_TO_VNODE(vip));
714 				else if (numvnodes < target)
715 					break;
716 				else
717 					vdrain_remove(VIMPL_TO_VNODE(vip));
718 			}
719 			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
720 		}
721 
722 		if (vdrain_retry) {
723 			kpause("vdrainrt", false, 1, &vdrain_lock);
724 		} else {
725 			vdrain_gen++;
726 			cv_broadcast(&vdrain_gen_cv);
727 			cv_wait(&vdrain_cv, &vdrain_lock);
728 		}
729 	}
730 }
731 
732 /*
733  * Try to drop a reference on a vnode.  Abort if we are releasing the
734  * last reference.  Note: this _must_ succeed if not the last reference.
735  */
736 static bool
737 vtryrele(vnode_t *vp)
738 {
739 	u_int use, next;
740 
741 #ifndef __HAVE_ATOMIC_AS_MEMBAR
742 	membar_release();
743 #endif
744 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
745 		if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
746 			return false;
747 		}
748 		KASSERT((use & VUSECOUNT_MASK) > 1);
749 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
750 		if (__predict_true(next == use)) {
751 			return true;
752 		}
753 	}
754 }
755 
756 /*
757  * vput: unlock and release the reference.
758  */
759 void
760 vput(vnode_t *vp)
761 {
762 	int lktype;
763 
764 	/*
765 	 * Do an unlocked check of the usecount.  If it looks like we're not
766 	 * about to drop the last reference, then unlock the vnode and try
767 	 * to drop the reference.  If it ends up being the last reference
768 	 * after all, vrelel() can fix it all up.  Most of the time this
769 	 * will all go to plan.
770 	 */
771 	if (vrefcnt(vp) > 1) {
772 		VOP_UNLOCK(vp);
773 		if (vtryrele(vp)) {
774 			return;
775 		}
776 		lktype = LK_NONE;
777 	} else {
778 		lktype = VOP_ISLOCKED(vp);
779 		KASSERT(lktype != LK_NONE);
780 	}
781 	mutex_enter(vp->v_interlock);
782 	vrelel(vp, 0, lktype);
783 }
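
/*
 * Usage note (a sketch): vput() combines unlocking with dropping a
 * reference, so
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... VOP operations on vp ...
 *	vput(vp);
 *
 * is equivalent to VOP_UNLOCK(vp) followed by vrele(vp), except that
 * vput() may keep the lock and hand it to vrelel() when it is dropping
 * the last reference.
 */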
784 
785 /*
786  * Vnode release.  If the reference count drops to zero, call the inactive
787  * routine and either return the vnode to the freelist or free it to the pool.
788  */
789 static void
790 vrelel(vnode_t *vp, int flags, int lktype)
791 {
792 	const bool async = ((flags & VRELEL_ASYNC) != 0);
793 	bool recycle, defer, objlock_held;
794 	u_int use, next;
795 	int error;
796 
797 	objlock_held = false;
798 
799 retry:
800 	KASSERT(mutex_owned(vp->v_interlock));
801 
802 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
803 	    VSTATE_GET(vp) != VS_RECLAIMED)) {
804 		vnpanic(vp, "dead but not clean");
805 	}
806 
807 	/*
808 	 * If not the last reference, just unlock and drop the reference count.
809 	 *
810 	 * Otherwise, make sure we pass a point in time where we hold the
811 	 * last reference with the VGET flag unset.
812 	 */
813 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
814 		if (__predict_false((use & VUSECOUNT_MASK) > 1)) {
815 			if (objlock_held) {
816 				objlock_held = false;
817 				rw_exit(vp->v_uobj.vmobjlock);
818 			}
819 			if (lktype != LK_NONE) {
820 				mutex_exit(vp->v_interlock);
821 				lktype = LK_NONE;
822 				VOP_UNLOCK(vp);
823 				mutex_enter(vp->v_interlock);
824 			}
825 			if (vtryrele(vp)) {
826 				mutex_exit(vp->v_interlock);
827 				return;
828 			}
829 			next = atomic_load_relaxed(&vp->v_usecount);
830 			continue;
831 		}
832 		KASSERT((use & VUSECOUNT_MASK) == 1);
833 		next = use & ~VUSECOUNT_VGET;
834 		if (next != use) {
835 			next = atomic_cas_uint(&vp->v_usecount, use, next);
836 		}
837 		if (__predict_true(next == use)) {
838 			break;
839 		}
840 	}
841 #ifndef __HAVE_ATOMIC_AS_MEMBAR
842 	membar_acquire();
843 #endif
844 	if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
845 		vnpanic(vp, "%s: bad ref count", __func__);
846 	}
847 
848 #ifdef DIAGNOSTIC
849 	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
850 	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
851 		vprint("vrelel: missing VOP_CLOSE()", vp);
852 	}
853 #endif
854 
855 	/*
856 	 * If already clean there is no need to lock, defer or
857 	 * deactivate this node.
858 	 */
859 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
860 		if (objlock_held) {
861 			objlock_held = false;
862 			rw_exit(vp->v_uobj.vmobjlock);
863 		}
864 		if (lktype != LK_NONE) {
865 			mutex_exit(vp->v_interlock);
866 			lktype = LK_NONE;
867 			VOP_UNLOCK(vp);
868 			mutex_enter(vp->v_interlock);
869 		}
870 		goto out;
871 	}
872 
873 	/*
874 	 * First try to get the vnode locked for VOP_INACTIVE().
875 	 * Defer the vnode release to vdrain_thread if the caller requests it
876 	 * explicitly, the caller is the pagedaemon, or the lock attempt failed.
877 	 */
878 	defer = false;
879 	if ((curlwp == uvm.pagedaemon_lwp) || async) {
880 		defer = true;
881 	} else if (lktype == LK_SHARED) {
882 		/* Excellent chance of getting the lock, if this is the last ref. */
883 		error = vn_lock(vp, LK_UPGRADE | LK_RETRY | LK_NOWAIT);
884 		if (error != 0) {
885 			defer = true;
886 		} else {
887 			lktype = LK_EXCLUSIVE;
888 		}
889 	} else if (lktype == LK_NONE) {
890 		/* Excellent chance of getting the lock, if this is the last ref. */
891 		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
892 		if (error != 0) {
893 			defer = true;
894 		} else {
895 			lktype = LK_EXCLUSIVE;
896 		}
897 	}
898 	KASSERT(mutex_owned(vp->v_interlock));
899 	if (defer) {
900 		/*
901 		 * Defer reclaim to the kthread; it's not safe to
902 		 * clean it here.  We donate it our last reference.
903 		 */
904 		if (lktype != LK_NONE) {
905 			mutex_exit(vp->v_interlock);
906 			VOP_UNLOCK(vp);
907 			mutex_enter(vp->v_interlock);
908 		}
909 		lru_requeue(vp, &lru_list[LRU_VRELE]);
910 		mutex_exit(vp->v_interlock);
911 		return;
912 	}
913 	KASSERT(lktype == LK_EXCLUSIVE);
914 
915 	/* If the node gained another reference, retry. */
916 	use = atomic_load_relaxed(&vp->v_usecount);
917 	if ((use & VUSECOUNT_VGET) != 0) {
918 		goto retry;
919 	}
920 	KASSERT((use & VUSECOUNT_MASK) == 1);
921 
922 	if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP|VI_WRMAP)) != 0 ||
923 	    (vp->v_vflag & VV_MAPPED) != 0) {
924 		/* Take care of space accounting. */
925 		if (!objlock_held) {
926 			objlock_held = true;
927 			if (!rw_tryenter(vp->v_uobj.vmobjlock, RW_WRITER)) {
928 				mutex_exit(vp->v_interlock);
929 				rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
930 				mutex_enter(vp->v_interlock);
931 				goto retry;
932 			}
933 		}
934 		if ((vp->v_iflag & VI_EXECMAP) != 0) {
935 			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
936 		}
937 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
938 		vp->v_vflag &= ~VV_MAPPED;
939 	}
940 	if (objlock_held) {
941 		objlock_held = false;
942 		rw_exit(vp->v_uobj.vmobjlock);
943 	}
944 
945 	/*
946 	 * Deactivate the vnode, but preserve our reference across
947 	 * the call to VOP_INACTIVE().
948 	 *
949 	 * If VOP_INACTIVE() indicates that the file has been
950 	 * deleted, then recycle the vnode.
951 	 *
952 	 * Note that VOP_INACTIVE() will not drop the vnode lock.
953 	 */
954 	mutex_exit(vp->v_interlock);
955 	recycle = false;
956 	VOP_INACTIVE(vp, &recycle);
957 	if (!recycle) {
958 		lktype = LK_NONE;
959 		VOP_UNLOCK(vp);
960 	}
961 	mutex_enter(vp->v_interlock);
962 
963 	/*
964 	 * Block new references then check again to see if a
965 	 * new reference was acquired in the meantime.  If
966 	 * it was, restore the vnode state and try again.
967 	 */
968 	if (recycle) {
969 		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
970 		use = atomic_load_relaxed(&vp->v_usecount);
971 		if ((use & VUSECOUNT_VGET) != 0) {
972 			VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
973 			goto retry;
974 		}
975 		KASSERT((use & VUSECOUNT_MASK) == 1);
976 	}
977 
978 	/*
979 	 * Recycle the vnode if the file is now unused (unlinked).
980 	 */
981 	if (recycle) {
982 		VSTATE_ASSERT(vp, VS_BLOCKED);
983 		KASSERT(lktype == LK_EXCLUSIVE);
984 		/* vcache_reclaim drops the lock. */
985 		lktype = LK_NONE;
986 		vcache_reclaim(vp);
987 	}
988 	KASSERT(vrefcnt(vp) > 0);
989 	KASSERT(lktype == LK_NONE);
990 
991 out:
992 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
993 		if (__predict_false((use & VUSECOUNT_VGET) != 0 &&
994 		    (use & VUSECOUNT_MASK) == 1)) {
995 			/* Gained and released another reference, retry. */
996 			goto retry;
997 		}
998 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
999 		if (__predict_true(next == use)) {
1000 			if (__predict_false((use & VUSECOUNT_MASK) != 1)) {
1001 				/* Gained another reference. */
1002 				mutex_exit(vp->v_interlock);
1003 				return;
1004 			}
1005 			break;
1006 		}
1007 	}
1008 #ifndef __HAVE_ATOMIC_AS_MEMBAR
1009 	membar_acquire();
1010 #endif
1011 
1012 	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
1013 		/*
1014 		 * It's clean so destroy it.  It isn't referenced
1015 		 * anywhere since it has been reclaimed.
1016 		 */
1017 		vcache_free(VNODE_TO_VIMPL(vp));
1018 	} else {
1019 		/*
1020 		 * Otherwise, put it back onto the freelist.  It
1021 		 * can't be destroyed while still associated with
1022 		 * a file system.
1023 		 */
1024 		lru_requeue(vp, lru_which(vp));
1025 		mutex_exit(vp->v_interlock);
1026 	}
1027 }
1028 
1029 void
1030 vrele(vnode_t *vp)
1031 {
1032 
1033 	if (vtryrele(vp)) {
1034 		return;
1035 	}
1036 	mutex_enter(vp->v_interlock);
1037 	vrelel(vp, 0, LK_NONE);
1038 }
1039 
1040 /*
1041  * Asynchronous vnode release, vnode is released in different context.
1042  */
1043 void
1044 vrele_async(vnode_t *vp)
1045 {
1046 
1047 	if (vtryrele(vp)) {
1048 		return;
1049 	}
1050 	mutex_enter(vp->v_interlock);
1051 	vrelel(vp, VRELEL_ASYNC, LK_NONE);
1052 }
1053 
1054 /*
1055  * Vnode reference, where a reference is already held by some other
1056  * object (for example, a file structure).
1057  *
1058  * NB: lockless code sequences may rely on this not blocking.
1059  */
1060 void
1061 vref(vnode_t *vp)
1062 {
1063 
1064 	KASSERT(vrefcnt(vp) > 0);
1065 
1066 	atomic_inc_uint(&vp->v_usecount);
1067 }
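
/*
 * For example (a sketch; "example_softc" is hypothetical): code that
 * publishes an extra pointer to a vnode while the caller already holds a
 * reference would do
 *
 *	vref(vp);
 *	example_softc->sc_vnode = vp;
 *
 * and later drop that extra reference with vrele() when the pointer is
 * cleared.
 */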
1068 
1069 /*
1070  * Page or buffer structure gets a reference.
1071  * Called with v_interlock held.
1072  */
1073 void
1074 vholdl(vnode_t *vp)
1075 {
1076 
1077 	KASSERT(mutex_owned(vp->v_interlock));
1078 
1079 	if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
1080 		lru_requeue(vp, lru_which(vp));
1081 }
1082 
1083 /*
1084  * Page or buffer structure gets a reference.
1085  */
1086 void
1087 vhold(vnode_t *vp)
1088 {
1089 
1090 	mutex_enter(vp->v_interlock);
1091 	vholdl(vp);
1092 	mutex_exit(vp->v_interlock);
1093 }
1094 
1095 /*
1096  * Page or buffer structure frees a reference.
1097  * Called with v_interlock held.
1098  */
1099 void
1100 holdrelel(vnode_t *vp)
1101 {
1102 
1103 	KASSERT(mutex_owned(vp->v_interlock));
1104 
1105 	if (vp->v_holdcnt <= 0) {
1106 		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
1107 	}
1108 
1109 	vp->v_holdcnt--;
1110 	if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1111 		lru_requeue(vp, lru_which(vp));
1112 }
1113 
1114 /*
1115  * Page or buffer structure frees a reference.
1116  */
1117 void
1118 holdrele(vnode_t *vp)
1119 {
1120 
1121 	mutex_enter(vp->v_interlock);
1122 	holdrelel(vp);
1123 	mutex_exit(vp->v_interlock);
1124 }
1125 
1126 /*
1127  * Recycle an unused vnode if caller holds the last reference.
1128  */
1129 bool
1130 vrecycle(vnode_t *vp)
1131 {
1132 	int error __diagused;
1133 
1134 	mutex_enter(vp->v_interlock);
1135 
1136 	/* If the vnode is already clean we're done. */
1137 	VSTATE_WAIT_STABLE(vp);
1138 	if (VSTATE_GET(vp) != VS_LOADED) {
1139 		VSTATE_ASSERT(vp, VS_RECLAIMED);
1140 		vrelel(vp, 0, LK_NONE);
1141 		return true;
1142 	}
1143 
1144 	/* Prevent further references until the vnode is locked. */
1145 	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1146 
1147 	/* Make sure we hold the last reference. */
1148 	if (vrefcnt(vp) != 1) {
1149 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1150 		mutex_exit(vp->v_interlock);
1151 		return false;
1152 	}
1153 
1154 	mutex_exit(vp->v_interlock);
1155 
1156 	/*
1157 	 * On a leaf file system this lock will always succeed as we hold
1158 	 * the last reference and prevent further references.
1159 	 * On layered file systems waiting for the lock would open a can of
1160 	 * deadlocks as the lower vnodes may have other active references.
1161 	 */
1162 	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
1163 
1164 	mutex_enter(vp->v_interlock);
1165 	if (error) {
1166 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1167 		mutex_exit(vp->v_interlock);
1168 		return false;
1169 	}
1170 
1171 	KASSERT(vrefcnt(vp) == 1);
1172 	vcache_reclaim(vp);
1173 	vrelel(vp, 0, LK_NONE);
1174 
1175 	return true;
1176 }
1177 
1178 /*
1179  * Helper for vrevoke() to propagate suspension from lastmp
1180  * to thismp.  Both args may be NULL.
1181  * Returns the currently suspended file system or NULL.
1182  */
1183 static struct mount *
1184 vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
1185 {
1186 	int error;
1187 
1188 	if (lastmp == thismp)
1189 		return thismp;
1190 
1191 	if (lastmp != NULL)
1192 		vfs_resume(lastmp);
1193 
1194 	if (thismp == NULL)
1195 		return NULL;
1196 
1197 	do {
1198 		error = vfs_suspend(thismp, 0);
1199 	} while (error == EINTR || error == ERESTART);
1200 
1201 	if (error == 0)
1202 		return thismp;
1203 
1204 	KASSERT(error == EOPNOTSUPP || error == ENOENT);
1205 	return NULL;
1206 }
1207 
1208 /*
1209  * Eliminate all activity associated with the requested vnode
1210  * and with all vnodes aliased to the requested vnode.
1211  */
1212 void
1213 vrevoke(vnode_t *vp)
1214 {
1215 	struct mount *mp;
1216 	vnode_t *vq;
1217 	enum vtype type;
1218 	dev_t dev;
1219 
1220 	KASSERT(vrefcnt(vp) > 0);
1221 
1222 	mp = vrevoke_suspend_next(NULL, vp->v_mount);
1223 
1224 	mutex_enter(vp->v_interlock);
1225 	VSTATE_WAIT_STABLE(vp);
1226 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
1227 		mutex_exit(vp->v_interlock);
1228 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
1229 		atomic_inc_uint(&vp->v_usecount);
1230 		mutex_exit(vp->v_interlock);
1231 		vgone(vp);
1232 	} else {
1233 		dev = vp->v_rdev;
1234 		type = vp->v_type;
1235 		mutex_exit(vp->v_interlock);
1236 
1237 		while (spec_node_lookup_by_dev(type, dev, VDEAD_NOWAIT, &vq)
1238 		    == 0) {
1239 			mp = vrevoke_suspend_next(mp, vq->v_mount);
1240 			vgone(vq);
1241 		}
1242 	}
1243 	vrevoke_suspend_next(mp, NULL);
1244 }
1245 
1246 /*
1247  * Eliminate all activity associated with a vnode in preparation for
1248  * reuse.  Drops a reference from the vnode.
1249  */
1250 void
1251 vgone(vnode_t *vp)
1252 {
1253 	int lktype;
1254 
1255 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1256 
1257 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1258 	lktype = LK_EXCLUSIVE;
1259 	mutex_enter(vp->v_interlock);
1260 	VSTATE_WAIT_STABLE(vp);
1261 	if (VSTATE_GET(vp) == VS_LOADED) {
1262 		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1263 		vcache_reclaim(vp);
1264 		lktype = LK_NONE;
1265 	}
1266 	VSTATE_ASSERT(vp, VS_RECLAIMED);
1267 	vrelel(vp, 0, lktype);
1268 }
1269 
1270 static inline uint32_t
1271 vcache_hash(const struct vcache_key *key)
1272 {
1273 	uint32_t hash = HASH32_BUF_INIT;
1274 
1275 	KASSERT(key->vk_key_len > 0);
1276 
1277 	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
1278 	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
1279 	return hash;
1280 }
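
/*
 * A sketch of how a file system forms a cache key; an inode number is
 * assumed here purely for illustration:
 *
 *	struct vcache_key key;
 *	ino_t ino = ...;
 *
 *	key.vk_mount = mp;
 *	key.vk_key = &ino;
 *	key.vk_key_len = sizeof(ino);
 *	hash = vcache_hash(&key);
 */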
1281 
1282 static int
1283 vcache_stats(struct hashstat_sysctl *hs, bool fill)
1284 {
1285 	vnode_impl_t *vip;
1286 	uint64_t chain;
1287 
1288 	strlcpy(hs->hash_name, "vcache", sizeof(hs->hash_name));
1289 	strlcpy(hs->hash_desc, "vnode cache hash", sizeof(hs->hash_desc));
1290 	if (!fill)
1291 		return 0;
1292 
1293 	hs->hash_size = vcache_hashmask + 1;
1294 
1295 	for (size_t i = 0; i < hs->hash_size; i++) {
1296 		chain = 0;
1297 		mutex_enter(&vcache_lock);
1298 		SLIST_FOREACH(vip, &vcache_hashtab[i], vi_hash) {
1299 			chain++;
1300 		}
1301 		mutex_exit(&vcache_lock);
1302 		if (chain > 0) {
1303 			hs->hash_used++;
1304 			hs->hash_items += chain;
1305 			if (chain > hs->hash_maxchain)
1306 				hs->hash_maxchain = chain;
1307 		}
1308 		preempt_point();
1309 	}
1310 
1311 	return 0;
1312 }
1313 
1314 static void
1315 vcache_init(void)
1316 {
1317 
1318 	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
1319 	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
1320 	KASSERT(vcache_pool != NULL);
1321 	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
1322 	cv_init(&vcache_cv, "vcache");
1323 	vcache_hashsize = desiredvnodes;
1324 	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
1325 	    &vcache_hashmask);
1326 	hashstat_register("vcache", vcache_stats);
1327 }
1328 
1329 static void
1330 vcache_reinit(void)
1331 {
1332 	int i;
1333 	uint32_t hash;
1334 	u_long oldmask, newmask;
1335 	struct hashhead *oldtab, *newtab;
1336 	vnode_impl_t *vip;
1337 
1338 	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
1339 	mutex_enter(&vcache_lock);
1340 	oldtab = vcache_hashtab;
1341 	oldmask = vcache_hashmask;
1342 	vcache_hashsize = desiredvnodes;
1343 	vcache_hashtab = newtab;
1344 	vcache_hashmask = newmask;
1345 	for (i = 0; i <= oldmask; i++) {
1346 		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
1347 			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
1348 			hash = vcache_hash(&vip->vi_key);
1349 			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
1350 			    vip, vi_hash);
1351 		}
1352 	}
1353 	mutex_exit(&vcache_lock);
1354 	hashdone(oldtab, HASH_SLIST, oldmask);
1355 }
1356 
1357 static inline vnode_impl_t *
1358 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
1359 {
1360 	struct hashhead *hashp;
1361 	vnode_impl_t *vip;
1362 
1363 	KASSERT(mutex_owned(&vcache_lock));
1364 
1365 	hashp = &vcache_hashtab[hash & vcache_hashmask];
1366 	SLIST_FOREACH(vip, hashp, vi_hash) {
1367 		if (key->vk_mount != vip->vi_key.vk_mount)
1368 			continue;
1369 		if (key->vk_key_len != vip->vi_key.vk_key_len)
1370 			continue;
1371 		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
1372 			continue;
1373 		return vip;
1374 	}
1375 	return NULL;
1376 }
1377 
1378 /*
1379  * Allocate a new, uninitialized vcache node.
1380  */
1381 static vnode_impl_t *
1382 vcache_alloc(void)
1383 {
1384 	vnode_impl_t *vip;
1385 	vnode_t *vp;
1386 
1387 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
1388 	vp = VIMPL_TO_VNODE(vip);
1389 	memset(vip, 0, sizeof(*vip));
1390 
1391 	rw_init(&vip->vi_lock);
1392 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
1393 
1394 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
1395 	klist_init(&vip->vi_klist.vk_klist);
1396 	vp->v_klist = &vip->vi_klist;
1397 	cv_init(&vp->v_cv, "vnode");
1398 	cache_vnode_init(vp);
1399 
1400 	vp->v_usecount = 1;
1401 	vp->v_type = VNON;
1402 	vp->v_size = vp->v_writesize = VSIZENOTSET;
1403 
1404 	vip->vi_state = VS_LOADING;
1405 
1406 	lru_requeue(vp, &lru_list[LRU_FREE]);
1407 
1408 	return vip;
1409 }
1410 
1411 /*
1412  * Deallocate a vcache node in state VS_LOADING.
1413  *
1414  * vcache_lock held on entry and released on return.
1415  */
1416 static void
1417 vcache_dealloc(vnode_impl_t *vip)
1418 {
1419 	vnode_t *vp;
1420 
1421 	KASSERT(mutex_owned(&vcache_lock));
1422 
1423 	vp = VIMPL_TO_VNODE(vip);
1424 	vfs_ref(dead_rootmount);
1425 	vfs_insmntque(vp, dead_rootmount);
1426 	mutex_enter(vp->v_interlock);
1427 	vp->v_op = dead_vnodeop_p;
1428 	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1429 	mutex_exit(&vcache_lock);
1430 	vrelel(vp, 0, LK_NONE);
1431 }
1432 
1433 /*
1434  * Free an unused, unreferenced vcache node.
1435  * v_interlock locked on entry.
1436  */
1437 static void
1438 vcache_free(vnode_impl_t *vip)
1439 {
1440 	vnode_t *vp;
1441 
1442 	vp = VIMPL_TO_VNODE(vip);
1443 	KASSERT(mutex_owned(vp->v_interlock));
1444 
1445 	KASSERT(vrefcnt(vp) == 0);
1446 	KASSERT(vp->v_holdcnt == 0);
1447 	KASSERT(vp->v_writecount == 0);
1448 	lru_requeue(vp, NULL);
1449 	mutex_exit(vp->v_interlock);
1450 
1451 	vfs_insmntque(vp, NULL);
1452 	if (vp->v_type == VBLK || vp->v_type == VCHR)
1453 		spec_node_destroy(vp);
1454 
1455 	mutex_obj_free(vp->v_interlock);
1456 	rw_destroy(&vip->vi_lock);
1457 	uvm_obj_destroy(&vp->v_uobj, true);
1458 	KASSERT(vp->v_klist == &vip->vi_klist);
1459 	klist_fini(&vip->vi_klist.vk_klist);
1460 	cv_destroy(&vp->v_cv);
1461 	cache_vnode_fini(vp);
1462 	pool_cache_put(vcache_pool, vip);
1463 }
1464 
1465 /*
1466  * Try to get an initial reference on this cached vnode.
1467  * Returns zero on success or EBUSY if the vnode state is not LOADED.
1468  *
1469  * NB: lockless code sequences may rely on this not blocking.
1470  */
1471 int
1472 vcache_tryvget(vnode_t *vp)
1473 {
1474 	u_int use, next;
1475 
1476 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
1477 		if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
1478 			return EBUSY;
1479 		}
1480 		next = atomic_cas_uint(&vp->v_usecount,
1481 		    use, (use + 1) | VUSECOUNT_VGET);
1482 		if (__predict_true(next == use)) {
1483 #ifndef __HAVE_ATOMIC_AS_MEMBAR
1484 			membar_acquire();
1485 #endif
1486 			return 0;
1487 		}
1488 	}
1489 }
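
/*
 * A sketch of a lockless consumer: after finding vp without taking any
 * locks (e.g. via the name cache), try to grab a reference and fall back
 * to a slower, locked lookup if the gate is closed:
 *
 *	if (vcache_tryvget(vp) != 0) {
 *		... vnode is changing state, redo the lookup the slow way ...
 *	}
 */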
1490 
1491 /*
1492  * Try to get an initial reference on this cached vnode.
1493  * Returns zero on success or ENOENT if the vnode has been reclaimed.
1494  * Will wait for the vnode state to be stable.
1495  *
1496  * v_interlock locked on entry and unlocked on exit.
1497  */
1498 int
1499 vcache_vget(vnode_t *vp)
1500 {
1501 	int error;
1502 
1503 	KASSERT(mutex_owned(vp->v_interlock));
1504 
1505 	/* Increment hold count to prevent vnode from disappearing. */
1506 	vp->v_holdcnt++;
1507 	VSTATE_WAIT_STABLE(vp);
1508 	vp->v_holdcnt--;
1509 
1510 	/* If this was the last reference to a reclaimed vnode free it now. */
1511 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
1512 		if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1513 			vcache_free(VNODE_TO_VIMPL(vp));
1514 		else
1515 			mutex_exit(vp->v_interlock);
1516 		return ENOENT;
1517 	}
1518 	VSTATE_ASSERT(vp, VS_LOADED);
1519 	error = vcache_tryvget(vp);
1520 	KASSERT(error == 0);
1521 	mutex_exit(vp->v_interlock);
1522 
1523 	return 0;
1524 }
1525 
1526 /*
1527  * Get a vnode / fs node pair by key and return it referenced through vpp.
1528  */
1529 int
1530 vcache_get(struct mount *mp, const void *key, size_t key_len,
1531     struct vnode **vpp)
1532 {
1533 	int error;
1534 	uint32_t hash;
1535 	const void *new_key;
1536 	struct vnode *vp;
1537 	struct vcache_key vcache_key;
1538 	vnode_impl_t *vip, *new_vip;
1539 
1540 	new_key = NULL;
1541 	*vpp = NULL;
1542 
1543 	vcache_key.vk_mount = mp;
1544 	vcache_key.vk_key = key;
1545 	vcache_key.vk_key_len = key_len;
1546 	hash = vcache_hash(&vcache_key);
1547 
1548 again:
1549 	mutex_enter(&vcache_lock);
1550 	vip = vcache_hash_lookup(&vcache_key, hash);
1551 
1552 	/* If found, take a reference or retry. */
1553 	if (__predict_true(vip != NULL)) {
1554 		/*
1555 		 * If the vnode is loading we cannot take the v_interlock
1556 		 * here as it might change during load (see uvm_obj_setlock()).
1557 		 * As changing state from VS_LOADING requires both vcache_lock
1558 		 * and v_interlock it is safe to test with vcache_lock held.
1559 		 *
1560 		 * Wait for vnodes changing state from VS_LOADING and retry.
1561 		 */
1562 		if (__predict_false(vip->vi_state == VS_LOADING)) {
1563 			cv_wait(&vcache_cv, &vcache_lock);
1564 			mutex_exit(&vcache_lock);
1565 			goto again;
1566 		}
1567 		vp = VIMPL_TO_VNODE(vip);
1568 		mutex_enter(vp->v_interlock);
1569 		mutex_exit(&vcache_lock);
1570 		error = vcache_vget(vp);
1571 		if (error == ENOENT)
1572 			goto again;
1573 		if (error == 0)
1574 			*vpp = vp;
1575 		KASSERT((error != 0) == (*vpp == NULL));
1576 		return error;
1577 	}
1578 	mutex_exit(&vcache_lock);
1579 
1580 	/* Allocate and initialize a new vcache / vnode pair. */
1581 	error = vfs_busy(mp);
1582 	if (error)
1583 		return error;
1584 	new_vip = vcache_alloc();
1585 	new_vip->vi_key = vcache_key;
1586 	vp = VIMPL_TO_VNODE(new_vip);
1587 	mutex_enter(&vcache_lock);
1588 	vip = vcache_hash_lookup(&vcache_key, hash);
1589 	if (vip == NULL) {
1590 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1591 		    new_vip, vi_hash);
1592 		vip = new_vip;
1593 	}
1594 
1595 	/* If another thread beat us inserting this node, retry. */
1596 	if (vip != new_vip) {
1597 		vcache_dealloc(new_vip);
1598 		vfs_unbusy(mp);
1599 		goto again;
1600 	}
1601 	mutex_exit(&vcache_lock);
1602 
1603 	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
1604 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
1605 	if (error) {
1606 		mutex_enter(&vcache_lock);
1607 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1608 		    new_vip, vnode_impl, vi_hash);
1609 		vcache_dealloc(new_vip);
1610 		vfs_unbusy(mp);
1611 		KASSERT(*vpp == NULL);
1612 		return error;
1613 	}
1614 	KASSERT(new_key != NULL);
1615 	KASSERT(memcmp(key, new_key, key_len) == 0);
1616 	KASSERT(vp->v_op != NULL);
1617 	vfs_insmntque(vp, mp);
1618 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1619 		vp->v_vflag |= VV_MPSAFE;
1620 	vfs_ref(mp);
1621 	vfs_unbusy(mp);
1622 
1623 	/* Finished loading, finalize node. */
1624 	mutex_enter(&vcache_lock);
1625 	new_vip->vi_key.vk_key = new_key;
1626 	mutex_enter(vp->v_interlock);
1627 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1628 	mutex_exit(vp->v_interlock);
1629 	mutex_exit(&vcache_lock);
1630 	*vpp = vp;
1631 	return 0;
1632 }
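
/*
 * A typical caller is a file system's lookup path, using its inode
 * number as the key (a sketch, assuming an ino_t key):
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error)
 *		return error;
 *	(vp is referenced but unlocked here; lock it before issuing VOPs)
 */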
1633 
1634 /*
1635  * Create a new vnode / fs node pair and return it referenced through vpp.
1636  */
1637 int
1638 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
1639     kauth_cred_t cred, void *extra, struct vnode **vpp)
1640 {
1641 	int error;
1642 	uint32_t hash;
1643 	struct vnode *vp, *ovp;
1644 	vnode_impl_t *vip, *ovip;
1645 
1646 	*vpp = NULL;
1647 
1648 	/* Allocate and initialize a new vcache / vnode pair. */
1649 	error = vfs_busy(mp);
1650 	if (error)
1651 		return error;
1652 	vip = vcache_alloc();
1653 	vip->vi_key.vk_mount = mp;
1654 	vp = VIMPL_TO_VNODE(vip);
1655 
1656 	/* Create and load the fs node. */
1657 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
1658 	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
1659 	if (error) {
1660 		mutex_enter(&vcache_lock);
1661 		vcache_dealloc(vip);
1662 		vfs_unbusy(mp);
1663 		KASSERT(*vpp == NULL);
1664 		return error;
1665 	}
1666 	KASSERT(vp->v_op != NULL);
1667 	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
1668 	if (vip->vi_key.vk_key_len > 0) {
1669 		KASSERT(vip->vi_key.vk_key != NULL);
1670 		hash = vcache_hash(&vip->vi_key);
1671 
1672 		/*
1673 		 * Wait for previous instance to be reclaimed,
1674 		 * then insert new node.
1675 		 */
1676 		mutex_enter(&vcache_lock);
1677 		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
1678 			ovp = VIMPL_TO_VNODE(ovip);
1679 			mutex_enter(ovp->v_interlock);
1680 			mutex_exit(&vcache_lock);
1681 			error = vcache_vget(ovp);
1682 			KASSERT(error == ENOENT);
1683 			mutex_enter(&vcache_lock);
1684 		}
1685 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1686 		    vip, vi_hash);
1687 		mutex_exit(&vcache_lock);
1688 	}
1689 	vfs_insmntque(vp, mp);
1690 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1691 		vp->v_vflag |= VV_MPSAFE;
1692 	vfs_ref(mp);
1693 	vfs_unbusy(mp);
1694 
1695 	/* Finished loading, finalize node. */
1696 	mutex_enter(&vcache_lock);
1697 	mutex_enter(vp->v_interlock);
1698 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1699 	mutex_exit(&vcache_lock);
1700 	mutex_exit(vp->v_interlock);
1701 	*vpp = vp;
1702 	return 0;
1703 }
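
/*
 * A typical caller is a file system's create path (a sketch; the
 * argument values are illustrative):
 *
 *	error = vcache_new(dvp->v_mount, dvp, vap, cnp->cn_cred, NULL, &vp);
 *	if (error)
 *		return error;
 *	(vp is referenced but unlocked; VFS_NEWVNODE() supplied the key)
 */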
1704 
1705 /*
1706  * Prepare key change: update the old node's key and lock the new cache node.
1707  * Return an error if the new node already exists.
1708  */
1709 int
1710 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1711     const void *old_key, size_t old_key_len,
1712     const void *new_key, size_t new_key_len)
1713 {
1714 	uint32_t old_hash, new_hash;
1715 	struct vcache_key old_vcache_key, new_vcache_key;
1716 	vnode_impl_t *vip, *new_vip;
1717 
1718 	old_vcache_key.vk_mount = mp;
1719 	old_vcache_key.vk_key = old_key;
1720 	old_vcache_key.vk_key_len = old_key_len;
1721 	old_hash = vcache_hash(&old_vcache_key);
1722 
1723 	new_vcache_key.vk_mount = mp;
1724 	new_vcache_key.vk_key = new_key;
1725 	new_vcache_key.vk_key_len = new_key_len;
1726 	new_hash = vcache_hash(&new_vcache_key);
1727 
1728 	new_vip = vcache_alloc();
1729 	new_vip->vi_key = new_vcache_key;
1730 
1731 	/* Insert locked new node used as placeholder. */
1732 	mutex_enter(&vcache_lock);
1733 	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1734 	if (vip != NULL) {
1735 		vcache_dealloc(new_vip);
1736 		return EEXIST;
1737 	}
1738 	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1739 	    new_vip, vi_hash);
1740 
1741 	/* Replace the old node's key with the temporary copy. */
1742 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1743 	KASSERT(vip != NULL);
1744 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
1745 	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
1746 	vip->vi_key = old_vcache_key;
1747 	mutex_exit(&vcache_lock);
1748 	return 0;
1749 }
1750 
1751 /*
1752  * Key change complete: update old node and remove placeholder.
1753  */
1754 void
1755 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1756     const void *old_key, size_t old_key_len,
1757     const void *new_key, size_t new_key_len)
1758 {
1759 	uint32_t old_hash, new_hash;
1760 	struct vcache_key old_vcache_key, new_vcache_key;
1761 	vnode_impl_t *vip, *new_vip;
1762 	struct vnode *new_vp;
1763 
1764 	old_vcache_key.vk_mount = mp;
1765 	old_vcache_key.vk_key = old_key;
1766 	old_vcache_key.vk_key_len = old_key_len;
1767 	old_hash = vcache_hash(&old_vcache_key);
1768 
1769 	new_vcache_key.vk_mount = mp;
1770 	new_vcache_key.vk_key = new_key;
1771 	new_vcache_key.vk_key_len = new_key_len;
1772 	new_hash = vcache_hash(&new_vcache_key);
1773 
1774 	mutex_enter(&vcache_lock);
1775 
1776 	/* Lookup old and new node. */
1777 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1778 	KASSERT(vip != NULL);
1779 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
1780 
1781 	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1782 	KASSERT(new_vip != NULL);
1783 	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
1784 	new_vp = VIMPL_TO_VNODE(new_vip);
1785 	mutex_enter(new_vp->v_interlock);
1786 	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
1787 	mutex_exit(new_vp->v_interlock);
1788 
1789 	/* Rekey old node and put it onto its new hashlist. */
1790 	vip->vi_key = new_vcache_key;
1791 	if (old_hash != new_hash) {
1792 		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
1793 		    vip, vnode_impl, vi_hash);
1794 		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1795 		    vip, vi_hash);
1796 	}
1797 
1798 	/* Remove new node used as placeholder. */
1799 	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
1800 	    new_vip, vnode_impl, vi_hash);
1801 	vcache_dealloc(new_vip);
1802 }
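
/*
 * A sketch of the full rekey sequence as a file system would use it when
 * the bytes that make up the cache key change (names are illustrative):
 *
 *	error = vcache_rekey_enter(mp, vp, &old_ino, sizeof(old_ino),
 *	    &new_ino, sizeof(new_ino));
 *	if (error)
 *		return error;			(EEXIST: new key already cached)
 *	... update the fs specific node so it is now identified by new_ino ...
 *	vcache_rekey_exit(mp, vp, &old_ino, sizeof(old_ino),
 *	    &new_ino, sizeof(new_ino));
 */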
1803 
1804 /*
1805  * Disassociate the underlying file system from a vnode.
1806  *
1807  * Must be called with the vnode locked and will return it unlocked.
1808  * Must be called with the interlock held, and will return with it held.
1809  */
1810 static void
1811 vcache_reclaim(vnode_t *vp)
1812 {
1813 	lwp_t *l = curlwp;
1814 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1815 	struct mount *mp = vp->v_mount;
1816 	uint32_t hash;
1817 	uint8_t temp_buf[64], *temp_key;
1818 	size_t temp_key_len;
1819 	bool recycle;
1820 	int error;
1821 
1822 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1823 	KASSERT(mutex_owned(vp->v_interlock));
1824 	KASSERT(vrefcnt(vp) != 0);
1825 
1826 	temp_key_len = vip->vi_key.vk_key_len;
1827 	/*
1828 	 * Prevent the vnode from being recycled or brought into use
1829 	 * while we clean it out.
1830 	 */
1831 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
1832 
1833 	/*
1834 	 * Send NOTE_REVOKE now, before we call VOP_RECLAIM(),
1835 	 * because VOP_RECLAIM() could cause vp->v_klist to
1836 	 * become invalid.  Don't check for interest in NOTE_REVOKE
1837 	 * here; it's always posted because it sets EV_EOF.
1838 	 *
1839 	 * Once it's been posted, reset vp->v_klist to point to
1840 	 * our own local storage, in case we were sharing with
1841 	 * someone else.
1842 	 */
1843 	KNOTE(&vp->v_klist->vk_klist, NOTE_REVOKE);
1844 	vp->v_klist = &vip->vi_klist;
1845 	mutex_exit(vp->v_interlock);
1846 
1847 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1848 	mutex_enter(vp->v_interlock);
1849 	if ((vp->v_iflag & VI_EXECMAP) != 0) {
1850 		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
1851 	}
1852 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1853 	vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
1854 	mutex_exit(vp->v_interlock);
1855 	rw_exit(vp->v_uobj.vmobjlock);
1856 
1857 	/*
1858 	 * With vnode state set to reclaiming, purge name cache immediately
1859 	 * to prevent new handles on vnode, and wait for existing threads
1860 	 * trying to get a handle to notice VS_RECLAIMED status and abort.
1861 	 */
1862 	cache_purge(vp);
1863 
1864 	/* Replace the vnode key with a temporary copy. */
1865 	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
1866 		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
1867 	} else {
1868 		temp_key = temp_buf;
1869 	}
1870 	if (vip->vi_key.vk_key_len > 0) {
1871 		mutex_enter(&vcache_lock);
1872 		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
1873 		vip->vi_key.vk_key = temp_key;
1874 		mutex_exit(&vcache_lock);
1875 	}
1876 
1877 	fstrans_start(mp);
1878 
1879 	/*
1880 	 * Clean out any cached data associated with the vnode.
1881 	 */
1882 	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
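	/*
	 * If writing the buffers back failed, discard the mount's WAPBL
	 * journal (if it has one) and retry, this time throwing the
	 * buffers away: the vnode is going away regardless.
	 */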
1883 	if (error != 0) {
1884 		if (wapbl_vphaswapbl(vp))
1885 			WAPBL_DISCARD(wapbl_vptomp(vp));
1886 		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1887 	}
1888 	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
1889 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1890 	if (vp->v_type == VBLK || vp->v_type == VCHR) {
1891 		spec_node_revoke(vp);
1892 	}
1893 
1894 	/*
1895 	 * Disassociate the underlying file system from the vnode.
1896 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1897 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1898 	 * would no longer function.
1899 	 */
1900 	VOP_INACTIVE(vp, &recycle);
1901 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1902 	if (VOP_RECLAIM(vp)) {
1903 		vnpanic(vp, "%s: cannot reclaim", __func__);
1904 	}
1905 
1906 	KASSERT(vp->v_data == NULL);
1907 	KASSERT((vp->v_iflag & VI_PAGES) == 0);
1908 
1909 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
1910 		uvm_ra_freectx(vp->v_ractx);
1911 		vp->v_ractx = NULL;
1912 	}
1913 
1914 	if (vip->vi_key.vk_key_len > 0) {
1915 		/* Remove from vnode cache. */
1916 		hash = vcache_hash(&vip->vi_key);
1917 		mutex_enter(&vcache_lock);
1918 		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1919 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1920 		    vip, vnode_impl, vi_hash);
1921 		mutex_exit(&vcache_lock);
1922 	}
1923 	if (temp_key != temp_buf)
1924 		kmem_free(temp_key, temp_key_len);
1925 
1926 	/* Done with purge, notify sleepers of the grim news. */
1927 	mutex_enter(vp->v_interlock);
1928 	vp->v_op = dead_vnodeop_p;
1929 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
1930 	vp->v_tag = VT_NON;
1931 	mutex_exit(vp->v_interlock);
1932 
1933 	/*
1934 	 * Move to dead mount.  Must be after changing the operations
1935 	 * vector as vnode operations enter the mount before using the
1936 	 * operations vector.  See sys/kern/vnode_if.c.
1937 	 */
1938 	vp->v_vflag &= ~VV_ROOT;
1939 	vfs_ref(dead_rootmount);
1940 	vfs_insmntque(vp, dead_rootmount);
1941 
1942 #ifdef PAX_SEGVGUARD
1943 	pax_segvguard_cleanup(vp);
1944 #endif /* PAX_SEGVGUARD */
1945 
1946 	mutex_enter(vp->v_interlock);
1947 	fstrans_done(mp);
1948 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1949 }
1950 
1951 /*
1952  * Disassociate the underlying file system from an open device vnode
1953  * and make it anonymous.
1954  *
1955  * Vnode unlocked on entry, drops a reference to the vnode.
1956  */
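/*
 * Typically used on the revoke path: the open device node keeps working
 * through the spec vnode operations after it has been cut loose from
 * its file system.
 */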
1957 void
1958 vcache_make_anon(vnode_t *vp)
1959 {
1960 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1961 	uint32_t hash;
1962 	bool recycle;
1963 
1964 	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
1965 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1966 	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
1967 
1968 	/* Remove from vnode cache. */
1969 	hash = vcache_hash(&vip->vi_key);
1970 	mutex_enter(&vcache_lock);
1971 	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1972 	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1973 	    vip, vnode_impl, vi_hash);
1974 	vip->vi_key.vk_mount = dead_rootmount;
1975 	vip->vi_key.vk_key_len = 0;
1976 	vip->vi_key.vk_key = NULL;
1977 	mutex_exit(&vcache_lock);
1978 
1979 	/*
1980 	 * Disassociate the underlying file system from the vnode.
1981 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1982 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1983 	 * would no longer function.
1984 	 */
1985 	if (vn_lock(vp, LK_EXCLUSIVE)) {
1986 		vnpanic(vp, "%s: cannot lock", __func__);
1987 	}
1988 	VOP_INACTIVE(vp, &recycle);
1989 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1990 	if (VOP_RECLAIM(vp)) {
1991 		vnpanic(vp, "%s: cannot reclaim", __func__);
1992 	}
1993 
1994 	/* Purge name cache. */
1995 	cache_purge(vp);
1996 
1997 	/* Done with purge, change operations vector. */
1998 	mutex_enter(vp->v_interlock);
1999 	vp->v_op = spec_vnodeop_p;
2000 	vp->v_vflag |= VV_MPSAFE;
2001 	mutex_exit(vp->v_interlock);
2002 
2003 	/*
2004 	 * Move to dead mount.  Must be after changing the operations
2005 	 * vector as vnode operations enter the mount before using the
2006 	 * operations vector.  See sys/kern/vnode_if.c.
2007 	 */
2008 	vfs_ref(dead_rootmount);
2009 	vfs_insmntque(vp, dead_rootmount);
2010 
2011 	vrele(vp);
2012 }
2013 
2014 /*
2015  * Update outstanding I/O count and do wakeup if requested.
2016  */
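/*
 * v_numoutput is raised by the buffer I/O code as writes are started on
 * the vnode; threads flushing the vnode, e.g. via vinvalbuf(), sleep on
 * v_cv until the count drains to zero, hence the broadcast below.
 */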
2017 void
2018 vwakeup(struct buf *bp)
2019 {
2020 	vnode_t *vp;
2021 
2022 	if ((vp = bp->b_vp) == NULL)
2023 		return;
2024 
2025 	KASSERT(bp->b_objlock == vp->v_interlock);
2026 	KASSERT(mutex_owned(bp->b_objlock));
2027 
2028 	if (--vp->v_numoutput < 0)
2029 		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
2030 	if (vp->v_numoutput == 0)
2031 		cv_broadcast(&vp->v_cv);
2032 }
2033 
2034 /*
2035  * Test a vnode for being or becoming dead.  Returns one of:
2036  * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
2037  * ENOENT: vnode is dead.
2038  * 0:      otherwise.
2039  *
2040  * Whenever this function returns a non-zero value all future
2041  * calls will also return a non-zero value.
2042  */
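/*
 * A caller that must not sleep would use it roughly like this
 * (sketch only):
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;
 */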
2043 int
2044 vdead_check(struct vnode *vp, int flags)
2045 {
2046 
2047 	KASSERT(mutex_owned(vp->v_interlock));
2048 
2049 	if (! ISSET(flags, VDEAD_NOWAIT))
2050 		VSTATE_WAIT_STABLE(vp);
2051 
2052 	if (VSTATE_GET(vp) == VS_RECLAIMING) {
2053 		KASSERT(ISSET(flags, VDEAD_NOWAIT));
2054 		return EBUSY;
2055 	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
2056 		return ENOENT;
2057 	}
2058 
2059 	return 0;
2060 }
2061 
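/*
 * Kick the vdrain thread and wait for it to make a pass over the vnode
 * cache, then resize the hash table if the desiredvnodes target has
 * changed.  Returns EBUSY if the cache could not be brought below the
 * target.
 */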
2062 int
2063 vfs_drainvnodes(void)
2064 {
2065 	int i, gen;
2066 
2067 	mutex_enter(&vdrain_lock);
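	/*
	 * The intent of looping twice: a pass that was already under way
	 * when we arrived only accounts for the first generation bump,
	 * so only the second guarantees a complete pass begun after this
	 * call.
	 */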
2068 	for (i = 0; i < 2; i++) {
2069 		gen = vdrain_gen;
2070 		while (gen == vdrain_gen) {
2071 			cv_broadcast(&vdrain_cv);
2072 			cv_wait(&vdrain_gen_cv, &vdrain_lock);
2073 		}
2074 	}
2075 	mutex_exit(&vdrain_lock);
2076 
2077 	if (numvnodes >= desiredvnodes)
2078 		return EBUSY;
2079 
2080 	if (vcache_hashsize != desiredvnodes)
2081 		vcache_reinit();
2082 
2083 	return 0;
2084 }
2085 
2086 void
2087 vnpanic(vnode_t *vp, const char *fmt, ...)
2088 {
2089 	va_list ap;
2090 
2091 #ifdef DIAGNOSTIC
2092 	vprint(NULL, vp);
2093 #endif
2094 	va_start(ap, fmt);
2095 	vpanic(fmt, ap);
2096 	va_end(ap);
2097 }
2098 
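/*
 * Make "tvp" share "fvp"'s interlock, releasing the reference on the
 * old one.  Callers (e.g. the spec node code that aliases device
 * vnodes) are expected to provide their own serialization around the
 * switch.
 */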
2099 void
2100 vshareilock(vnode_t *tvp, vnode_t *fvp)
2101 {
2102 	kmutex_t *oldlock;
2103 
2104 	oldlock = tvp->v_interlock;
2105 	mutex_obj_hold(fvp->v_interlock);
2106 	tvp->v_interlock = fvp->v_interlock;
2107 	mutex_obj_free(oldlock);
2108 }
2109 
2110 void
2111 vshareklist(vnode_t *tvp, vnode_t *fvp)
2112 {
2113 	/*
2114 	 * If two vnodes share klist state, they must also share
2115 	 * an interlock.
2116 	 */
2117 	KASSERT(tvp->v_interlock == fvp->v_interlock);
2118 
2119 	/*
2120 	 * We make the following assumptions:
2121 	 *
2122 	 * ==> Some other synchronization is happening outside of
2123 	 *     our view to make this safe.
2124 	 *
2125 	 * ==> The "to" vnode will have the necessary references
2126 	 *     on the "from" vnode so that the storage for the klist
2127 	 *     won't be yanked out from beneath us (the vnode_impl).
2128 	 *
2129 	 * ==> If "from" is also sharing, we then assume that "from"
2130 	 *     has the necessary references, and so on.
2131 	 */
2132 	tvp->v_klist = fvp->v_klist;
2133 }
2134