/*	$NetBSD: vfs_vnode.c,v 1.128 2021/10/20 03:08:18 thorpej Exp $	*/

/*-
 * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */
/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vcache_vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to
 *	inform the file system that the vnode is inactive.  Via this call,
 *	the file system indicates whether the vnode can be recycled
 *	(usually, it checks its own references, e.g. the link count, to
 *	see whether the file was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free
 *	list (cache), or cleaned via vcache_reclaim, which calls
 *	VOP_RECLAIM(9) to disassociate the underlying file system from
 *	the vnode, and finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file system
 *			and is not yet ready to use.
 *	- LOADED	Vnode has an associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> LOADED
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	BLOCKED -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying file
 *			system in vcache_reclaim().
 *	LOADED -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> LOADED
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system or vcache_rekey*()
 *			drops a vnode used as a placeholder.
 *
 *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock with one exception:
 *	to change from LOADING both v_interlock and vcache_lock must be
 *	held, so it is possible to check "state == LOADING" while holding
 *	only vcache_lock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
 *	the vref(9), vrele(9) and vput(9) routines.  Common points holding
 *	references are, e.g., open files, current working directories,
 *	mount points, etc.
 *
 *	v_usecount is adjusted with atomic operations; however, to change
 *	from a non-zero value to zero the interlock must also be held.
 */
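
/*
 * Illustrative sketch only: a file system consumer keyed by inode number
 * might drive this life-cycle as follows ("mp" and "ino" are assumed to
 * exist in the caller):
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error)
 *		return error;
 *	vn_lock(vp, LK_SHARED | LK_RETRY);	// lock for VOP_*() calls
 *	// ... use the vnode ...
 *	vput(vp);				// unlock and drop reference
 */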

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.128 2021/10/20 03:08:18 thorpej Exp $");

#ifdef _KERNEL_OPT
#include "opt_pax.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pax.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_stat.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */

#define	LRU_VRELE	0
#define	LRU_FREE	1
#define	LRU_HOLD	2
#define	LRU_COUNT	3

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and one for those
 * which do (i.e.  v_holdcnt is non-zero).  We put the lists into a single,
 * private cache line as vnodes migrate between them while under the same
 * lock (vdrain_lock).
 */
u_int			numvnodes		__cacheline_aligned;
static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t		vcache_lock		__cacheline_aligned;
static kcondvar_t	vcache_cv;
static u_int		vcache_hashsize;
static u_long		vcache_hashmask;
static struct hashhead	*vcache_hashtab;
static pool_cache_t	vcache_pool;
static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_dealloc(vnode_impl_t *);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern int		(**spec_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/*
 * The high bit of v_usecount is a gate for vcache_tryvget().  It's set
 * only when the vnode state is LOADED.
 */
#define	VUSECOUNT_MASK	0x7fffffff
#define	VUSECOUNT_GATE	0x80000000
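
/*
 * For example, a LOADED vnode holding two references has
 * v_usecount == (VUSECOUNT_GATE | 2), i.e. 0x80000002, and
 * vrefcnt() below masks off the gate bit and reports 2.
 */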

/*
 * Return the current usecount of a vnode.
 */
inline int
vrefcnt(struct vnode *vp)
{

	return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
}

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_VALID(state) \
	((state) != VS_ACTIVE && (state) != VS_MARKER)
#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)

void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	int refcnt = vrefcnt(vp);

	if (!has_lock) {
		/*
		 * Prevent predictive loads from the CPU, but check the state
		 * without locking first.
		 */
		membar_enter();
		if (state == VS_ACTIVE && refcnt > 0 &&
		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
			return;
		if (vip->vi_state == state)
			return;
		mutex_enter((vp)->v_interlock);
	}

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if ((state == VS_ACTIVE && refcnt > 0 &&
	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
	    vip->vi_state == state) {
		if (!has_lock)
			mutex_exit((vp)->v_interlock);
		return;
	}
	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
	    vstate_name(vip->vi_state), refcnt,
	    vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	return vip->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

	if (! VSTATE_VALID(from))
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (! VSTATE_VALID(to))
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (vip->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(vip->vi_state), vstate_name(from), func, line);
	if ((from == VS_LOADED) != gated)
		vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
		    vstate_name(vip->vi_state), gated, func, line);

	/* Open/close the gate for vcache_tryvget(). */
	if (to == VS_LOADED)
		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
	else
		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{

}

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	/* Open/close the gate for vcache_tryvget(). */
	if (to == VS_LOADED)
		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
	else
		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused, i;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	for (i = 0; i < LRU_COUNT; i++) {
		TAILQ_INIT(&lru_list[i]);
	}
	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));
	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	vip->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *vip;

	vip = VNODE_TO_VIMPL(vp);
	KASSERT(vip->vi_state == VS_MARKER);
	mutex_obj_free(vp->v_interlock);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_list[LRU_HOLD];
	else
		return &lru_list[LRU_FREE];
}

/*
 * Put the vnode to the end of the given list.
 * Both the current and the new list may be NULL, used on vnode alloc/free.
 * Adjust numvnodes and signal the vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *vip;
	int d;

	/*
	 * If the vnode is on the correct list, and was put there recently,
	 * then leave it be, thus avoiding huge cache and lock contention.
	 */
	vip = VNODE_TO_VIMPL(vp);
	if (listhd == vip->vi_lrulisthd &&
	    (getticks() - vip->vi_lrulisttm) < hz) {
		return;
	}

	mutex_enter(&vdrain_lock);
	d = 0;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d++;
	vip->vi_lrulisthd = listhd;
	vip->vi_lrulisttm = getticks();
	if (vip->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d--;
	if (d != 0) {
		/*
		 * Looks strange?  This is not a bug.  Don't store
		 * numvnodes unless there is a change - avoid false
		 * sharing on MP.
		 */
		numvnodes += d;
	}
	if ((d > 0 && numvnodes > desiredvnodes) ||
	    listhd == &lru_list[LRU_VRELE])
		cv_signal(&vdrain_cv);
	mutex_exit(&vdrain_lock);
}

/*
 * Release deferred vrele vnodes for this mount.
 * Called with file system suspended.
 */
void
vrele_flush(struct mount *mp)
{
	vnode_impl_t *vip, *marker;
	vnode_t *vp;
	int when = 0;

	KASSERT(fstrans_is_owner(mp));

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);
	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);

	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
		    vi_lrulist);
		vp = VIMPL_TO_VNODE(vip);
		if (vnis_marker(vp))
			continue;

		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
		vip->vi_lrulisttm = getticks();
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
		mutex_exit(&vdrain_lock);

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0, LK_EXCLUSIVE);

		if (getticks() > when) {
			yield();
			when = getticks() + hz / 10;
		}

		mutex_enter(&vdrain_lock);
	}

	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
	mutex_exit(&vdrain_lock);

	vnfree_marker(VIMPL_TO_VNODE(marker));
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vrefcnt(vp) > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vcache_vget(vp) == 0) {
		if (!vrecycle(vp)) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			mutex_enter(vp->v_interlock);
			vrelel(vp, 0, LK_EXCLUSIVE);
		}
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it on the last lru list, the last vrele()
	 * will put it back onto the right list before
	 * its usecount reaches zero.
	 */
	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
	vip->vi_lrulisttm = getticks();
	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_EXCLUSIVE);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	int i;
	u_int target;
	vnode_impl_t *vip, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes/10;

		for (i = 0; i < LRU_COUNT; i++) {
			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
				    vi_lrulist);
				if (vnis_marker(VIMPL_TO_VNODE(vip)))
					continue;
				if (i == LRU_VRELE)
					vdrain_vrele(VIMPL_TO_VNODE(vip));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(vip));
			}
			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			kpause("vdrainrt", false, 1, &vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
			return false;
		}
		KASSERT((use & VUSECOUNT_MASK) > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{
	int lktype;

	/*
	 * Do an unlocked check of the usecount.  If it looks like we're not
	 * about to drop the last reference, then unlock the vnode and try
	 * to drop the reference.  If it ends up being the last reference
	 * after all, vrelel() can fix it all up.  Most of the time this
	 * will all go to plan.
	 */
	if (vrefcnt(vp) > 1) {
		VOP_UNLOCK(vp);
		if (vtryrele(vp)) {
			return;
		}
		lktype = LK_NONE;
	} else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
		VOP_UNLOCK(vp);
		lktype = LK_NONE;
	} else {
		lktype = VOP_ISLOCKED(vp);
		KASSERT(lktype != LK_NONE);
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, lktype);
}

/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags, int lktype)
{
	const bool async = ((flags & VRELEL_ASYNC) != 0);
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count and
	 * unlock.  VOP_UNLOCK() is called here without a vnode reference
	 * held, but that is ok as the hold of v_interlock will stop the
	 * vnode from disappearing.
	 */
	if (vtryrele(vp)) {
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * First try to get the vnode locked for VOP_INACTIVE().
	 * Defer vnode release to the vdrain thread if the caller requests
	 * it explicitly, is the pagedaemon, or if taking the lock fails.
	 */
	defer = false;
	if ((curlwp == uvm.pagedaemon_lwp) || async) {
		defer = true;
	} else if (lktype == LK_SHARED) {
		/* Excellent chance of getting the lock, if the last ref. */
		error = vn_lock(vp, LK_UPGRADE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	} else if (lktype == LK_NONE) {
		/* Excellent chance of getting the lock, if the last ref. */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	}
	KASSERT(mutex_owned(vp->v_interlock));
	if (defer) {
		/*
		 * Defer reclaim to the kthread; it's not safe to
		 * clean it here.  We donate it our last reference.
		 */
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		lru_requeue(vp, &lru_list[LRU_VRELE]);
		mutex_exit(vp->v_interlock);
		return;
	}
	KASSERT(lktype == LK_EXCLUSIVE);

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		VOP_UNLOCK(vp);
	} else {
		/*
		 * If VOP_INACTIVE() indicates that the file has been
		 * deleted, then recycle the vnode.
		 *
		 * Note that VOP_INACTIVE() will not drop the vnode lock.
		 */
		mutex_exit(vp->v_interlock);
		recycle = false;
		VOP_INACTIVE(vp, &recycle);
		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
		mutex_enter(vp->v_interlock);

		for (;;) {
			/*
			 * If no longer the last reference, try to shed it.
			 * On success, drop the interlock last thereby
			 * preventing the vnode being freed behind us.
			 */
			if (vtryrele(vp)) {
				VOP_UNLOCK(vp);
				rw_exit(vp->v_uobj.vmobjlock);
				mutex_exit(vp->v_interlock);
				return;
			}
			/*
			 * Block new references then check again to see if a
			 * new reference was acquired in the meantime.  If
			 * it was, restore the vnode state and try again.
			 */
			if (recycle) {
				VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
				if (vrefcnt(vp) != 1) {
					VSTATE_CHANGE(vp, VS_BLOCKED,
					    VS_LOADED);
					continue;
				}
			}
			break;
		}

		/* Take care of space accounting. */
		if ((vp->v_iflag & VI_EXECMAP) != 0) {
			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;
		rw_exit(vp->v_uobj.vmobjlock);

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_BLOCKED);
			/* vcache_reclaim drops the lock. */
			vcache_reclaim(vp);
		} else {
			VOP_UNLOCK(vp);
		}
		KASSERT(vrefcnt(vp) > 0);
	}

	if ((atomic_dec_uint_nv(&vp->v_usecount) & VUSECOUNT_MASK) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		lru_requeue(vp, lru_which(vp));
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Asynchronous vnode release: the vnode is released in a different context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC, LK_NONE);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 *
 * NB: lockless code sequences may rely on this not blocking.
 */
void
vref(vnode_t *vp)
{

	KASSERT(vrefcnt(vp) > 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	vholdl(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	holdrelel(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Recycle an unused vnode if caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* If the vnode is already clean we're done. */
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) != VS_LOADED) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0, LK_NONE);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);

	/* Make sure we hold the last reference. */
	if (vrefcnt(vp) != 1) {
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
		mutex_exit(vp->v_interlock);
		return false;
	}

	mutex_exit(vp->v_interlock);

	/*
	 * On a leaf file system this lock will always succeed as we hold
	 * the last reference and prevent further references.
	 * On layered file systems waiting for the lock would open a can of
	 * deadlocks as the lower vnodes may have other active references.
	 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);

	mutex_enter(vp->v_interlock);
	if (error) {
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
		mutex_exit(vp->v_interlock);
		return false;
	}

	KASSERT(vrefcnt(vp) == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0, LK_NONE);

	return true;
}

/*
 * Helper for vrevoke() to propagate suspension from lastmp
 * to thismp.  Both args may be NULL.
 * Returns the currently suspended file system or NULL.
 */
static struct mount *
vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
{
	int error;

	if (lastmp == thismp)
		return thismp;

	if (lastmp != NULL)
		vfs_resume(lastmp);

	if (thismp == NULL)
		return NULL;

	do {
		error = vfs_suspend(thismp, 0);
	} while (error == EINTR || error == ERESTART);

	if (error == 0)
		return thismp;

	KASSERT(error == EOPNOTSUPP);
	return NULL;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	struct mount *mp;
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vrefcnt(vp) > 0);

	mp = vrevoke_suspend_next(NULL, vp->v_mount);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);

		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
			mp = vrevoke_suspend_next(mp, vq->v_mount);
			vgone(vq);
		}
	}
	vrevoke_suspend_next(mp, NULL);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{
	int lktype;

	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	lktype = LK_EXCLUSIVE;
	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_LOADED) {
		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
		vcache_reclaim(vp);
		lktype = LK_NONE;
	}
	VSTATE_ASSERT(vp, VS_RECLAIMED);
	vrelel(vp, 0, lktype);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	KASSERT(key->vk_key_len > 0);

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static int
vcache_stats(struct hashstat_sysctl *hs, bool fill)
{
	vnode_impl_t *vip;
	uint64_t chain;

	strlcpy(hs->hash_name, "vcache", sizeof(hs->hash_name));
	strlcpy(hs->hash_desc, "vnode cache hash", sizeof(hs->hash_desc));
	if (!fill)
		return 0;

	hs->hash_size = vcache_hashmask + 1;

	for (size_t i = 0; i < hs->hash_size; i++) {
		chain = 0;
		mutex_enter(&vcache_lock);
		SLIST_FOREACH(vip, &vcache_hashtab[i], vi_hash) {
			chain++;
		}
		mutex_exit(&vcache_lock);
		if (chain > 0) {
			hs->hash_used++;
			hs->hash_items += chain;
			if (chain > hs->hash_maxchain)
				hs->hash_maxchain = chain;
		}
		preempt_point();
	}

	return 0;
}

static void
vcache_init(void)
{

	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache_pool != NULL);
	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache_cv, "vcache");
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache_hashmask);
	hashstat_register("vcache", vcache_stats);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *vip;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache_lock);
	oldtab = vcache_hashtab;
	oldmask = vcache_hashmask;
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = newtab;
	vcache_hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
			hash = vcache_hash(&vip->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
			    vip, vi_hash);
		}
	}
	mutex_exit(&vcache_lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *vip;

	KASSERT(mutex_owned(&vcache_lock));

	hashp = &vcache_hashtab[hash & vcache_hashmask];
	SLIST_FOREACH(vip, hashp, vi_hash) {
		if (key->vk_mount != vip->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != vip->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
			continue;
		return vip;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	vp = VIMPL_TO_VNODE(vip);
	memset(vip, 0, sizeof(*vip));

	rw_init(&vip->vi_lock);
	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
	cv_init(&vp->v_cv, "vnode");
	cache_vnode_init(vp);

	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	vip->vi_state = VS_LOADING;

	lru_requeue(vp, &lru_list[LRU_FREE]);

	return vip;
}

/*
 * Deallocate a vcache node in state VS_LOADING.
 *
 * vcache_lock held on entry and released on return.
 */
static void
vcache_dealloc(vnode_impl_t *vip)
{
	vnode_t *vp;

	KASSERT(mutex_owned(&vcache_lock));

	vp = VIMPL_TO_VNODE(vip);
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache_lock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Free an unused, unreferenced vcache node.
 * v_interlock locked on entry.
 */
static void
vcache_free(vnode_impl_t *vip)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(vip);
	KASSERT(mutex_owned(vp->v_interlock));

	KASSERT(vrefcnt(vp) == 0);
	KASSERT(vp->v_holdcnt == 0);
	KASSERT(vp->v_writecount == 0);
	lru_requeue(vp, NULL);
	mutex_exit(vp->v_interlock);

	vfs_insmntque(vp, NULL);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_destroy(vp);

	mutex_obj_free(vp->v_interlock);
	rw_destroy(&vip->vi_lock);
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	cache_vnode_fini(vp);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success or EBUSY if the vnode state is not LOADED.
 *
 * NB: lockless code sequences may rely on this not blocking.
 */
int
vcache_tryvget(vnode_t *vp)
{
	u_int use, next;

	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
			return EBUSY;
		}
		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
		if (__predict_true(next == use)) {
			return 0;
		}
	}
}
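
/*
 * Illustrative sketch only: a lockless consumer that found "vp" without
 * taking any locks would typically try the gated path first and fall
 * back to the interlocked path on failure:
 *
 *	if (vcache_tryvget(vp) != 0) {
 *		mutex_enter(vp->v_interlock);
 *		error = vcache_vget(vp);	// may wait, see below
 *	}
 */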

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success or ENOENT if the vnode has been reclaimed.
 * Will wait for the vnode state to be stable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_vget(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	/* Increment hold count to prevent vnode from disappearing. */
	vp->v_holdcnt++;
	VSTATE_WAIT_STABLE(vp);
	vp->v_holdcnt--;

	/* If this was the last reference to a reclaimed vnode free it now. */
	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
		if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
			vcache_free(VNODE_TO_VIMPL(vp));
		else
			mutex_exit(vp->v_interlock);
		return ENOENT;
	}
	VSTATE_ASSERT(vp, VS_LOADED);
	atomic_inc_uint(&vp->v_usecount);
	mutex_exit(vp->v_interlock);

	return 0;
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *vip, *new_vip;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(vip != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache_lock
		 * and v_interlock it is safe to test with vcache_lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(vip->vi_state == VS_LOADING)) {
			cv_wait(&vcache_cv, &vcache_lock);
			mutex_exit(&vcache_lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(vip);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(vp);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache_lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	new_vip = vcache_alloc();
	new_vip->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);
	if (vip == NULL) {
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vi_hash);
		vip = new_vip;
	}

	/* If another thread beat us inserting this node, retry. */
	if (vip != new_vip) {
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		goto again;
	}
	mutex_exit(&vcache_lock);

	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache_lock);
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vnode_impl, vi_hash);
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	new_vip->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache_lock);
	*vpp = vp;
	return 0;
}

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, void *extra, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *vp, *ovp;
	vnode_impl_t *vip, *ovip;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	vip = vcache_alloc();
	vip->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(vip);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache_lock);
		vcache_dealloc(vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(vp->v_op != NULL);
	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
	if (vip->vi_key.vk_key_len > 0) {
		KASSERT(vip->vi_key.vk_key != NULL);
		hash = vcache_hash(&vip->vi_key);

		/*
		 * Wait for previous instance to be reclaimed,
		 * then insert new node.
		 */
		mutex_enter(&vcache_lock);
		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
			ovp = VIMPL_TO_VNODE(ovip);
			mutex_enter(ovp->v_interlock);
			mutex_exit(&vcache_lock);
			error = vcache_vget(ovp);
			KASSERT(error == ENOENT);
			mutex_enter(&vcache_lock);
		}
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vi_hash);
		mutex_exit(&vcache_lock);
	}
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(&vcache_lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}

/*
 * Prepare key change: update the old cache node's key and lock the new
 * cache node.  Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_vip = vcache_alloc();
	new_vip->vi_key = new_vcache_key;

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (vip != NULL) {
		vcache_dealloc(new_vip);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vi_hash);

	/* Replace the old node's key with the temporary copy. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);
	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
	vip->vi_key = old_vcache_key;
	mutex_exit(&vcache_lock);
	return 0;
}

/*
 * Key change complete: update old node and remove placeholder.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;
	struct vnode *new_vp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache_lock);

	/* Lookup old and new node. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);

	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_vip != NULL);
	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
	new_vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(new_vp->v_interlock);
	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
	mutex_exit(new_vp->v_interlock);

	/* Rekey old node and put it onto its new hashlist. */
	vip->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
		    vip, vi_hash);
	}

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vnode_impl, vi_hash);
	vcache_dealloc(new_vip);
}
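
/*
 * Illustrative sketch only: a file system changing a node's key would
 * bracket the change with the two calls above ("mp", "vp", "old_k" and
 * "new_k" are assumed to exist in the caller):
 *
 *	error = vcache_rekey_enter(mp, vp, &old_k, sizeof(old_k),
 *	    &new_k, sizeof(new_k));
 *	if (error)
 *		return error;		// new key already present
 *	// ... change the key stored in the fs node ...
 *	vcache_rekey_exit(mp, vp, &old_k, sizeof(old_k),
 *	    &new_k, sizeof(new_k));
 */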

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp = vp->v_mount;
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vrefcnt(vp) != 0);

	active = (vrefcnt(vp) > 1);
	temp_key_len = vip->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
	mutex_exit(vp->v_interlock);

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_EXECMAP) != 0) {
		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);

	/*
	 * With vnode state set to reclaiming, purge name cache immediately
	 * to prevent new handles on vnode, and wait for existing threads
	 * trying to get a handle to notice VS_RECLAIMED status and abort.
	 */
	cache_purge(vp);

	/* Replace the vnode key with a temporary copy. */
	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	if (vip->vi_key.vk_key_len > 0) {
		mutex_enter(&vcache_lock);
		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
		vip->vi_key.vk_key = temp_key;
		mutex_exit(&vcache_lock);
	}

	fstrans_start(mp);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT((vp->v_iflag & VI_PAGES) == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	if (vip->vi_key.vk_key_len > 0) {
		/* Remove from vnode cache. */
		hash = vcache_hash(&vip->vi_key);
		mutex_enter(&vcache_lock);
		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		mutex_exit(&vcache_lock);
	}
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	/*
	 * Don't check for interest in NOTE_REVOKE; it's always posted
	 * because it sets EV_EOF.
	 */
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vp->v_vflag &= ~VV_ROOT;
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

#ifdef PAX_SEGVGUARD
	pax_segvguard_cleanup(vp);
#endif /* PAX_SEGVGUARD */

	mutex_enter(vp->v_interlock);
	fstrans_done(mp);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Disassociate the underlying file system from an open device vnode
 * and make it anonymous.
 *
 * Vnode unlocked on entry, drops a reference to the vnode.
 */
void
vcache_make_anon(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	uint32_t hash;
	bool recycle;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);

	/* Remove from vnode cache. */
	hash = vcache_hash(&vip->vi_key);
	mutex_enter(&vcache_lock);
	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
	    vip, vnode_impl, vi_hash);
	vip->vi_key.vk_mount = dead_rootmount;
	vip->vi_key.vk_key_len = 0;
	vip->vi_key.vk_key = NULL;
	mutex_exit(&vcache_lock);

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	if (vn_lock(vp, LK_EXCLUSIVE)) {
		vnpanic(vp, "%s: cannot lock", __func__);
	}
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Done with purge, change operations vector. */
	mutex_enter(vp->v_interlock);
	vp->v_op = spec_vnodeop_p;
	vp->v_vflag |= VV_MPSAFE;
	vp->v_vflag &= ~VV_LOCKSWORK;
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

	vrele(vp);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}
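
/*
 * Illustrative sketch only: a caller that must not sleep would probe
 * with VDEAD_NOWAIT and handle both of the non-zero results:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error == ENOENT)
 *		// the vnode is dead, give up
 *	else if (error == EBUSY)
 *		// the vnode is being reclaimed, retry or bail out
 */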

int
vfs_drainvnodes(void)
{
	int i, gen;

	mutex_enter(&vdrain_lock);
	for (i = 0; i < 2; i++) {
		gen = vdrain_gen;
		while (gen == vdrain_gen) {
			cv_broadcast(&vdrain_cv);
			cv_wait(&vdrain_gen_cv, &vdrain_lock);
		}
	}
	mutex_exit(&vdrain_lock);

	if (numvnodes >= desiredvnodes)
		return EBUSY;

	if (vcache_hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}

void
vshareilock(vnode_t *tvp, vnode_t *fvp)
{
	kmutex_t *oldlock;

	oldlock = tvp->v_interlock;
	mutex_obj_hold(fvp->v_interlock);
	tvp->v_interlock = fvp->v_interlock;
	mutex_obj_free(oldlock);
}