xref: /netbsd-src/sys/kern/vfs_vnode.c (revision dd3ee07da436799d8de85f3055253118b76bf345)
1 /*	$NetBSD: vfs_vnode.c,v 1.143 2022/04/09 23:45:45 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1989, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  * (c) UNIX System Laboratories, Inc.
37  * All or some portions of this file are derived from material licensed
38  * to the University of California by American Telephone and Telegraph
39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40  * the permission of UNIX System Laboratories, Inc.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
67  */
68 
69 /*
70  * The vnode cache subsystem.
71  *
72  * Life-cycle
73  *
74  *	Normally, there are two points where new vnodes are created:
75  *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
76  *	starts in one of the following ways:
77  *
78  *	- Allocation, via vcache_get(9) or vcache_new(9).
79  *	- Reclamation of an inactive vnode, via vcache_vget(9).
80  *
81  *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
82  *	was another, traditional way.  Currently, only the draining thread
83  *	recycles vnodes.  This behaviour might be revisited.
84  *
85  *	The life-cycle ends when the last reference is dropped, usually
86  *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
87  *	the file system that the vnode is inactive.  Via this call, the file
88  *	system indicates whether the vnode can be recycled (usually, it checks
89  *	its own references, e.g. the link count, or whether the file was removed).
90  *
91  *	Depending on that indication, the vnode can be put onto a free list
92  *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9)
93  *	to disassociate the underlying file system from the vnode, and
94  *	finally destroyed.
95  *
96  * Vnode state
97  *
98  *	Vnode is always in one of six states:
99  *	- MARKER	This is a marker vnode to help list traversal.  It
100  *			will never change its state.
101  *	- LOADING	Vnode is associating with the underlying file
102  *			system and is not yet ready to use.
103  *	- LOADED	Vnode has an associated underlying file system
104  *			and is ready to use.
105  *	- BLOCKED	Vnode is active but cannot get new references.
106  *	- RECLAIMING	Vnode is disassociating from the underlying file
107  *			system.
108  *	- RECLAIMED	Vnode has disassociated from the underlying
109  *			file system and is dead.
110  *
111  *	Valid state changes are:
112  *	LOADING -> LOADED
113  *			Vnode has been initialised in vcache_get() or
114  *			vcache_new() and is ready to use.
115  *	BLOCKED -> RECLAIMING
116  *			Vnode starts disassociation from underlying file
117  *			system in vcache_reclaim().
118  *	RECLAIMING -> RECLAIMED
119  *			Vnode finished disassociation from underlying file
120  *			system in vcache_reclaim().
121  *	LOADED -> BLOCKED
122  *			Either vcache_rekey*() is changing the vnode key or
123  *			vrelel() is about to call VOP_INACTIVE().
124  *	BLOCKED -> LOADED
125  *			The block condition is over.
126  *	LOADING -> RECLAIMED
127  *			Either vcache_get() or vcache_new() failed to
128  *			associate the underlying file system or vcache_rekey*()
129  *			drops a vnode used as a placeholder.
130  *
131  *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
132  *	and it is possible to wait for a state change.
133  *
134  *	State is protected with v_interlock with one exception:
135  *	to change from LOADING both v_interlock and vcache_lock must be held
136  *	so it is possible to check "state == LOADING" without holding
137  *	v_interlock.  See vcache_get() for details.
138  *
139  * Reference counting
140  *
141  *	A vnode is considered active if its reference count
142  *	(vnode_t::v_usecount) is non-zero.  It is maintained using the
143  *	vref(9), vrele(9) and vput(9) routines.  Common points holding
144  *	references are e.g. open files, current working directories and mounts.
145  *
146  *	v_usecount is adjusted with atomic operations, however to change
147  *	from a non-zero value to zero the interlock must also be held.
148  */
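
/*
 * Illustrative life-cycle of a vnode reference (a sketch only; "mp" and
 * "key" and the exact error handling are assumptions, not code from
 * this file):
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vcache_get(mp, &key, sizeof(key), &vp);
 *	if (error)
 *		return error;			(vp is now referenced)
 *	vn_lock(vp, LK_SHARED | LK_RETRY);	(lock for VOP_*() calls)
 *	... VOP_*(vp) ...
 *	vput(vp);				(unlock and drop the reference)
 */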
149 
150 #include <sys/cdefs.h>
151 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.143 2022/04/09 23:45:45 riastradh Exp $");
152 
153 #ifdef _KERNEL_OPT
154 #include "opt_pax.h"
155 #endif
156 
157 #include <sys/param.h>
158 #include <sys/kernel.h>
159 
160 #include <sys/atomic.h>
161 #include <sys/buf.h>
162 #include <sys/conf.h>
163 #include <sys/device.h>
164 #include <sys/hash.h>
165 #include <sys/kauth.h>
166 #include <sys/kmem.h>
167 #include <sys/kthread.h>
168 #include <sys/module.h>
169 #include <sys/mount.h>
170 #include <sys/namei.h>
171 #include <sys/pax.h>
172 #include <sys/syscallargs.h>
173 #include <sys/sysctl.h>
174 #include <sys/systm.h>
175 #include <sys/vnode_impl.h>
176 #include <sys/wapbl.h>
177 #include <sys/fstrans.h>
178 
179 #include <uvm/uvm.h>
180 #include <uvm/uvm_readahead.h>
181 #include <uvm/uvm_stat.h>
182 
183 /* Flags to vrelel. */
184 #define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */
185 
186 #define	LRU_VRELE	0
187 #define	LRU_FREE	1
188 #define	LRU_HOLD	2
189 #define	LRU_COUNT	3
190 
191 /*
192  * There are three lru lists: one holds vnodes waiting for async release,
193  * one is for vnodes which have no buffer/page references and one for those
194  * which do (i.e.  v_holdcnt is non-zero).  We put the lists into a single,
195  * private cache line as vnodes migrate between them while under the same
196  * lock (vdrain_lock).
197  */
198 u_int			numvnodes		__cacheline_aligned;
199 static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
200 static kmutex_t		vdrain_lock		__cacheline_aligned;
201 static kcondvar_t	vdrain_cv;
202 static int		vdrain_gen;
203 static kcondvar_t	vdrain_gen_cv;
204 static bool		vdrain_retry;
205 static lwp_t *		vdrain_lwp;
206 SLIST_HEAD(hashhead, vnode_impl);
207 static kmutex_t		vcache_lock		__cacheline_aligned;
208 static kcondvar_t	vcache_cv;
209 static u_int		vcache_hashsize;
210 static u_long		vcache_hashmask;
211 static struct hashhead	*vcache_hashtab;
212 static pool_cache_t	vcache_pool;
213 static void		lru_requeue(vnode_t *, vnodelst_t *);
214 static vnodelst_t *	lru_which(vnode_t *);
215 static vnode_impl_t *	vcache_alloc(void);
216 static void		vcache_dealloc(vnode_impl_t *);
217 static void		vcache_free(vnode_impl_t *);
218 static void		vcache_init(void);
219 static void		vcache_reinit(void);
220 static void		vcache_reclaim(vnode_t *);
221 static void		vrelel(vnode_t *, int, int);
222 static void		vdrain_thread(void *);
223 static void		vnpanic(vnode_t *, const char *, ...)
224     __printflike(2, 3);
225 
226 /* Routines having to do with the management of the vnode table. */
227 extern struct mount	*dead_rootmount;
228 extern int		(**dead_vnodeop_p)(void *);
229 extern int		(**spec_vnodeop_p)(void *);
230 extern struct vfsops	dead_vfsops;
231 
232 /*
233  * The high bit of v_usecount is a gate for vcache_tryvget().  It's set
234  * only when the vnode state is LOADED.
235  * The next bit of v_usecount is a flag for vrelel().  It's set
236  * from vcache_vget() and vcache_tryvget() whenever the operation succeeds.
237  */
238 #define	VUSECOUNT_MASK	0x3fffffff
239 #define	VUSECOUNT_GATE	0x80000000
240 #define	VUSECOUNT_VGET	0x40000000
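
/*
 * Illustrative layout of v_usecount (derived from the masks above):
 *
 *	 31     30     29                                  0
 *	+------+------+------------------------------------+
 *	| GATE | VGET |          reference count           |
 *	+------+------+------------------------------------+
 */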
241 
242 /*
243  * Return the current usecount of a vnode.
244  */
245 inline int
246 vrefcnt(struct vnode *vp)
247 {
248 
249 	return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
250 }
251 
252 /* Vnode state operations and diagnostics. */
253 
254 #if defined(DIAGNOSTIC)
255 
256 #define VSTATE_VALID(state) \
257 	((state) != VS_ACTIVE && (state) != VS_MARKER)
258 #define VSTATE_GET(vp) \
259 	vstate_assert_get((vp), __func__, __LINE__)
260 #define VSTATE_CHANGE(vp, from, to) \
261 	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
262 #define VSTATE_WAIT_STABLE(vp) \
263 	vstate_assert_wait_stable((vp), __func__, __LINE__)
264 
265 void
266 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
267     bool has_lock)
268 {
269 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
270 	int refcnt = vrefcnt(vp);
271 
272 	if (!has_lock) {
273 		/*
274 		 * Prevent predictive loads from the CPU, but check the state
275 		 * without locking first.
276 		 *
277 		 * XXX what does this pair with?
278 		 */
279 		membar_enter();
280 		if (state == VS_ACTIVE && refcnt > 0 &&
281 		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
282 			return;
283 		if (vip->vi_state == state)
284 			return;
285 		mutex_enter((vp)->v_interlock);
286 	}
287 
288 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
289 
290 	if ((state == VS_ACTIVE && refcnt > 0 &&
291 	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
292 	    vip->vi_state == state) {
293 		if (!has_lock)
294 			mutex_exit((vp)->v_interlock);
295 		return;
296 	}
297 	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
298 	    vstate_name(vip->vi_state), refcnt,
299 	    vstate_name(state), func, line);
300 }
301 
302 static enum vnode_state
303 vstate_assert_get(vnode_t *vp, const char *func, int line)
304 {
305 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
306 
307 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
308 	if (! VSTATE_VALID(vip->vi_state))
309 		vnpanic(vp, "state is %s at %s:%d",
310 		    vstate_name(vip->vi_state), func, line);
311 
312 	return vip->vi_state;
313 }
314 
315 static void
316 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
317 {
318 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
319 
320 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
321 	if (! VSTATE_VALID(vip->vi_state))
322 		vnpanic(vp, "state is %s at %s:%d",
323 		    vstate_name(vip->vi_state), func, line);
324 
325 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
326 		cv_wait(&vp->v_cv, vp->v_interlock);
327 
328 	if (! VSTATE_VALID(vip->vi_state))
329 		vnpanic(vp, "state is %s at %s:%d",
330 		    vstate_name(vip->vi_state), func, line);
331 }
332 
333 static void
334 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
335     const char *func, int line)
336 {
337 	bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
338 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
339 
340 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
341 	if (from == VS_LOADING)
342 		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
343 
344 	if (! VSTATE_VALID(from))
345 		vnpanic(vp, "from is %s at %s:%d",
346 		    vstate_name(from), func, line);
347 	if (! VSTATE_VALID(to))
348 		vnpanic(vp, "to is %s at %s:%d",
349 		    vstate_name(to), func, line);
350 	if (vip->vi_state != from)
351 		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
352 		    vstate_name(vip->vi_state), vstate_name(from), func, line);
353 	if ((from == VS_LOADED) != gated)
354 		vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
355 		    vstate_name(vip->vi_state), gated, func, line);
356 
357 	/* Open/close the gate for vcache_tryvget(). */
358 	if (to == VS_LOADED) {
359 #ifndef __HAVE_ATOMIC_AS_MEMBAR
360 		membar_release();
361 #endif
362 		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
363 	} else {
364 		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
365 	}
366 
367 	vip->vi_state = to;
368 	if (from == VS_LOADING)
369 		cv_broadcast(&vcache_cv);
370 	if (to == VS_LOADED || to == VS_RECLAIMED)
371 		cv_broadcast(&vp->v_cv);
372 }
373 
374 #else /* defined(DIAGNOSTIC) */
375 
376 #define VSTATE_GET(vp) \
377 	(VNODE_TO_VIMPL((vp))->vi_state)
378 #define VSTATE_CHANGE(vp, from, to) \
379 	vstate_change((vp), (from), (to))
380 #define VSTATE_WAIT_STABLE(vp) \
381 	vstate_wait_stable((vp))
382 void
383 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
384     bool has_lock)
385 {
386 
387 }
388 
389 static void
390 vstate_wait_stable(vnode_t *vp)
391 {
392 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
393 
394 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
395 		cv_wait(&vp->v_cv, vp->v_interlock);
396 }
397 
398 static void
399 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
400 {
401 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
402 
403 	/* Open/close the gate for vcache_tryvget(). */
404 	if (to == VS_LOADED) {
405 #ifndef __HAVE_ATOMIC_AS_MEMBAR
406 		membar_release();
407 #endif
408 		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
409 	} else {
410 		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
411 	}
412 
413 	vip->vi_state = to;
414 	if (from == VS_LOADING)
415 		cv_broadcast(&vcache_cv);
416 	if (to == VS_LOADED || to == VS_RECLAIMED)
417 		cv_broadcast(&vp->v_cv);
418 }
419 
420 #endif /* defined(DIAGNOSTIC) */
421 
422 void
423 vfs_vnode_sysinit(void)
424 {
425 	int error __diagused, i;
426 
427 	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
428 	KASSERT(dead_rootmount != NULL);
429 	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;
430 
431 	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
432 	for (i = 0; i < LRU_COUNT; i++) {
433 		TAILQ_INIT(&lru_list[i]);
434 	}
435 	vcache_init();
436 
437 	cv_init(&vdrain_cv, "vdrain");
438 	cv_init(&vdrain_gen_cv, "vdrainwt");
439 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
440 	    NULL, &vdrain_lwp, "vdrain");
441 	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
442 }
443 
444 /*
445  * Allocate a new marker vnode.
446  */
447 vnode_t *
448 vnalloc_marker(struct mount *mp)
449 {
450 	vnode_impl_t *vip;
451 	vnode_t *vp;
452 
453 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
454 	memset(vip, 0, sizeof(*vip));
455 	vp = VIMPL_TO_VNODE(vip);
456 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
457 	vp->v_mount = mp;
458 	vp->v_type = VBAD;
459 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
460 	klist_init(&vp->v_klist);
461 	vip->vi_state = VS_MARKER;
462 
463 	return vp;
464 }
465 
466 /*
467  * Free a marker vnode.
468  */
469 void
470 vnfree_marker(vnode_t *vp)
471 {
472 	vnode_impl_t *vip;
473 
474 	vip = VNODE_TO_VIMPL(vp);
475 	KASSERT(vip->vi_state == VS_MARKER);
476 	mutex_obj_free(vp->v_interlock);
477 	uvm_obj_destroy(&vp->v_uobj, true);
478 	klist_fini(&vp->v_klist);
479 	pool_cache_put(vcache_pool, vip);
480 }
481 
482 /*
483  * Test a vnode for being a marker vnode.
484  */
485 bool
486 vnis_marker(vnode_t *vp)
487 {
488 
489 	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
490 }
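
/*
 * Marker vnodes allow an lru list to be traversed while vdrain_lock is
 * dropped and retaken; vrele_flush() and vdrain_thread() below use this
 * idiom (sketch):
 *
 *	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
 *	mutex_enter(&vdrain_lock);
 *	TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
 *	while ((vip = TAILQ_NEXT(marker, vi_lrulist)) != NULL) {
 *		TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
 *		TAILQ_INSERT_AFTER(&lru_list[i], vip, marker, vi_lrulist);
 *		if (vnis_marker(VIMPL_TO_VNODE(vip)))
 *			continue;
 *		... may drop and retake vdrain_lock here ...
 *	}
 *	TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
 *	mutex_exit(&vdrain_lock);
 *	vnfree_marker(VIMPL_TO_VNODE(marker));
 */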
491 
492 /*
493  * Return the lru list this node should be on.
494  */
495 static vnodelst_t *
496 lru_which(vnode_t *vp)
497 {
498 
499 	KASSERT(mutex_owned(vp->v_interlock));
500 
501 	if (vp->v_holdcnt > 0)
502 		return &lru_list[LRU_HOLD];
503 	else
504 		return &lru_list[LRU_FREE];
505 }
506 
507 /*
508  * Put the vnode at the end of the given list.
509  * Both the current and the new list may be NULL, as on vnode alloc/free.
510  * Adjust numvnodes and signal the vdrain thread if there is work.
511  */
512 static void
513 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
514 {
515 	vnode_impl_t *vip;
516 	int d;
517 
518 	/*
519 	 * If the vnode is on the correct list, and was put there recently,
520 	 * then leave it be, thus avoiding huge cache and lock contention.
521 	 */
522 	vip = VNODE_TO_VIMPL(vp);
523 	if (listhd == vip->vi_lrulisthd &&
524 	    (getticks() - vip->vi_lrulisttm) < hz) {
525 		return;
526 	}
527 
528 	mutex_enter(&vdrain_lock);
529 	d = 0;
530 	if (vip->vi_lrulisthd != NULL)
531 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
532 	else
533 		d++;
534 	vip->vi_lrulisthd = listhd;
535 	vip->vi_lrulisttm = getticks();
536 	if (vip->vi_lrulisthd != NULL)
537 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
538 	else
539 		d--;
540 	if (d != 0) {
541 		/*
542 		 * Looks strange?  This is not a bug.  Don't store
543 		 * numvnodes unless there is a change - avoid false
544 		 * sharing on MP.
545 		 */
546 		numvnodes += d;
547 	}
548 	if ((d > 0 && numvnodes > desiredvnodes) ||
549 	    listhd == &lru_list[LRU_VRELE])
550 		cv_signal(&vdrain_cv);
551 	mutex_exit(&vdrain_lock);
552 }
553 
554 /*
555  * Release deferred vrele vnodes for this mount.
556  * Called with file system suspended.
557  */
558 void
559 vrele_flush(struct mount *mp)
560 {
561 	vnode_impl_t *vip, *marker;
562 	vnode_t *vp;
563 	int when = 0;
564 
565 	KASSERT(fstrans_is_owner(mp));
566 
567 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
568 
569 	mutex_enter(&vdrain_lock);
570 	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);
571 
572 	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
573 		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
574 		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
575 		    vi_lrulist);
576 		vp = VIMPL_TO_VNODE(vip);
577 		if (vnis_marker(vp))
578 			continue;
579 
580 		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
581 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
582 		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
583 		vip->vi_lrulisttm = getticks();
584 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
585 		mutex_exit(&vdrain_lock);
586 
587 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
588 		mutex_enter(vp->v_interlock);
589 		vrelel(vp, 0, LK_EXCLUSIVE);
590 
591 		if (getticks() > when) {
592 			yield();
593 			when = getticks() + hz / 10;
594 		}
595 
596 		mutex_enter(&vdrain_lock);
597 	}
598 
599 	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
600 	mutex_exit(&vdrain_lock);
601 
602 	vnfree_marker(VIMPL_TO_VNODE(marker));
603 }
604 
605 /*
606  * Reclaim a cached vnode.  Used from vdrain_thread only.
607  */
608 static __inline void
609 vdrain_remove(vnode_t *vp)
610 {
611 	struct mount *mp;
612 
613 	KASSERT(mutex_owned(&vdrain_lock));
614 
615 	/* Probe usecount (unlocked). */
616 	if (vrefcnt(vp) > 0)
617 		return;
618 	/* Try v_interlock -- we lock the wrong direction! */
619 	if (!mutex_tryenter(vp->v_interlock))
620 		return;
621 	/* Probe usecount and state. */
622 	if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
623 		mutex_exit(vp->v_interlock);
624 		return;
625 	}
626 	mp = vp->v_mount;
627 	if (fstrans_start_nowait(mp) != 0) {
628 		mutex_exit(vp->v_interlock);
629 		return;
630 	}
631 	vdrain_retry = true;
632 	mutex_exit(&vdrain_lock);
633 
634 	if (vcache_vget(vp) == 0) {
635 		if (!vrecycle(vp)) {
636 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
637 			mutex_enter(vp->v_interlock);
638 			vrelel(vp, 0, LK_EXCLUSIVE);
639 		}
640 	}
641 	fstrans_done(mp);
642 
643 	mutex_enter(&vdrain_lock);
644 }
645 
646 /*
647  * Release a cached vnode.  Used from vdrain_thread only.
648  */
649 static __inline void
650 vdrain_vrele(vnode_t *vp)
651 {
652 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
653 	struct mount *mp;
654 
655 	KASSERT(mutex_owned(&vdrain_lock));
656 
657 	mp = vp->v_mount;
658 	if (fstrans_start_nowait(mp) != 0)
659 		return;
660 
661 	/*
662 	 * First remove the vnode from the vrele list.
663 	 * Put it on the LRU_HOLD list; the last vrele()
664 	 * will put it back onto the right list before
665 	 * its usecount reaches zero.
666 	 */
667 	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
668 	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
669 	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
670 	vip->vi_lrulisttm = getticks();
671 	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
672 
673 	vdrain_retry = true;
674 	mutex_exit(&vdrain_lock);
675 
676 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
677 	mutex_enter(vp->v_interlock);
678 	vrelel(vp, 0, LK_EXCLUSIVE);
679 	fstrans_done(mp);
680 
681 	mutex_enter(&vdrain_lock);
682 }
683 
684 /*
685  * Helper thread to keep the number of vnodes below desiredvnodes
686  * and release vnodes from asynchronous vrele.
687  */
688 static void
689 vdrain_thread(void *cookie)
690 {
691 	int i;
692 	u_int target;
693 	vnode_impl_t *vip, *marker;
694 
695 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
696 
697 	mutex_enter(&vdrain_lock);
698 
699 	for (;;) {
700 		vdrain_retry = false;
701 		target = desiredvnodes - desiredvnodes/10;
702 
703 		for (i = 0; i < LRU_COUNT; i++) {
704 			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
705 			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
706 				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
707 				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
708 				    vi_lrulist);
709 				if (vnis_marker(VIMPL_TO_VNODE(vip)))
710 					continue;
711 				if (i == LRU_VRELE)
712 					vdrain_vrele(VIMPL_TO_VNODE(vip));
713 				else if (numvnodes < target)
714 					break;
715 				else
716 					vdrain_remove(VIMPL_TO_VNODE(vip));
717 			}
718 			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
719 		}
720 
721 		if (vdrain_retry) {
722 			kpause("vdrainrt", false, 1, &vdrain_lock);
723 		} else {
724 			vdrain_gen++;
725 			cv_broadcast(&vdrain_gen_cv);
726 			cv_wait(&vdrain_cv, &vdrain_lock);
727 		}
728 	}
729 }
730 
731 /*
732  * Try to drop a reference on a vnode.  Abort if we would be releasing
733  * the last reference.  Note: this _must_ succeed if not the last one.
734  */
735 static bool
736 vtryrele(vnode_t *vp)
737 {
738 	u_int use, next;
739 
740 #ifndef __HAVE_ATOMIC_AS_MEMBAR
741 	membar_release();
742 #endif
743 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
744 		if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
745 			return false;
746 		}
747 		KASSERT((use & VUSECOUNT_MASK) > 1);
748 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
749 		if (__predict_true(next == use)) {
750 			return true;
751 		}
752 	}
753 }
754 
755 /*
756  * vput: unlock and release the reference.
757  */
758 void
759 vput(vnode_t *vp)
760 {
761 	int lktype;
762 
763 	/*
764 	 * Do an unlocked check of the usecount.  If it looks like we're not
765 	 * about to drop the last reference, then unlock the vnode and try
766 	 * to drop the reference.  If it ends up being the last reference
767 	 * after all, vrelel() can fix it all up.  Most of the time this
768 	 * will all go to plan.
769 	 */
770 	if (vrefcnt(vp) > 1) {
771 		VOP_UNLOCK(vp);
772 		if (vtryrele(vp)) {
773 			return;
774 		}
775 		lktype = LK_NONE;
776 	} else {
777 		lktype = VOP_ISLOCKED(vp);
778 		KASSERT(lktype != LK_NONE);
779 	}
780 	mutex_enter(vp->v_interlock);
781 	vrelel(vp, 0, lktype);
782 }
783 
784 /*
785  * Vnode release.  If the reference count drops to zero, call the inactive
786  * routine and either return the vnode to the freelist or free it to the pool.
787  */
788 static void
789 vrelel(vnode_t *vp, int flags, int lktype)
790 {
791 	const bool async = ((flags & VRELEL_ASYNC) != 0);
792 	bool recycle, defer, objlock_held;
793 	u_int use, next;
794 	int error;
795 
796 	objlock_held = false;
797 
798 retry:
799 	KASSERT(mutex_owned(vp->v_interlock));
800 
801 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
802 	    VSTATE_GET(vp) != VS_RECLAIMED)) {
803 		vnpanic(vp, "dead but not clean");
804 	}
805 
806 	/*
807 	 * If not the last reference, just unlock and drop the reference count.
808 	 *
809 	 * Otherwise make sure we pass a point in time where we hold the
810 	 * last reference with VGET flag unset.
811 	 */
812 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
813 		if (__predict_false((use & VUSECOUNT_MASK) > 1)) {
814 			if (objlock_held) {
815 				objlock_held = false;
816 				rw_exit(vp->v_uobj.vmobjlock);
817 			}
818 			if (lktype != LK_NONE) {
819 				mutex_exit(vp->v_interlock);
820 				lktype = LK_NONE;
821 				VOP_UNLOCK(vp);
822 				mutex_enter(vp->v_interlock);
823 			}
824 			if (vtryrele(vp)) {
825 				mutex_exit(vp->v_interlock);
826 				return;
827 			}
828 			next = atomic_load_relaxed(&vp->v_usecount);
829 			continue;
830 		}
831 		KASSERT((use & VUSECOUNT_MASK) == 1);
832 		next = use & ~VUSECOUNT_VGET;
833 		if (next != use) {
834 			next = atomic_cas_uint(&vp->v_usecount, use, next);
835 		}
836 		if (__predict_true(next == use)) {
837 			break;
838 		}
839 	}
840 #ifndef __HAVE_ATOMIC_AS_MEMBAR
841 	membar_acquire();
842 #endif
843 	if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
844 		vnpanic(vp, "%s: bad ref count", __func__);
845 	}
846 
847 #ifdef DIAGNOSTIC
848 	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
849 	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
850 		vprint("vrelel: missing VOP_CLOSE()", vp);
851 	}
852 #endif
853 
854 	/*
855 	 * If already clean there is no need to lock, defer or
856 	 * deactivate this node.
857 	 */
858 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
859 		if (objlock_held) {
860 			objlock_held = false;
861 			rw_exit(vp->v_uobj.vmobjlock);
862 		}
863 		if (lktype != LK_NONE) {
864 			mutex_exit(vp->v_interlock);
865 			lktype = LK_NONE;
866 			VOP_UNLOCK(vp);
867 			mutex_enter(vp->v_interlock);
868 		}
869 		goto out;
870 	}
871 
872 	/*
873 	 * First try to get the vnode locked for VOP_INACTIVE().
874 	 * Defer vnode release to the vdrain thread if the caller requests
875 	 * it explicitly, is the pagedaemon, or if taking the lock failed.
876 	 */
877 	defer = false;
878 	if ((curlwp == uvm.pagedaemon_lwp) || async) {
879 		defer = true;
880 	} else if (lktype == LK_SHARED) {
881 		/* Excellent chance of getting the lock, if the last ref. */
882 		error = vn_lock(vp, LK_UPGRADE | LK_RETRY | LK_NOWAIT);
883 		if (error != 0) {
884 			defer = true;
885 		} else {
886 			lktype = LK_EXCLUSIVE;
887 		}
888 	} else if (lktype == LK_NONE) {
889 		/* Excellent chance of getting the lock, if the last ref. */
890 		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
891 		if (error != 0) {
892 			defer = true;
893 		} else {
894 			lktype = LK_EXCLUSIVE;
895 		}
896 	}
897 	KASSERT(mutex_owned(vp->v_interlock));
898 	if (defer) {
899 		/*
900 		 * Defer reclaim to the kthread; it's not safe to
901 		 * clean it here.  We donate it our last reference.
902 		 */
903 		if (lktype != LK_NONE) {
904 			mutex_exit(vp->v_interlock);
905 			VOP_UNLOCK(vp);
906 			mutex_enter(vp->v_interlock);
907 		}
908 		lru_requeue(vp, &lru_list[LRU_VRELE]);
909 		mutex_exit(vp->v_interlock);
910 		return;
911 	}
912 	KASSERT(lktype == LK_EXCLUSIVE);
913 
914 	/* If the node gained another reference, retry. */
915 	use = atomic_load_relaxed(&vp->v_usecount);
916 	if ((use & VUSECOUNT_VGET) != 0) {
917 		goto retry;
918 	}
919 	KASSERT((use & VUSECOUNT_MASK) == 1);
920 
921 	if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP|VI_WRMAP)) != 0 ||
922 	    (vp->v_vflag & VV_MAPPED) != 0) {
923 		/* Take care of space accounting. */
924 		if (!objlock_held) {
925 			objlock_held = true;
926 			if (!rw_tryenter(vp->v_uobj.vmobjlock, RW_WRITER)) {
927 				mutex_exit(vp->v_interlock);
928 				rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
929 				mutex_enter(vp->v_interlock);
930 				goto retry;
931 			}
932 		}
933 		if ((vp->v_iflag & VI_EXECMAP) != 0) {
934 			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
935 		}
936 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
937 		vp->v_vflag &= ~VV_MAPPED;
938 	}
939 	if (objlock_held) {
940 		objlock_held = false;
941 		rw_exit(vp->v_uobj.vmobjlock);
942 	}
943 
944 	/*
945 	 * Deactivate the vnode, but preserve our reference across
946 	 * the call to VOP_INACTIVE().
947 	 *
948 	 * If VOP_INACTIVE() indicates that the file has been
949 	 * deleted, then recycle the vnode.
950 	 *
951 	 * Note that VOP_INACTIVE() will not drop the vnode lock.
952 	 */
953 	mutex_exit(vp->v_interlock);
954 	recycle = false;
955 	VOP_INACTIVE(vp, &recycle);
956 	if (!recycle) {
957 		lktype = LK_NONE;
958 		VOP_UNLOCK(vp);
959 	}
960 	mutex_enter(vp->v_interlock);
961 
962 	/*
963 	 * Block new references then check again to see if a
964 	 * new reference was acquired in the meantime.  If
965 	 * it was, restore the vnode state and try again.
966 	 */
967 	if (recycle) {
968 		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
969 		use = atomic_load_relaxed(&vp->v_usecount);
970 		if ((use & VUSECOUNT_VGET) != 0) {
971 			VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
972 			goto retry;
973 		}
974 		KASSERT((use & VUSECOUNT_MASK) == 1);
975 	}
976 
977 	/*
978 	 * Recycle the vnode if the file is now unused (unlinked).
979 	 */
980 	if (recycle) {
981 		VSTATE_ASSERT(vp, VS_BLOCKED);
982 		KASSERT(lktype == LK_EXCLUSIVE);
983 		/* vcache_reclaim drops the lock. */
984 		lktype = LK_NONE;
985 		vcache_reclaim(vp);
986 	}
987 	KASSERT(vrefcnt(vp) > 0);
988 	KASSERT(lktype == LK_NONE);
989 
990 out:
991 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
992 		if (__predict_false((use & VUSECOUNT_VGET) != 0 &&
993 		    (use & VUSECOUNT_MASK) == 1)) {
994 			/* Gained and released another reference, retry. */
995 			goto retry;
996 		}
997 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
998 		if (__predict_true(next == use)) {
999 			if (__predict_false((use & VUSECOUNT_MASK) != 1)) {
1000 				/* Gained another reference. */
1001 				mutex_exit(vp->v_interlock);
1002 				return;
1003 			}
1004 			break;
1005 		}
1006 	}
1007 #ifndef __HAVE_ATOMIC_AS_MEMBAR
1008 	membar_acquire();
1009 #endif
1010 
1011 	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
1012 		/*
1013 		 * It's clean so destroy it.  It isn't referenced
1014 		 * anywhere since it has been reclaimed.
1015 		 */
1016 		vcache_free(VNODE_TO_VIMPL(vp));
1017 	} else {
1018 		/*
1019 		 * Otherwise, put it back onto the freelist.  It
1020 		 * can't be destroyed while still associated with
1021 		 * a file system.
1022 		 */
1023 		lru_requeue(vp, lru_which(vp));
1024 		mutex_exit(vp->v_interlock);
1025 	}
1026 }
1027 
1028 void
1029 vrele(vnode_t *vp)
1030 {
1031 
1032 	if (vtryrele(vp)) {
1033 		return;
1034 	}
1035 	mutex_enter(vp->v_interlock);
1036 	vrelel(vp, 0, LK_NONE);
1037 }
1038 
1039 /*
1040  * Asynchronous vnode release: the vnode is released in a different context.
1041  */
1042 void
1043 vrele_async(vnode_t *vp)
1044 {
1045 
1046 	if (vtryrele(vp)) {
1047 		return;
1048 	}
1049 	mutex_enter(vp->v_interlock);
1050 	vrelel(vp, VRELEL_ASYNC, LK_NONE);
1051 }
1052 
1053 /*
1054  * Vnode reference, where a reference is already held by some other
1055  * object (for example, a file structure).
1056  *
1057  * NB: lockless code sequences may rely on this not blocking.
1058  */
1059 void
1060 vref(vnode_t *vp)
1061 {
1062 
1063 	KASSERT(vrefcnt(vp) > 0);
1064 
1065 	atomic_inc_uint(&vp->v_usecount);
1066 }
1067 
1068 /*
1069  * Page or buffer structure gets a reference.
1070  * Called with v_interlock held.
1071  */
1072 void
1073 vholdl(vnode_t *vp)
1074 {
1075 
1076 	KASSERT(mutex_owned(vp->v_interlock));
1077 
1078 	if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
1079 		lru_requeue(vp, lru_which(vp));
1080 }
1081 
1082 /*
1083  * Page or buffer structure gets a reference.
1084  */
1085 void
1086 vhold(vnode_t *vp)
1087 {
1088 
1089 	mutex_enter(vp->v_interlock);
1090 	vholdl(vp);
1091 	mutex_exit(vp->v_interlock);
1092 }
1093 
1094 /*
1095  * Page or buffer structure frees a reference.
1096  * Called with v_interlock held.
1097  */
1098 void
1099 holdrelel(vnode_t *vp)
1100 {
1101 
1102 	KASSERT(mutex_owned(vp->v_interlock));
1103 
1104 	if (vp->v_holdcnt <= 0) {
1105 		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
1106 	}
1107 
1108 	vp->v_holdcnt--;
1109 	if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1110 		lru_requeue(vp, lru_which(vp));
1111 }
1112 
1113 /*
1114  * Page or buffer structure frees a reference.
1115  */
1116 void
1117 holdrele(vnode_t *vp)
1118 {
1119 
1120 	mutex_enter(vp->v_interlock);
1121 	holdrelel(vp);
1122 	mutex_exit(vp->v_interlock);
1123 }
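
/*
 * A sketch of the expected use, assuming the buffer cache pattern of
 * bgetvp()/brelvp(): a buffer attached to a vnode takes a hold
 * reference so the vnode stays on the LRU_HOLD list:
 *
 *	mutex_enter(vp->v_interlock);
 *	vholdl(vp);				(buffer now holds vp)
 *	mutex_exit(vp->v_interlock);
 *	...
 *	holdrele(vp);				(buffer detached again)
 */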
1124 
1125 /*
1126  * Recycle an unused vnode if caller holds the last reference.
1127  */
1128 bool
1129 vrecycle(vnode_t *vp)
1130 {
1131 	int error __diagused;
1132 
1133 	mutex_enter(vp->v_interlock);
1134 
1135 	/* If the vnode is already clean we're done. */
1136 	VSTATE_WAIT_STABLE(vp);
1137 	if (VSTATE_GET(vp) != VS_LOADED) {
1138 		VSTATE_ASSERT(vp, VS_RECLAIMED);
1139 		vrelel(vp, 0, LK_NONE);
1140 		return true;
1141 	}
1142 
1143 	/* Prevent further references until the vnode is locked. */
1144 	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1145 
1146 	/* Make sure we hold the last reference. */
1147 	if (vrefcnt(vp) != 1) {
1148 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1149 		mutex_exit(vp->v_interlock);
1150 		return false;
1151 	}
1152 
1153 	mutex_exit(vp->v_interlock);
1154 
1155 	/*
1156 	 * On a leaf file system this lock will always succeed as we hold
1157 	 * the last reference and prevent further references.
1158 	 * On layered file systems waiting for the lock would open a can of
1159 	 * deadlocks as the lower vnodes may have other active references.
1160 	 */
1161 	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
1162 
1163 	mutex_enter(vp->v_interlock);
1164 	if (error) {
1165 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1166 		mutex_exit(vp->v_interlock);
1167 		return false;
1168 	}
1169 
1170 	KASSERT(vrefcnt(vp) == 1);
1171 	vcache_reclaim(vp);
1172 	vrelel(vp, 0, LK_NONE);
1173 
1174 	return true;
1175 }
1176 
1177 /*
1178  * Helper for vrevoke() to propagate suspension from lastmp
1179  * to thismp.  Both args may be NULL.
1180  * Returns the currently suspended file system or NULL.
1181  */
1182 static struct mount *
1183 vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
1184 {
1185 	int error;
1186 
1187 	if (lastmp == thismp)
1188 		return thismp;
1189 
1190 	if (lastmp != NULL)
1191 		vfs_resume(lastmp);
1192 
1193 	if (thismp == NULL)
1194 		return NULL;
1195 
1196 	do {
1197 		error = vfs_suspend(thismp, 0);
1198 	} while (error == EINTR || error == ERESTART);
1199 
1200 	if (error == 0)
1201 		return thismp;
1202 
1203 	KASSERT(error == EOPNOTSUPP || error == ENOENT);
1204 	return NULL;
1205 }
1206 
1207 /*
1208  * Eliminate all activity associated with the requested vnode
1209  * and with all vnodes aliased to the requested vnode.
1210  */
1211 void
1212 vrevoke(vnode_t *vp)
1213 {
1214 	struct mount *mp;
1215 	vnode_t *vq;
1216 	enum vtype type;
1217 	dev_t dev;
1218 
1219 	KASSERT(vrefcnt(vp) > 0);
1220 
1221 	mp = vrevoke_suspend_next(NULL, vp->v_mount);
1222 
1223 	mutex_enter(vp->v_interlock);
1224 	VSTATE_WAIT_STABLE(vp);
1225 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
1226 		mutex_exit(vp->v_interlock);
1227 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
1228 		atomic_inc_uint(&vp->v_usecount);
1229 		mutex_exit(vp->v_interlock);
1230 		vgone(vp);
1231 	} else {
1232 		dev = vp->v_rdev;
1233 		type = vp->v_type;
1234 		mutex_exit(vp->v_interlock);
1235 
1236 		while (spec_node_lookup_by_dev(type, dev, VDEAD_NOWAIT, &vq)
1237 		    == 0) {
1238 			mp = vrevoke_suspend_next(mp, vq->v_mount);
1239 			vgone(vq);
1240 		}
1241 	}
1242 	vrevoke_suspend_next(mp, NULL);
1243 }
1244 
1245 /*
1246  * Eliminate all activity associated with a vnode in preparation for
1247  * reuse.  Drops a reference from the vnode.
1248  */
1249 void
1250 vgone(vnode_t *vp)
1251 {
1252 	int lktype;
1253 
1254 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1255 
1256 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1257 	lktype = LK_EXCLUSIVE;
1258 	mutex_enter(vp->v_interlock);
1259 	VSTATE_WAIT_STABLE(vp);
1260 	if (VSTATE_GET(vp) == VS_LOADED) {
1261 		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1262 		vcache_reclaim(vp);
1263 		lktype = LK_NONE;
1264 	}
1265 	VSTATE_ASSERT(vp, VS_RECLAIMED);
1266 	vrelel(vp, 0, lktype);
1267 }
1268 
1269 static inline uint32_t
1270 vcache_hash(const struct vcache_key *key)
1271 {
1272 	uint32_t hash = HASH32_BUF_INIT;
1273 
1274 	KASSERT(key->vk_key_len > 0);
1275 
1276 	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
1277 	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
1278 	return hash;
1279 }
1280 
1281 static int
1282 vcache_stats(struct hashstat_sysctl *hs, bool fill)
1283 {
1284 	vnode_impl_t *vip;
1285 	uint64_t chain;
1286 
1287 	strlcpy(hs->hash_name, "vcache", sizeof(hs->hash_name));
1288 	strlcpy(hs->hash_desc, "vnode cache hash", sizeof(hs->hash_desc));
1289 	if (!fill)
1290 		return 0;
1291 
1292 	hs->hash_size = vcache_hashmask + 1;
1293 
1294 	for (size_t i = 0; i < hs->hash_size; i++) {
1295 		chain = 0;
1296 		mutex_enter(&vcache_lock);
1297 		SLIST_FOREACH(vip, &vcache_hashtab[i], vi_hash) {
1298 			chain++;
1299 		}
1300 		mutex_exit(&vcache_lock);
1301 		if (chain > 0) {
1302 			hs->hash_used++;
1303 			hs->hash_items += chain;
1304 			if (chain > hs->hash_maxchain)
1305 				hs->hash_maxchain = chain;
1306 		}
1307 		preempt_point();
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 static void
1314 vcache_init(void)
1315 {
1316 
1317 	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
1318 	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
1319 	KASSERT(vcache_pool != NULL);
1320 	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
1321 	cv_init(&vcache_cv, "vcache");
1322 	vcache_hashsize = desiredvnodes;
1323 	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
1324 	    &vcache_hashmask);
1325 	hashstat_register("vcache", vcache_stats);
1326 }
1327 
1328 static void
1329 vcache_reinit(void)
1330 {
1331 	int i;
1332 	uint32_t hash;
1333 	u_long oldmask, newmask;
1334 	struct hashhead *oldtab, *newtab;
1335 	vnode_impl_t *vip;
1336 
1337 	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
1338 	mutex_enter(&vcache_lock);
1339 	oldtab = vcache_hashtab;
1340 	oldmask = vcache_hashmask;
1341 	vcache_hashsize = desiredvnodes;
1342 	vcache_hashtab = newtab;
1343 	vcache_hashmask = newmask;
1344 	for (i = 0; i <= oldmask; i++) {
1345 		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
1346 			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
1347 			hash = vcache_hash(&vip->vi_key);
1348 			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
1349 			    vip, vi_hash);
1350 		}
1351 	}
1352 	mutex_exit(&vcache_lock);
1353 	hashdone(oldtab, HASH_SLIST, oldmask);
1354 }
1355 
1356 static inline vnode_impl_t *
1357 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
1358 {
1359 	struct hashhead *hashp;
1360 	vnode_impl_t *vip;
1361 
1362 	KASSERT(mutex_owned(&vcache_lock));
1363 
1364 	hashp = &vcache_hashtab[hash & vcache_hashmask];
1365 	SLIST_FOREACH(vip, hashp, vi_hash) {
1366 		if (key->vk_mount != vip->vi_key.vk_mount)
1367 			continue;
1368 		if (key->vk_key_len != vip->vi_key.vk_key_len)
1369 			continue;
1370 		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
1371 			continue;
1372 		return vip;
1373 	}
1374 	return NULL;
1375 }
1376 
1377 /*
1378  * Allocate a new, uninitialized vcache node.
1379  */
1380 static vnode_impl_t *
1381 vcache_alloc(void)
1382 {
1383 	vnode_impl_t *vip;
1384 	vnode_t *vp;
1385 
1386 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
1387 	vp = VIMPL_TO_VNODE(vip);
1388 	memset(vip, 0, sizeof(*vip));
1389 
1390 	rw_init(&vip->vi_lock);
1391 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
1392 
1393 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
1394 	klist_init(&vp->v_klist);
1395 	cv_init(&vp->v_cv, "vnode");
1396 	cache_vnode_init(vp);
1397 
1398 	vp->v_usecount = 1;
1399 	vp->v_type = VNON;
1400 	vp->v_size = vp->v_writesize = VSIZENOTSET;
1401 
1402 	vip->vi_state = VS_LOADING;
1403 
1404 	lru_requeue(vp, &lru_list[LRU_FREE]);
1405 
1406 	return vip;
1407 }
1408 
1409 /*
1410  * Deallocate a vcache node in state VS_LOADING.
1411  *
1412  * vcache_lock held on entry and released on return.
1413  */
1414 static void
1415 vcache_dealloc(vnode_impl_t *vip)
1416 {
1417 	vnode_t *vp;
1418 
1419 	KASSERT(mutex_owned(&vcache_lock));
1420 
1421 	vp = VIMPL_TO_VNODE(vip);
1422 	vfs_ref(dead_rootmount);
1423 	vfs_insmntque(vp, dead_rootmount);
1424 	mutex_enter(vp->v_interlock);
1425 	vp->v_op = dead_vnodeop_p;
1426 	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1427 	mutex_exit(&vcache_lock);
1428 	vrelel(vp, 0, LK_NONE);
1429 }
1430 
1431 /*
1432  * Free an unused, unreferenced vcache node.
1433  * v_interlock locked on entry.
1434  */
1435 static void
1436 vcache_free(vnode_impl_t *vip)
1437 {
1438 	vnode_t *vp;
1439 
1440 	vp = VIMPL_TO_VNODE(vip);
1441 	KASSERT(mutex_owned(vp->v_interlock));
1442 
1443 	KASSERT(vrefcnt(vp) == 0);
1444 	KASSERT(vp->v_holdcnt == 0);
1445 	KASSERT(vp->v_writecount == 0);
1446 	lru_requeue(vp, NULL);
1447 	mutex_exit(vp->v_interlock);
1448 
1449 	vfs_insmntque(vp, NULL);
1450 	if (vp->v_type == VBLK || vp->v_type == VCHR)
1451 		spec_node_destroy(vp);
1452 
1453 	mutex_obj_free(vp->v_interlock);
1454 	rw_destroy(&vip->vi_lock);
1455 	uvm_obj_destroy(&vp->v_uobj, true);
1456 	klist_fini(&vp->v_klist);
1457 	cv_destroy(&vp->v_cv);
1458 	cache_vnode_fini(vp);
1459 	pool_cache_put(vcache_pool, vip);
1460 }
1461 
1462 /*
1463  * Try to get an initial reference on this cached vnode.
1464  * Returns zero on success or EBUSY if the vnode state is not LOADED.
1465  *
1466  * NB: lockless code sequences may rely on this not blocking.
1467  */
1468 int
1469 vcache_tryvget(vnode_t *vp)
1470 {
1471 	u_int use, next;
1472 
1473 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
1474 		if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
1475 			return EBUSY;
1476 		}
1477 		next = atomic_cas_uint(&vp->v_usecount,
1478 		    use, (use + 1) | VUSECOUNT_VGET);
1479 		if (__predict_true(next == use)) {
1480 #ifndef __HAVE_ATOMIC_AS_MEMBAR
1481 			membar_acquire();
1482 #endif
1483 			return 0;
1484 		}
1485 	}
1486 }
1487 
1488 /*
1489  * Try to get an initial reference on this cached vnode.
1490  * Returns zero on success or ENOENT if the vnode has been reclaimed.
1491  * Will wait for the vnode state to be stable.
1492  *
1493  * v_interlock locked on entry and unlocked on exit.
1494  */
1495 int
1496 vcache_vget(vnode_t *vp)
1497 {
1498 	int error;
1499 
1500 	KASSERT(mutex_owned(vp->v_interlock));
1501 
1502 	/* Increment hold count to prevent vnode from disappearing. */
1503 	vp->v_holdcnt++;
1504 	VSTATE_WAIT_STABLE(vp);
1505 	vp->v_holdcnt--;
1506 
1507 	/* If this was the last reference to a reclaimed vnode free it now. */
1508 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
1509 		if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1510 			vcache_free(VNODE_TO_VIMPL(vp));
1511 		else
1512 			mutex_exit(vp->v_interlock);
1513 		return ENOENT;
1514 	}
1515 	VSTATE_ASSERT(vp, VS_LOADED);
1516 	error = vcache_tryvget(vp);
1517 	KASSERT(error == 0);
1518 	mutex_exit(vp->v_interlock);
1519 
1520 	return 0;
1521 }
1522 
1523 /*
1524  * Get a vnode / fs node pair by key and return it referenced through vpp.
1525  */
1526 int
1527 vcache_get(struct mount *mp, const void *key, size_t key_len,
1528     struct vnode **vpp)
1529 {
1530 	int error;
1531 	uint32_t hash;
1532 	const void *new_key;
1533 	struct vnode *vp;
1534 	struct vcache_key vcache_key;
1535 	vnode_impl_t *vip, *new_vip;
1536 
1537 	new_key = NULL;
1538 	*vpp = NULL;
1539 
1540 	vcache_key.vk_mount = mp;
1541 	vcache_key.vk_key = key;
1542 	vcache_key.vk_key_len = key_len;
1543 	hash = vcache_hash(&vcache_key);
1544 
1545 again:
1546 	mutex_enter(&vcache_lock);
1547 	vip = vcache_hash_lookup(&vcache_key, hash);
1548 
1549 	/* If found, take a reference or retry. */
1550 	if (__predict_true(vip != NULL)) {
1551 		/*
1552 		 * If the vnode is loading we cannot take the v_interlock
1553 		 * here as it might change during load (see uvm_obj_setlock()).
1554 		 * As changing state from VS_LOADING requires both vcache_lock
1555 		 * and v_interlock it is safe to test with vcache_lock held.
1556 		 *
1557 		 * Wait for vnodes changing state from VS_LOADING and retry.
1558 		 */
1559 		if (__predict_false(vip->vi_state == VS_LOADING)) {
1560 			cv_wait(&vcache_cv, &vcache_lock);
1561 			mutex_exit(&vcache_lock);
1562 			goto again;
1563 		}
1564 		vp = VIMPL_TO_VNODE(vip);
1565 		mutex_enter(vp->v_interlock);
1566 		mutex_exit(&vcache_lock);
1567 		error = vcache_vget(vp);
1568 		if (error == ENOENT)
1569 			goto again;
1570 		if (error == 0)
1571 			*vpp = vp;
1572 		KASSERT((error != 0) == (*vpp == NULL));
1573 		return error;
1574 	}
1575 	mutex_exit(&vcache_lock);
1576 
1577 	/* Allocate and initialize a new vcache / vnode pair. */
1578 	error = vfs_busy(mp);
1579 	if (error)
1580 		return error;
1581 	new_vip = vcache_alloc();
1582 	new_vip->vi_key = vcache_key;
1583 	vp = VIMPL_TO_VNODE(new_vip);
1584 	mutex_enter(&vcache_lock);
1585 	vip = vcache_hash_lookup(&vcache_key, hash);
1586 	if (vip == NULL) {
1587 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1588 		    new_vip, vi_hash);
1589 		vip = new_vip;
1590 	}
1591 
1592 	/* If another thread beat us inserting this node, retry. */
1593 	if (vip != new_vip) {
1594 		vcache_dealloc(new_vip);
1595 		vfs_unbusy(mp);
1596 		goto again;
1597 	}
1598 	mutex_exit(&vcache_lock);
1599 
1600 	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
1601 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
1602 	if (error) {
1603 		mutex_enter(&vcache_lock);
1604 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1605 		    new_vip, vnode_impl, vi_hash);
1606 		vcache_dealloc(new_vip);
1607 		vfs_unbusy(mp);
1608 		KASSERT(*vpp == NULL);
1609 		return error;
1610 	}
1611 	KASSERT(new_key != NULL);
1612 	KASSERT(memcmp(key, new_key, key_len) == 0);
1613 	KASSERT(vp->v_op != NULL);
1614 	vfs_insmntque(vp, mp);
1615 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1616 		vp->v_vflag |= VV_MPSAFE;
1617 	vfs_ref(mp);
1618 	vfs_unbusy(mp);
1619 
1620 	/* Finished loading, finalize node. */
1621 	mutex_enter(&vcache_lock);
1622 	new_vip->vi_key.vk_key = new_key;
1623 	mutex_enter(vp->v_interlock);
1624 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1625 	mutex_exit(vp->v_interlock);
1626 	mutex_exit(&vcache_lock);
1627 	*vpp = vp;
1628 	return 0;
1629 }
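
/*
 * Illustrative caller (a sketch modelled on a ufs-like VFS_VGET();
 * "example_vget" is hypothetical):
 *
 *	static int
 *	example_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
 *	{
 *		int error;
 *
 *		error = vcache_get(mp, &ino, sizeof(ino), vpp);
 *		if (error)
 *			return error;
 *		error = vn_lock(*vpp, LK_EXCLUSIVE);
 *		if (error) {
 *			vrele(*vpp);
 *			*vpp = NULL;
 *			return error;
 *		}
 *		return 0;
 *	}
 */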
1630 
1631 /*
1632  * Create a new vnode / fs node pair and return it referenced through vpp.
1633  */
1634 int
1635 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
1636     kauth_cred_t cred, void *extra, struct vnode **vpp)
1637 {
1638 	int error;
1639 	uint32_t hash;
1640 	struct vnode *vp, *ovp;
1641 	vnode_impl_t *vip, *ovip;
1642 
1643 	*vpp = NULL;
1644 
1645 	/* Allocate and initialize a new vcache / vnode pair. */
1646 	error = vfs_busy(mp);
1647 	if (error)
1648 		return error;
1649 	vip = vcache_alloc();
1650 	vip->vi_key.vk_mount = mp;
1651 	vp = VIMPL_TO_VNODE(vip);
1652 
1653 	/* Create and load the fs node. */
1654 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
1655 	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
1656 	if (error) {
1657 		mutex_enter(&vcache_lock);
1658 		vcache_dealloc(vip);
1659 		vfs_unbusy(mp);
1660 		KASSERT(*vpp == NULL);
1661 		return error;
1662 	}
1663 	KASSERT(vp->v_op != NULL);
1664 	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
1665 	if (vip->vi_key.vk_key_len > 0) {
1666 		KASSERT(vip->vi_key.vk_key != NULL);
1667 		hash = vcache_hash(&vip->vi_key);
1668 
1669 		/*
1670 		 * Wait for previous instance to be reclaimed,
1671 		 * then insert new node.
1672 		 */
1673 		mutex_enter(&vcache_lock);
1674 		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
1675 			ovp = VIMPL_TO_VNODE(ovip);
1676 			mutex_enter(ovp->v_interlock);
1677 			mutex_exit(&vcache_lock);
1678 			error = vcache_vget(ovp);
1679 			KASSERT(error == ENOENT);
1680 			mutex_enter(&vcache_lock);
1681 		}
1682 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1683 		    vip, vi_hash);
1684 		mutex_exit(&vcache_lock);
1685 	}
1686 	vfs_insmntque(vp, mp);
1687 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1688 		vp->v_vflag |= VV_MPSAFE;
1689 	vfs_ref(mp);
1690 	vfs_unbusy(mp);
1691 
1692 	/* Finished loading, finalize node. */
1693 	mutex_enter(&vcache_lock);
1694 	mutex_enter(vp->v_interlock);
1695 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1696 	mutex_exit(&vcache_lock);
1697 	mutex_exit(vp->v_interlock);
1698 	*vpp = vp;
1699 	return 0;
1700 }
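
/*
 * Illustrative caller (a sketch of a VOP_CREATE() style allocation;
 * the surrounding names are assumptions):
 *
 *	error = vcache_new(dvp->v_mount, dvp, vap, cnp->cn_cred, NULL, &vp);
 *	if (error)
 *		return error;
 *	error = vn_lock(vp, LK_EXCLUSIVE);
 *	if (error) {
 *		vrele(vp);
 *		return error;
 *	}
 *	*vpp = vp;
 */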
1701 
1702 /*
1703  * Prepare key change: update the old node's key and lock the new node.
1704  * Return an error if the new node already exists.
1705  */
1706 int
1707 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1708     const void *old_key, size_t old_key_len,
1709     const void *new_key, size_t new_key_len)
1710 {
1711 	uint32_t old_hash, new_hash;
1712 	struct vcache_key old_vcache_key, new_vcache_key;
1713 	vnode_impl_t *vip, *new_vip;
1714 
1715 	old_vcache_key.vk_mount = mp;
1716 	old_vcache_key.vk_key = old_key;
1717 	old_vcache_key.vk_key_len = old_key_len;
1718 	old_hash = vcache_hash(&old_vcache_key);
1719 
1720 	new_vcache_key.vk_mount = mp;
1721 	new_vcache_key.vk_key = new_key;
1722 	new_vcache_key.vk_key_len = new_key_len;
1723 	new_hash = vcache_hash(&new_vcache_key);
1724 
1725 	new_vip = vcache_alloc();
1726 	new_vip->vi_key = new_vcache_key;
1727 
1728 	/* Insert locked new node used as placeholder. */
1729 	mutex_enter(&vcache_lock);
1730 	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1731 	if (vip != NULL) {
1732 		vcache_dealloc(new_vip);
1733 		return EEXIST;
1734 	}
1735 	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1736 	    new_vip, vi_hash);
1737 
1738 	/* Replace the old node's key with the temporary copy. */
1739 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1740 	KASSERT(vip != NULL);
1741 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
1742 	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
1743 	vip->vi_key = old_vcache_key;
1744 	mutex_exit(&vcache_lock);
1745 	return 0;
1746 }
1747 
1748 /*
1749  * Key change complete: update old node and remove placeholder.
1750  */
1751 void
1752 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1753     const void *old_key, size_t old_key_len,
1754     const void *new_key, size_t new_key_len)
1755 {
1756 	uint32_t old_hash, new_hash;
1757 	struct vcache_key old_vcache_key, new_vcache_key;
1758 	vnode_impl_t *vip, *new_vip;
1759 	struct vnode *new_vp;
1760 
1761 	old_vcache_key.vk_mount = mp;
1762 	old_vcache_key.vk_key = old_key;
1763 	old_vcache_key.vk_key_len = old_key_len;
1764 	old_hash = vcache_hash(&old_vcache_key);
1765 
1766 	new_vcache_key.vk_mount = mp;
1767 	new_vcache_key.vk_key = new_key;
1768 	new_vcache_key.vk_key_len = new_key_len;
1769 	new_hash = vcache_hash(&new_vcache_key);
1770 
1771 	mutex_enter(&vcache_lock);
1772 
1773 	/* Lookup old and new node. */
1774 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1775 	KASSERT(vip != NULL);
1776 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
1777 
1778 	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1779 	KASSERT(new_vip != NULL);
1780 	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
1781 	new_vp = VIMPL_TO_VNODE(new_vip);
1782 	mutex_enter(new_vp->v_interlock);
1783 	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
1784 	mutex_exit(new_vp->v_interlock);
1785 
1786 	/* Rekey old node and put it onto its new hashlist. */
1787 	vip->vi_key = new_vcache_key;
1788 	if (old_hash != new_hash) {
1789 		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
1790 		    vip, vnode_impl, vi_hash);
1791 		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1792 		    vip, vi_hash);
1793 	}
1794 
1795 	/* Remove new node used as placeholder. */
1796 	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
1797 	    new_vip, vnode_impl, vi_hash);
1798 	vcache_dealloc(new_vip);
1799 }
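
/*
 * Sketch of the rekey protocol (hypothetical caller, e.g. a file
 * system whose vnode key changes on rename):
 *
 *	error = vcache_rekey_enter(mp, vp, &old_key, sizeof(old_key),
 *	    &new_key, sizeof(new_key));
 *	if (error == EEXIST)
 *		... resolve the collision and retry or fail ...
 *	... update the fs node so it now answers to new_key ...
 *	vcache_rekey_exit(mp, vp, &old_key, sizeof(old_key),
 *	    &new_key, sizeof(new_key));
 */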
1800 
1801 /*
1802  * Disassociate the underlying file system from a vnode.
1803  *
1804  * Must be called with vnode locked and will return unlocked.
1805  * Must be called with the interlock held, and will return with it held.
1806  */
1807 static void
1808 vcache_reclaim(vnode_t *vp)
1809 {
1810 	lwp_t *l = curlwp;
1811 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1812 	struct mount *mp = vp->v_mount;
1813 	uint32_t hash;
1814 	uint8_t temp_buf[64], *temp_key;
1815 	size_t temp_key_len;
1816 	bool recycle;
1817 	int error;
1818 
1819 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1820 	KASSERT(mutex_owned(vp->v_interlock));
1821 	KASSERT(vrefcnt(vp) != 0);
1822 
1823 	temp_key_len = vip->vi_key.vk_key_len;
1824 	/*
1825 	 * Prevent the vnode from being recycled or brought into use
1826 	 * while we clean it out.
1827 	 */
1828 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
1829 	mutex_exit(vp->v_interlock);
1830 
1831 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1832 	mutex_enter(vp->v_interlock);
1833 	if ((vp->v_iflag & VI_EXECMAP) != 0) {
1834 		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
1835 	}
1836 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1837 	vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
1838 	mutex_exit(vp->v_interlock);
1839 	rw_exit(vp->v_uobj.vmobjlock);
1840 
1841 	/*
1842 	 * With vnode state set to reclaiming, purge name cache immediately
1843 	 * to prevent new handles on vnode, and wait for existing threads
1844 	 * trying to get a handle to notice VS_RECLAIMED status and abort.
1845 	 */
1846 	cache_purge(vp);
1847 
1848 	/* Replace the vnode key with a temporary copy. */
1849 	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
1850 		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
1851 	} else {
1852 		temp_key = temp_buf;
1853 	}
1854 	if (vip->vi_key.vk_key_len > 0) {
1855 		mutex_enter(&vcache_lock);
1856 		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
1857 		vip->vi_key.vk_key = temp_key;
1858 		mutex_exit(&vcache_lock);
1859 	}
1860 
1861 	fstrans_start(mp);
1862 
1863 	/*
1864 	 * Clean out any cached data associated with the vnode.
1865 	 */
1866 	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
1867 	if (error != 0) {
1868 		if (wapbl_vphaswapbl(vp))
1869 			WAPBL_DISCARD(wapbl_vptomp(vp));
1870 		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1871 	}
1872 	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
1873 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1874 	if (vp->v_type == VBLK || vp->v_type == VCHR) {
1875 		spec_node_revoke(vp);
1876 	}
1877 
1878 	/*
1879 	 * Disassociate the underlying file system from the vnode.
1880 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1881 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1882 	 * would no longer function.
1883 	 */
1884 	VOP_INACTIVE(vp, &recycle);
1885 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1886 	if (VOP_RECLAIM(vp)) {
1887 		vnpanic(vp, "%s: cannot reclaim", __func__);
1888 	}
1889 
1890 	KASSERT(vp->v_data == NULL);
1891 	KASSERT((vp->v_iflag & VI_PAGES) == 0);
1892 
1893 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
1894 		uvm_ra_freectx(vp->v_ractx);
1895 		vp->v_ractx = NULL;
1896 	}
1897 
1898 	if (vip->vi_key.vk_key_len > 0) {
1899 		/* Remove from vnode cache. */
1900 		hash = vcache_hash(&vip->vi_key);
1901 		mutex_enter(&vcache_lock);
1902 		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1903 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1904 		    vip, vnode_impl, vi_hash);
1905 		mutex_exit(&vcache_lock);
1906 	}
1907 	if (temp_key != temp_buf)
1908 		kmem_free(temp_key, temp_key_len);
1909 
1910 	/* Done with purge, notify sleepers of the grim news. */
1911 	mutex_enter(vp->v_interlock);
1912 	vp->v_op = dead_vnodeop_p;
1913 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
1914 	vp->v_tag = VT_NON;
1915 	/*
1916 	 * Don't check for interest in NOTE_REVOKE; it's always posted
1917 	 * because it sets EV_EOF.
1918 	 */
1919 	KNOTE(&vp->v_klist, NOTE_REVOKE);
1920 	mutex_exit(vp->v_interlock);
1921 
1922 	/*
1923 	 * Move to dead mount.  Must be after changing the operations
1924 	 * vector as vnode operations enter the mount before using the
1925 	 * operations vector.  See sys/kern/vnode_if.c.
1926 	 */
1927 	vp->v_vflag &= ~VV_ROOT;
1928 	vfs_ref(dead_rootmount);
1929 	vfs_insmntque(vp, dead_rootmount);
1930 
1931 #ifdef PAX_SEGVGUARD
1932 	pax_segvguard_cleanup(vp);
1933 #endif /* PAX_SEGVGUARD */
1934 
1935 	mutex_enter(vp->v_interlock);
1936 	fstrans_done(mp);
1937 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1938 }
1939 
1940 /*
1941  * Disassociate the underlying file system from an open device vnode
1942  * and make it anonymous.
1943  *
1944  * Vnode unlocked on entry, drops a reference to the vnode.
1945  */
1946 void
1947 vcache_make_anon(vnode_t *vp)
1948 {
1949 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1950 	uint32_t hash;
1951 	bool recycle;
1952 
1953 	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
1954 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1955 	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
1956 
1957 	/* Remove from vnode cache. */
1958 	hash = vcache_hash(&vip->vi_key);
1959 	mutex_enter(&vcache_lock);
1960 	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1961 	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1962 	    vip, vnode_impl, vi_hash);
1963 	vip->vi_key.vk_mount = dead_rootmount;
1964 	vip->vi_key.vk_key_len = 0;
1965 	vip->vi_key.vk_key = NULL;
1966 	mutex_exit(&vcache_lock);
1967 
1968 	/*
1969 	 * Disassociate the underlying file system from the vnode.
1970 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1971 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1972 	 * would no longer function.
1973 	 */
1974 	if (vn_lock(vp, LK_EXCLUSIVE)) {
1975 		vnpanic(vp, "%s: cannot lock", __func__);
1976 	}
1977 	VOP_INACTIVE(vp, &recycle);
1978 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1979 	if (VOP_RECLAIM(vp)) {
1980 		vnpanic(vp, "%s: cannot reclaim", __func__);
1981 	}
1982 
1983 	/* Purge name cache. */
1984 	cache_purge(vp);
1985 
1986 	/* Done with purge, change operations vector. */
1987 	mutex_enter(vp->v_interlock);
1988 	vp->v_op = spec_vnodeop_p;
1989 	vp->v_vflag |= VV_MPSAFE;
1990 	mutex_exit(vp->v_interlock);
1991 
1992 	/*
1993 	 * Move to dead mount.  Must be after changing the operations
1994 	 * vector as vnode operations enter the mount before using the
1995 	 * operations vector.  See sys/kern/vnode_if.c.
1996 	 */
1997 	vfs_ref(dead_rootmount);
1998 	vfs_insmntque(vp, dead_rootmount);
1999 
2000 	vrele(vp);
2001 }
2002 
2003 /*
2004  * Update outstanding I/O count and do wakeup if requested.
2005  */
2006 void
2007 vwakeup(struct buf *bp)
2008 {
2009 	vnode_t *vp;
2010 
2011 	if ((vp = bp->b_vp) == NULL)
2012 		return;
2013 
2014 	KASSERT(bp->b_objlock == vp->v_interlock);
2015 	KASSERT(mutex_owned(bp->b_objlock));
2016 
2017 	if (--vp->v_numoutput < 0)
2018 		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
2019 	if (vp->v_numoutput == 0)
2020 		cv_broadcast(&vp->v_cv);
2021 }
2022 
2023 /*
2024  * Test a vnode for being or becoming dead.  Returns one of:
2025  * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
2026  * ENOENT: vnode is dead.
2027  * 0:      otherwise.
2028  *
2029  * Whenever this function returns a non-zero value all future
2030  * calls will also return a non-zero value.
2031  */
2032 int
2033 vdead_check(struct vnode *vp, int flags)
2034 {
2035 
2036 	KASSERT(mutex_owned(vp->v_interlock));
2037 
2038 	if (! ISSET(flags, VDEAD_NOWAIT))
2039 		VSTATE_WAIT_STABLE(vp);
2040 
2041 	if (VSTATE_GET(vp) == VS_RECLAIMING) {
2042 		KASSERT(ISSET(flags, VDEAD_NOWAIT));
2043 		return EBUSY;
2044 	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
2045 		return ENOENT;
2046 	}
2047 
2048 	return 0;
2049 }
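
/*
 * Typical use (sketch): check for revocation before acting on a
 * possibly dying vnode, without blocking:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error)
 *		return error;			(EBUSY or ENOENT)
 */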
2050 
2051 int
2052 vfs_drainvnodes(void)
2053 {
2054 	int i, gen;
2055 
2056 	mutex_enter(&vdrain_lock);
2057 	for (i = 0; i < 2; i++) {
2058 		gen = vdrain_gen;
2059 		while (gen == vdrain_gen) {
2060 			cv_broadcast(&vdrain_cv);
2061 			cv_wait(&vdrain_gen_cv, &vdrain_lock);
2062 		}
2063 	}
2064 	mutex_exit(&vdrain_lock);
2065 
2066 	if (numvnodes >= desiredvnodes)
2067 		return EBUSY;
2068 
2069 	if (vcache_hashsize != desiredvnodes)
2070 		vcache_reinit();
2071 
2072 	return 0;
2073 }
2074 
2075 void
2076 vnpanic(vnode_t *vp, const char *fmt, ...)
2077 {
2078 	va_list ap;
2079 
2080 #ifdef DIAGNOSTIC
2081 	vprint(NULL, vp);
2082 #endif
2083 	va_start(ap, fmt);
2084 	vpanic(fmt, ap);
2085 	va_end(ap);
2086 }
2087 
2088 void
2089 vshareilock(vnode_t *tvp, vnode_t *fvp)
2090 {
2091 	kmutex_t *oldlock;
2092 
2093 	oldlock = tvp->v_interlock;
2094 	mutex_obj_hold(fvp->v_interlock);
2095 	tvp->v_interlock = fvp->v_interlock;
2096 	mutex_obj_free(oldlock);
2097 }
2098