/*	$NetBSD: vfs_vnode.c,v 1.19 2013/02/13 14:03:48 hannken Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via getnewvnode(9) and/or vnalloc(9).
 *	- Reclamation of an inactive vnode, via vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
 *	the file system that the vnode is inactive.  Via this call, the file
 *	system indicates whether the vnode can be recycled (usually, it
 *	checks its own references, e.g. the link count, or whether the file
 *	was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vclean(9), which calls VOP_RECLAIM(9) to
 *	disassociate the underlying file system from the vnode, and finally
 *	destroyed.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
 *	the vref(9) and vrele(9) routines, as well as vput(9).  Common
 *	holders of references are, e.g., open files, current working
 *	directories and mount points.
 *
 * Note on v_usecount and its locking
 *
 *	At nearly all points where it is known that v_usecount could be
 *	zero, vnode_t::v_interlock will be held.  To change v_usecount away
 *	from zero, the interlock must be held.  To change from a non-zero
 *	value to zero, again the interlock must be held.
 *
 *	There is a flag bit, VC_XLOCK, embedded in v_usecount.  To raise
 *	v_usecount, if the VC_XLOCK bit is set in it, the interlock must
 *	be held.  To modify the VC_XLOCK bit, the interlock must be held.
 *	We always keep the usecount (v_usecount & VC_MASK) non-zero while
 *	the VC_XLOCK bit is set.
 *
 *	Unless the VC_XLOCK bit is set, changing the usecount from one
 *	non-zero value to another can safely be done using atomic
 *	operations, without the interlock held.
 *
 *	Even if the VC_XLOCK bit is set, decreasing the usecount to a
 *	non-zero value can be done using atomic operations, without the
 *	interlock held.
 *
 *	Note: if VI_CLEAN is set, vnode_t::v_interlock will be released while
 *	mntvnode_lock is still held.
 */
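
/*
 * A minimal illustrative sketch (kept out of compilation) of the
 * reference discipline described above, as seen from a caller that
 * already holds one reference on 'vp'.  The VOP_*() work in the
 * middle is elided.
 */
#if 0
static int
example_use_vnode(vnode_t *vp)
{
	int error;

	vref(vp);				/* gain an extra reference */
	error = vn_lock(vp, LK_EXCLUSIVE);	/* lock before VOP_*() calls */
	if (error != 0) {
		vrele(vp);			/* drop the reference, unlocked */
		return error;
	}
	/* ... VOP_READ(), VOP_WRITE(), etc ... */
	vput(vp);				/* VOP_UNLOCK() plus vrele() */
	return 0;
}
#endif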

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.19 2013/02/13 14:03:48 hannken Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/wapbl.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

u_int			numvnodes		__cacheline_aligned;

static pool_cache_t	vnode_cache		__read_mostly;

/*
 * There are two free lists: one is for vnodes which have no buffer/page
 * references and one for those which do (i.e. v_holdcnt is non-zero).
 * The vnode recycling mechanism first looks at the former list.
 */
static kmutex_t		vnode_free_list_lock	__cacheline_aligned;
static vnodelst_t	vnode_free_list		__cacheline_aligned;
static vnodelst_t	vnode_hold_list		__cacheline_aligned;
static kcondvar_t	vdrain_cv		__cacheline_aligned;

static vnodelst_t	vrele_list		__cacheline_aligned;
static kmutex_t		vrele_lock		__cacheline_aligned;
static kcondvar_t	vrele_cv		__cacheline_aligned;
static lwp_t *		vrele_lwp		__cacheline_aligned;
static int		vrele_pending		__cacheline_aligned;
static int		vrele_gen		__cacheline_aligned;

static int		cleanvnode(void);
static void		vdrain_thread(void *);
static void		vrele_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern int		(**dead_vnodeop_p)(void *);

void
vfs_vnode_sysinit(void)
{
	int error;

	vnode_cache = pool_cache_init(sizeof(vnode_t), 0, 0, 0, "vnodepl",
	    NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vnode_cache != NULL);

	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vrele_list);

	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vrele_cv, "vrele");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, NULL, "vdrain");
	KASSERT(error == 0);
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
	    NULL, &vrele_lwp, "vrele");
	KASSERT(error == 0);
}

/*
 * Allocate a new, uninitialized vnode.  If 'mp' is non-NULL, this is a
 * marker vnode.
 */
vnode_t *
vnalloc(struct mount *mp)
{
	vnode_t *vp;

	vp = pool_cache_get(vnode_cache, PR_WAITOK);
	KASSERT(vp != NULL);

	memset(vp, 0, sizeof(*vp));
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	/*
	 * Done by memset() above.
	 *	LIST_INIT(&vp->v_nclist);
	 *	LIST_INIT(&vp->v_dnclist);
	 */

	if (mp != NULL) {
		vp->v_mount = mp;
		vp->v_type = VBAD;
		vp->v_iflag = VI_MARKER;
	} else {
		rw_init(&vp->v_lock);
	}

	return vp;
}

/*
 * Free an unused, unreferenced vnode.
 */
void
vnfree(vnode_t *vp)
{

	KASSERT(vp->v_usecount == 0);

	if ((vp->v_iflag & VI_MARKER) == 0) {
		rw_destroy(&vp->v_lock);
		mutex_enter(&vnode_free_list_lock);
		numvnodes--;
		mutex_exit(&vnode_free_list_lock);
	}

	/*
	 * Note: the vnode interlock will either be freed, or its
	 * reference dropped (if VI_LOCKSHARE was in use).
	 */
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vnode_cache, vp);
}

/*
 * cleanvnode: grab a vnode from freelist, clean and free it.
 *
 * => Releases vnode_free_list_lock.
 */
static int
cleanvnode(void)
{
	vnode_t *vp;
	vnodelst_t *listhd;

	KASSERT(mutex_owned(&vnode_free_list_lock));
retry:
	listhd = &vnode_free_list;
try_nextlist:
	TAILQ_FOREACH(vp, listhd, v_freelist) {
		/*
		 * It's safe to test v_usecount and v_iflag
		 * without holding the interlock here, since
		 * vnodes with a non-zero v_usecount, or with
		 * VI_CLEAN set, should never appear on these
		 * lists.
		 */
		KASSERT(vp->v_usecount == 0);
		KASSERT((vp->v_iflag & VI_CLEAN) == 0);
		KASSERT(vp->v_freelisthd == listhd);

		if (!mutex_tryenter(vp->v_interlock))
			continue;
		if ((vp->v_iflag & VI_XLOCK) == 0)
			break;
		mutex_exit(vp->v_interlock);
	}

	if (vp == NULL) {
		if (listhd == &vnode_free_list) {
			listhd = &vnode_hold_list;
			goto try_nextlist;
		}
		mutex_exit(&vnode_free_list_lock);
		return EBUSY;
	}

	/* Remove it from the freelist. */
	TAILQ_REMOVE(listhd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);

	KASSERT(vp->v_usecount == 0);

	/*
	 * The vnode is still associated with a file system, so we must
	 * clean it out before freeing it.  We need to add a reference
	 * before doing this.  If the vnode gains another reference while
	 * being cleaned out then we lose - retry.
	 */
	atomic_add_int(&vp->v_usecount, 1 + VC_XLOCK);
	vclean(vp, DOCLOSE);
	KASSERT(vp->v_usecount >= 1 + VC_XLOCK);
	atomic_add_int(&vp->v_usecount, -VC_XLOCK);
	if (vp->v_usecount > 1) {
		/*
		 * Don't return to freelist - the holder of the last
		 * reference will destroy it.
		 */
		vrelel(vp, 0); /* releases vp->v_interlock */
		mutex_enter(&vnode_free_list_lock);
		goto retry;
	}

	KASSERT((vp->v_iflag & VI_CLEAN) == VI_CLEAN);
	mutex_exit(vp->v_interlock);
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		spec_node_destroy(vp);
	}
	vp->v_type = VNON;

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);
	KASSERT(TAILQ_EMPTY(&vp->v_uobj.memq));
	KASSERT(vp->v_numoutput == 0);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);

	vrele(vp);

	return 0;
}

/*
 * getnewvnode: return a fresh vnode.
 *
 * => Returns referenced vnode, moved into the mount queue.
 * => Shares the interlock specified by 'slock', if it is not NULL.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
    kmutex_t *slock, vnode_t **vpp)
{
	struct uvm_object *uobj;
	vnode_t *vp;
	int error = 0;

	if (mp != NULL) {
		/*
		 * Mark filesystem busy while we are creating a vnode.
		 * If unmount is in progress, this will fail.
		 */
		error = vfs_busy(mp, NULL);
		if (error)
			return error;
	}

	vp = NULL;

	/* Allocate a new vnode. */
	mutex_enter(&vnode_free_list_lock);
	numvnodes++;
	if (numvnodes > desiredvnodes + desiredvnodes / 10)
		cv_signal(&vdrain_cv);
	mutex_exit(&vnode_free_list_lock);
	vp = vnalloc(NULL);

	KASSERT(vp->v_freelisthd == NULL);
	KASSERT(LIST_EMPTY(&vp->v_nclist));
	KASSERT(LIST_EMPTY(&vp->v_dnclist));

	/* Initialize vnode. */
	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	vp->v_data = NULL;

	uobj = &vp->v_uobj;
	KASSERT(uobj->pgops == &uvm_vnodeops);
	KASSERT(uobj->uo_npages == 0);
	KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	/* Share the vnode_t::v_interlock, if requested. */
	if (slock) {
		/* Set the interlock and mark that it is shared. */
		KASSERT(vp->v_mount == NULL);
		mutex_obj_hold(slock);
		uvm_obj_setlock(&vp->v_uobj, slock);
		KASSERT(vp->v_interlock == slock);
		vp->v_iflag |= VI_LOCKSHARE;
	}

	/* Finally, move vnode into the mount queue. */
	vfs_insmntque(vp, mp);

	if (mp != NULL) {
		if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
			vp->v_vflag |= VV_MPSAFE;
		vfs_unbusy(mp, true, NULL);
	}

	*vpp = vp;
	return 0;
}
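
/*
 * An illustrative sketch (kept out of compilation) of how a file
 * system's VFS_VGET()-style code might use getnewvnode() and
 * ungetnewvnode().  The vnodeop vector 'example_vnodeop_p' and the
 * helper example_hash_insert() are hypothetical.
 */
#if 0
static int
example_fs_vget(struct mount *mp, vnode_t **vpp)
{
	vnode_t *vp;
	int error;

	error = getnewvnode(VT_NON, mp, example_vnodeop_p, NULL, &vp);
	if (error != 0)
		return error;
	/*
	 * Attach the file system specific node.  If another thread
	 * attached one first (a locking race), push the fresh vnode
	 * back and let the caller retry.
	 */
	if (example_hash_insert(mp, vp) != 0) {
		ungetnewvnode(vp);
		return EEXIST;
	}
	*vpp = vp;
	return 0;
}
#endif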

/*
 * This is really just the reverse of getnewvnode().  Needed for
 * VFS_VGET functions that may need to push back a vnode in case
 * of a locking race.
 */
void
ungetnewvnode(vnode_t *vp)
{

	KASSERT(vp->v_usecount == 1);
	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_freelisthd == NULL);

	mutex_enter(vp->v_interlock);
	vp->v_iflag |= VI_CLEAN;
	vrelel(vp, 0);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes.
 */
static void
vdrain_thread(void *cookie)
{
	int error;

	mutex_enter(&vnode_free_list_lock);

	for (;;) {
		cv_timedwait(&vdrain_cv, &vnode_free_list_lock, hz);
		while (numvnodes > desiredvnodes) {
			error = cleanvnode();
			if (error)
				kpause("vndsbusy", false, hz, NULL);
			mutex_enter(&vnode_free_list_lock);
			if (error)
				break;
		}
	}
}

/*
 * Remove a vnode from its freelist.
 */
void
vremfree(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount == 0);

	/*
	 * Note that the reference count must not change until
	 * the vnode is removed.
	 */
	mutex_enter(&vnode_free_list_lock);
	if (vp->v_holdcnt > 0) {
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
	} else {
		KASSERT(vp->v_freelisthd == &vnode_free_list);
	}
	TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);
}

/*
 * Try to gain a reference to a vnode, without acquiring its interlock.
 * The caller must hold a lock that will prevent the vnode from being
 * recycled or freed.
 */
bool
vtryget(vnode_t *vp)
{
	u_int use, next;

	/*
	 * If the vnode is being freed, don't make life any harder
	 * for vclean() by adding another reference without waiting.
	 * This is not strictly necessary, but we'll do it anyway.
	 */
	if (__predict_false((vp->v_iflag & VI_XLOCK) != 0)) {
		return false;
	}
	for (use = vp->v_usecount;; use = next) {
		if (use == 0 || __predict_false((use & VC_XLOCK) != 0)) {
			/* Need interlock held if first reference. */
			return false;
		}
		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}
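
/*
 * An illustrative sketch (kept out of compilation) of the intended
 * fast path/slow path split for vtryget(): try the lock-free grab
 * first, and fall back to the interlocked vget().  The stabilizing
 * lock 'example_lookup_lock' is hypothetical.
 */
#if 0
	/* 'vp' was found under example_lookup_lock, which keeps it alive. */
	if (vtryget(vp)) {
		mutex_exit(&example_lookup_lock);
	} else {
		mutex_enter(vp->v_interlock);
		mutex_exit(&example_lookup_lock);
		error = vget(vp, 0);	/* may fail, e.g. with ENOENT */
	}
#endif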

/*
 * vget: get a particular vnode from the free list, increment its reference
 * count and lock it.
 *
 * => Should be called with v_interlock held.
 *
 * If VI_XLOCK is set, the vnode is being eliminated in vgone()/vclean().
 * In that case, we cannot grab the vnode, so the process is awakened when
 * the transition is completed, and an error returned to indicate that the
 * vnode is no longer usable (e.g. changed to a new file system type).
 */
int
vget(vnode_t *vp, int flags)
{
	int error = 0;

	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT)) == 0);

	/*
	 * Before adding a reference, we must remove the vnode
	 * from its freelist.
	 */
	if (vp->v_usecount == 0) {
		vremfree(vp);
		vp->v_usecount = 1;
	} else {
		atomic_inc_uint(&vp->v_usecount);
	}

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure.  Cleaning is determined by checking if
	 * the VI_XLOCK flag is set.
	 */
	if ((vp->v_iflag & VI_XLOCK) != 0) {
		if ((flags & LK_NOWAIT) != 0) {
			vrelel(vp, 0);
			return EBUSY;
		}
		vwait(vp, VI_XLOCK);
		vrelel(vp, 0);
		return ENOENT;
	}

	if ((vp->v_iflag & VI_INACTNOW) != 0) {
		/*
		 * If it is being deactivated, wait for the deactivation
		 * to complete, and make sure not to return a clean vnode.
		 */
		if ((flags & LK_NOWAIT) != 0) {
			vrelel(vp, 0);
			return EBUSY;
		}
		vwait(vp, VI_INACTNOW);
		if ((vp->v_iflag & VI_CLEAN) != 0) {
			vrelel(vp, 0);
			return ENOENT;
		}
	}

	/*
	 * Ok, we got it in good shape.  Just locking left.
	 */
	KASSERT((vp->v_iflag & VI_CLEAN) == 0);
	mutex_exit(vp->v_interlock);
	if (flags & (LK_EXCLUSIVE | LK_SHARED)) {
		error = vn_lock(vp, flags);
		if (error != 0) {
			vrele(vp);
		}
	}
	return error;
}
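
/*
 * An illustrative sketch (kept out of compilation) of the classic
 * interlocked hash lookup built on vget(): on ENOENT the vnode was
 * reclaimed underneath us, so the lookup is simply retried.  The
 * lock 'example_hash_lock' and helper example_hash_lookup() are
 * hypothetical.
 */
#if 0
 lookup_retry:
	mutex_enter(&example_hash_lock);
	if ((vp = example_hash_lookup(dev, inum)) != NULL) {
		mutex_enter(vp->v_interlock);
		mutex_exit(&example_hash_lock);
		error = vget(vp, LK_EXCLUSIVE);
		if (error == ENOENT)
			goto lookup_retry;	/* reclaimed; look again */
	} else
		mutex_exit(&example_hash_lock);
#endif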

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT((use & VC_MASK) > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * Vnode release.  If reference count drops to zero, call inactive
 * routine and either return to freelist or free to the pool.
 */
void
vrelel(vnode_t *vp, int flags)
{
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(vp->v_freelisthd == NULL);

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    (vp->v_iflag & (VI_CLEAN|VI_XLOCK)) == 0)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		vp->v_iflag |= VI_INACTREDO;
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

	KASSERT((vp->v_iflag & VI_XLOCK) == 0);

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
retry:
	if ((vp->v_iflag & VI_CLEAN) == 0) {
		recycle = false;
		vp->v_iflag |= VI_INACTNOW;

		/*
		 * XXX This ugly block can be largely eliminated if
		 * locking is pushed down into the file systems.
		 *
		 * Defer vnode release to vrele_thread if caller
		 * requests it explicitly.
		 */
		if ((curlwp == uvm.pagedaemon_lwp) ||
		    (flags & VRELEL_ASYNC_RELE) != 0) {
			/* The pagedaemon can't wait around; defer. */
			defer = true;
		} else if (curlwp == vrele_lwp) {
			/*
			 * We have to try harder.  But we can't sleep
			 * with VI_INACTNOW as vget() may be waiting on it.
			 */
			vp->v_iflag &= ~(VI_INACTREDO|VI_INACTNOW);
			cv_broadcast(&vp->v_cv);
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE);
			if (error != 0) {
				/* XXX */
				vnpanic(vp, "%s: unable to lock %p",
				    __func__, vp);
			}
			mutex_enter(vp->v_interlock);
			/*
			 * If we did get another reference while
			 * sleeping, don't try to inactivate it yet.
			 */
			if (__predict_false(vtryrele(vp))) {
				VOP_UNLOCK(vp);
				mutex_exit(vp->v_interlock);
				return;
			}
			vp->v_iflag |= VI_INACTNOW;
			mutex_exit(vp->v_interlock);
			defer = false;
		} else if ((vp->v_iflag & VI_LAYER) != 0) {
			/*
			 * Acquiring the stack's lock in vclean() even
			 * for an honest vput/vrele is dangerous because
			 * our caller may hold other vnode locks; defer.
			 */
			defer = true;
		} else {
			/* If we can't acquire the lock, then defer. */
			vp->v_iflag &= ~VI_INACTREDO;
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
			if (error != 0) {
				defer = true;
				mutex_enter(vp->v_interlock);
			} else {
				defer = false;
			}
		}

		if (defer) {
			/*
			 * Defer reclaim to the kthread; it's not safe to
			 * clean it here.  We donate it our last reference.
			 */
			KASSERT(mutex_owned(vp->v_interlock));
			KASSERT((vp->v_iflag & VI_INACTPEND) == 0);
			vp->v_iflag &= ~VI_INACTNOW;
			vp->v_iflag |= VI_INACTPEND;
			mutex_enter(&vrele_lock);
			TAILQ_INSERT_TAIL(&vrele_list, vp, v_freelist);
			if (++vrele_pending > (desiredvnodes >> 8))
				cv_signal(&vrele_cv);
			mutex_exit(&vrele_lock);
			cv_broadcast(&vp->v_cv);
			mutex_exit(vp->v_interlock);
			return;
		}

		/*
		 * The vnode can gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode irrespective of additional references.
		 * Another thread may be waiting to re-use the on-disk
		 * inode.
		 *
		 * Note that VOP_INACTIVE() will drop the vnode lock.
		 */
		VOP_INACTIVE(vp, &recycle);
		mutex_enter(vp->v_interlock);
		vp->v_iflag &= ~VI_INACTNOW;
		cv_broadcast(&vp->v_cv);
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}

			/*
			 * If we grew another reference while
			 * VOP_INACTIVE() was underway, retry.
			 */
			if ((vp->v_iflag & VI_INACTREDO) != 0) {
				goto retry;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			vclean(vp, DOCLOSE);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if ((vp->v_iflag & VI_CLEAN) != 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		KASSERT(vp->v_holdcnt == 0);
		KASSERT(vp->v_writecount == 0);
		mutex_exit(vp->v_interlock);
		vfs_insmntque(vp, NULL);
		if (vp->v_type == VBLK || vp->v_type == VCHR) {
			spec_node_destroy(vp);
		}
		vnfree(vp);
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		mutex_enter(&vnode_free_list_lock);
		if (vp->v_holdcnt > 0) {
			vp->v_freelisthd = &vnode_hold_list;
		} else {
			vp->v_freelisthd = &vnode_free_list;
		}
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release: the vnode is released in a different
 * context.
 */
void
vrele_async(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}

static void
vrele_thread(void *cookie)
{
	vnode_t *vp;

	for (;;) {
		mutex_enter(&vrele_lock);
		while (TAILQ_EMPTY(&vrele_list)) {
			vrele_gen++;
			cv_broadcast(&vrele_cv);
			cv_timedwait(&vrele_cv, &vrele_lock, hz);
		}
		vp = TAILQ_FIRST(&vrele_list);
		TAILQ_REMOVE(&vrele_list, vp, v_freelist);
		vrele_pending--;
		mutex_exit(&vrele_lock);

		/*
		 * If not the last reference, then ignore the vnode
		 * and look for more work.
		 */
		mutex_enter(vp->v_interlock);
		KASSERT((vp->v_iflag & VI_INACTPEND) != 0);
		vp->v_iflag &= ~VI_INACTPEND;
		vrelel(vp, 0);
	}
}

void
vrele_flush(void)
{
	int gen;

	mutex_enter(&vrele_lock);
	gen = vrele_gen;
	while (vrele_pending && gen == vrele_gen) {
		cv_broadcast(&vrele_cv);
		cv_wait(&vrele_cv, &vrele_lock);
	}
	mutex_exit(&vrele_lock);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_free_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_hold_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_free_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with the interlock held, and will return with it held.
 */
void
vclean(vnode_t *vp, int flags)
{
	lwp_t *l = curlwp;
	bool recycle, active;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(vp->v_usecount != 0);

	/* If cleaning is already in progress, wait until done and return. */
	if (vp->v_iflag & VI_XLOCK) {
		vwait(vp, VI_XLOCK);
		return;
	}

	/* If already clean, nothing to do. */
	if ((vp->v_iflag & VI_CLEAN) != 0) {
		return;
	}

	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	vp->v_iflag |= VI_XLOCK;
	if (vp->v_iflag & VI_EXECMAP) {
		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	active = (vp->v_usecount & VC_MASK) > 1;

	/* XXXAD should not lock vnode under layer */
	mutex_exit(vp->v_interlock);
	VOP_LOCK(vp, LK_EXCLUSIVE);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.  Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (flags & DOCLOSE) {
		error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
		if (error != 0) {
			/* XXX, fix vn_start_write's grab of mp and use that. */

			if (wapbl_vphaswapbl(vp))
				WAPBL_DISCARD(wapbl_vptomp(vp));
			error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
		}
		KASSERT(error == 0);
		KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
		if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
			spec_node_revoke(vp);
		}
	}
	if (active) {
		VOP_INACTIVE(vp, &recycle);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VI_XLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp);
	}

	/* Disassociate the underlying file system from the vnode. */
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	vp->v_iflag &= ~VI_XLOCK;
	vp->v_vflag &= ~VV_LOCKSWORK;
	if ((flags & DOCLOSE) != 0) {
		vp->v_iflag |= VI_CLEAN;
	}
	cv_broadcast(&vp->v_cv);

	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vnode_t *vp, kmutex_t *inter_lkp, struct lwp *l)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	mutex_enter(vp->v_interlock);
	if (vp->v_usecount != 0) {
		mutex_exit(vp->v_interlock);
		return 0;
	}
	if (inter_lkp) {
		mutex_exit(inter_lkp);
	}
	vremfree(vp);
	vp->v_usecount = 1;
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
	return 1;
}
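
/*
 * An illustrative sketch (kept out of compilation): a file system
 * that discovers a stale or deleted on-disk node may try to recycle
 * the vnode at once; vrecycle() is a no-op if the vnode is still in
 * use.  example_node_is_stale() is hypothetical.
 */
#if 0
	if (example_node_is_stale(vp))
		(void)vrecycle(vp, NULL, curlwp);
#endif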

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_CLEAN) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		vclean(vp, DOCLOSE);
		vrelel(vp, 0);
		return;
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);
	}

	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
		mutex_enter(vq->v_interlock);
		vclean(vq, DOCLOSE);
		vrelel(vq, 0);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}
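
/*
 * An illustrative sketch (kept out of compilation) of the waiting
 * side of the v_numoutput protocol: vwakeup() above broadcasts
 * v_cv when the last outstanding write completes, so code that must
 * wait for all pending writes sleeps like this.
 */
#if 0
	mutex_enter(vp->v_interlock);
	while (vp->v_numoutput > 0)
		cv_wait(&vp->v_cv, vp->v_interlock);
	mutex_exit(vp->v_interlock);
#endif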

/*
 * Wait for a vnode (typically with VI_XLOCK set) to be cleaned or
 * recycled.
 */
void
vwait(vnode_t *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	while ((vp->v_iflag & flags) != 0)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

int
vfs_drainvnodes(long target)
{
	int error;

	mutex_enter(&vnode_free_list_lock);

	while (numvnodes > target) {
		error = cleanvnode();
		if (error != 0)
			return error;
		mutex_enter(&vnode_free_list_lock);
	}

	mutex_exit(&vnode_free_list_lock);

	return 0;
}

static void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}
1220