/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.28 2007/06/09 19:46:02 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
	.name =		"vnode",
	.mtype =	M_VNODE,
	.proto =	SYSREF_PROTO_VNODE,
	.offset =	offsetof(struct vnode, v_sysref),
	.objsize =	sizeof(struct vnode),
	.mag_capacity =	256,
	.flags =	SRC_MANAGEDINIT,
	.ctor =		vnode_ctor,
	.dtor =		vnode_dtor,
	.ops = {
		.terminate = (sysref_terminate_func_t)vnode_terminate
	}
};

static TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */

int  freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
		&freevnodes, 0, "");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
		&wantfreevnodes, 0, "");
static int minvnodes;
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
		&minvnodes, 0, "Minimum number of vnodes");

/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	minvnodes = desiredvnodes / 4;

	TAILQ_INIT(&vnode_free_list);
}

/*
 * Inline helper functions.  vbusy() and vfree() must be called while in a
 * critical section.
 *
 * Warning: must be callable if the caller holds a read spinlock to something
 * else, meaning we can't use read spinlocks here.
 */
static __inline
void
__vbusy(struct vnode *vp)
{
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	vp->v_flag &= ~(VFREE|VAGE);
}

static __inline
void
__vfree(struct vnode *vp)
{
	if (vp->v_flag & (VAGE|VRECLAIMED))
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	vp->v_flag &= ~VAGE;
	vp->v_flag |= VFREE;
}

static __inline
void
__vfreetail(struct vnode *vp)
{
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	vp->v_flag |= VFREE;
}

/*
 * Return a C boolean indicating whether we should put the vnode on the
 * freelist (VFREE), or leave it / mark it as VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
	return (vp->v_auxrefs == 0 &&
	    (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}

/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 */
void
vref(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt > 0 &&
		 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
	sysref_get(&vp->v_sysref);
}

/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
	sysref_put(&vp->v_sysref);
}
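
/*
 * Usage sketch (illustrative only, not part of the original code):
 * vref() and vrele() bracket an extra reference on a vnode that is
 * already known to be active, e.g. one the caller already holds a
 * reference on:
 *
 *	vref(vp);		vp must not be VFREE or VINACTIVE
 *	... hand vp off or use it ...
 *	vrele(vp);		may deactivate vp if this was the last
 *				active reference
 */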

/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 */
void
vhold(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0);
	atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.
 */
void
vdrop(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
	atomic_subtract_int(&vp->v_auxrefs, 1);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		vp->v_flag |= VAGE;
		vp->v_flag &= ~VCACHED;
		__vfree(vp);
	}
}
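
/*
 * Usage sketch (illustrative only, not part of the original code): an
 * auxiliary reference keeps the vnode structure from being destroyed
 * while its state is probed via the VX lock.  The vnode may still be
 * deactivated or reclaimed while held:
 *
 *	vhold(vp);			structure cannot be destroyed
 *	vx_lock(vp);			interlock against termination
 *	if ((vp->v_flag & VRECLAIMED) == 0) {
 *		... vp state may be examined here ...
 *	}
 *	vx_unlock(vp);
 *	vdrop(vp);			may move a VCACHED vnode to VFREE
 */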

/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will give the vnode a
 * negative ref count, indicating that it is undergoing termination or
 * is being set aside for the cache, and one final sysref_put() is
 * required to actually return it to the memory subsystem.
 *
 * However, because vnodes may have auxiliary structural references via
 * v_auxrefs, we must interlock auxiliary references against termination
 * via the VX lock mechanism.  It is possible for a vnode to be reactivated
 * while we are blocked on the lock.
 */
void
vnode_terminate(struct vnode *vp)
{
	vx_lock(vp);
	if (sysref_isinactive(&vp->v_sysref)) {
		/*
		 * Deactivate the vnode by marking it VFREE or VCACHED.
		 * The vnode can be reactivated from either state until
		 * reclaimed.  These states inherit the 'last' sysref on the
		 * vnode.
		 *
		 * NOTE: There may be additional inactive references from
		 * other entities blocking on the VX lock while we hold it,
		 * but this does not prevent us from changing the vnode's
		 * state.
		 *
		 * NOTE: The vnode could already be marked inactive.  XXX
		 * how?
		 *
		 * NOTE: The vnode may be marked inactive with dirty buffers
		 * or dirty pages in its cached VM object still present.
		 */
		if ((vp->v_flag & VINACTIVE) == 0) {
			vp->v_flag |= VINACTIVE;
			VOP_INACTIVE(vp);
		}
		KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
		if (vshouldfree(vp))
			__vfree(vp);
		else
			vp->v_flag |= VCACHED;
		vx_unlock(vp);
	} else {
		/*
		 * Someone reactivated the vnode while we were blocked on
		 * the VX lock.  Release the VX lock and release the (now
		 * active) last reference which is no longer last.
		 */
		vx_unlock(vp);
		vrele(vp);
	}
}

/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 */
boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
	struct vnode *vp = obj;

	lwkt_token_init(&vp->v_pollinfo.vpi_token);
	lockinit(&vp->v_lock, "vnode", 0, 0);
	ccms_dataspace_init(&vp->v_ccms);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	return(TRUE);
}

void
vnode_dtor(void *obj, void *private)
{
	struct vnode *vp = obj;

	ccms_dataspace_destroy(&vp->v_ccms);
}

/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 */

void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

static int
vx_lock_nonblock(struct vnode *vp)
{
	return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT));
}

void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}
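
/*
 * Usage sketch (illustrative only, not part of the original code): the
 * VX lock is simply the vnode's embedded lockmgr lock held exclusively,
 * independent of the vnode's activation state:
 *
 *	vx_lock(vp);			blocks until acquired
 *	... deactivation/reclamation work on vp ...
 *	vx_unlock(vp);
 *
 * vx_lock_nonblock() returns non-zero if the lock cannot be acquired
 * immediately; allocfreevnode() below uses it to skip vnodes it cannot
 * lock without blocking.
 */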

/****************************************************************
 *			VNODE ACQUISITION FUNCTIONS		*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.  0->1
	 * transitions and refs during termination are allowed here so
	 * call sysref directly.
	 */

	sysref_get(&vp->v_sysref);
	if ((error = vn_lock(vp, flags)) != 0) {
		/*
		 * The lock failed; undo and return an error.
		 */
		sysref_put(&vp->v_sysref);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more; undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else {
		/*
		 * If the vnode is marked VFREE or VCACHED it needs to be
		 * reactivated, otherwise it had better already be active.
		 * VINACTIVE must also be cleared.
		 *
		 * In the VFREE/VCACHED case we have to throw away the
		 * sysref that was earmarking those cases and preventing
		 * the vnode from being destroyed.  Our sysref is still held.
		 */
		if (vp->v_flag & VFREE) {
			__vbusy(vp);
			sysref_put(&vp->v_sysref);
			sysref_activate(&vp->v_sysref);
		} else if (vp->v_flag & VCACHED) {
			vp->v_flag &= ~VCACHED;
			sysref_put(&vp->v_sysref);
			sysref_activate(&vp->v_sysref);
		} else {
			KKASSERT(sysref_isactive(&vp->v_sysref));
		}
		vp->v_flag &= ~VINACTIVE;
		error = 0;
	}
	return(error);
}

void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}
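
/*
 * Usage sketch (illustrative only, not part of the original code): the
 * canonical ref+lock sequence for a vnode whose activation state is
 * unknown, e.g. one found via the namecache:
 *
 *	if (vget(vp, LK_EXCLUSIVE) == 0) {
 *		... vp is active, referenced, and exclusively locked ...
 *		vput(vp);		unlock + release in one call
 *	} else {
 *		... vp was reclaimed (ENOENT) or the lock failed ...
 *	}
 *
 * A lock type must always be supplied; passing no LK_* type panics.
 */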

/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 */
void
vx_get(struct vnode *vp)
{
	sysref_get(&vp->v_sysref);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	sysref_get(&vp->v_sysref);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error)
		sysref_put(&vp->v_sysref);
	return(error);
}

/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 */
void
vx_put(struct vnode *vp)
{
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		vp->v_flag |= VAGE;
		vp->v_flag &= ~VCACHED;
		__vfree(vp);
	}
	lockmgr(&vp->v_lock, LK_RELEASE);
	sysref_put(&vp->v_sysref);
}
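
/*
 * Usage sketch (illustrative only, not part of the original code):
 * vx_get() and vx_put() are used when reactivation of the vnode is NOT
 * desired, e.g. while tearing one down:
 *
 *	vx_get(vp);		ref + exclusive lock, no reactivation
 *	... inspect or reclaim vp ...
 *	vx_put(vp);		may transition VCACHED->VFREE, then drops
 *				both the lock and the ref
 */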

/*
 * Misc functions
 */

void
vsetflags(struct vnode *vp, int flags)
{
	crit_enter();
	vp->v_flag |= flags;
	crit_exit();
}

void
vclrflags(struct vnode *vp, int flags)
{
	crit_enter();
	vp->v_flag &= ~flags;
	crit_exit();
}

/*
 * Try to reuse a vnode from the free list.  NOTE: The returned vnode
 * is not completely initialized.
 */
static
struct vnode *
allocfreevnode(void)
{
	struct vnode *vp;
	int count;

	for (count = 0; count < freevnodes; count++) {
		/*
		 * Note that regardless of how we block in this loop,
		 * we only get here if freevnodes != 0 so there
		 * had better be something on the list.
		 *
		 * Try to lock the first vnode on the free list.
		 * Cycle if we can't.
		 *
		 * XXX NOT MP SAFE
		 */
		vp = TAILQ_FIRST(&vnode_free_list);
		if (vx_lock_nonblock(vp)) {
			KKASSERT(vp->v_flag & VFREE);
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list,
					  vp, v_freelist);
			continue;
		}

		/*
		 * With the vnode locked we can safely remove it
		 * from the free list.  We inherit the reference
		 * that was previously associated with the vnode
		 * being on the free list.
		 */
		KKASSERT((vp->v_flag & (VFREE|VINACTIVE)) ==
			  (VFREE|VINACTIVE));
		KKASSERT(sysref_isinactive(&vp->v_sysref));
		__vbusy(vp);

		/*
		 * Holding the VX lock on an inactive vnode prevents it
		 * from being reactivated or reused.  New namecache
		 * associations can only be made using active vnodes.
		 *
		 * Another thread may be blocked on our vnode lock while
		 * holding a namecache lock.  We can only reuse this vnode
		 * if we can clear all namecache associations without
		 * blocking.
		 */
		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp)) {
				__vfreetail(vp);
				vx_unlock(vp);
				continue;
			}
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}

		/*
		 * We can reuse the vnode if no primary or auxiliary
		 * references remain other than ours, else put it
		 * back on the free list and keep looking.
		 *
		 * Either the free list inherits the last reference
		 * or we fall through and sysref_activate() the last
		 * reference.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made.
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		if (vp->v_auxrefs ||
		    !sysref_islastdeactivation(&vp->v_sysref)) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * Return a VX locked vnode suitable for reuse.  The caller
		 * inherits the sysref.
		 */
		return(vp);
	}
	return(NULL);
}

/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & refd.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) configurations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes)
		vnlru_proc_wait();

	/*
	 * Attempt to reuse a vnode already on the free list, allocating
	 * a new vnode if we can't find one or if we have not reached a
	 * reasonable minimum for good LRU performance.
	 */
	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes)
		vp = allocfreevnode();
	else
		vp = NULL;
	if (vp == NULL) {
		vp = sysref_alloc(&vnode_sysref_class);
		lockmgr(&vp->v_lock, LK_EXCLUSIVE);
		numvnodes++;
	}

	/*
	 * We are using a managed sysref class, so vnode fields are only
	 * zeroed on initial allocation from the backing store, not
	 * on reallocation.  Thus we have to clear these fields for both
	 * reallocation and reuse.
	 */
#ifdef INVARIANTS
	if (vp->v_data)
		panic("cleaned vnode isn't");
	if (vp->v_track_read.bk_active + vp->v_track_write.bk_active)
		panic("Clean vnode has pending I/O's");
	if (vp->v_flag & VONWORKLST)
		panic("Clean vnode still pending on syncer worklist!");
	if (!RB_EMPTY(&vp->v_rbdirty_tree))
		panic("Clean vnode still has dirty buffers!");
	if (!RB_EMPTY(&vp->v_rbclean_tree))
		panic("Clean vnode still has clean buffers!");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("Clean vnode still on hash tree!");
	KKASSERT(vp->v_mount == NULL);
#endif
	vp->v_flag = 0;
	vp->v_lastw = 0;
	vp->v_lasta = 0;
	vp->v_cstart = 0;
	vp->v_clen = 0;
	vp->v_socket = 0;
	vp->v_opencount = 0;
	vp->v_writecount = 0;	/* XXX */
	lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	KKASSERT(TAILQ_FIRST(&vp->v_namecache) == NULL);
	/* exclusive lock still held */

	/*
	 * Note: sysref needs to be activated to convert -0x40000000 to +1.
	 * The -0x40000000 comes from the last ref on reuse, and from
	 * sysref_init() on allocate.
	 */
	sysref_activate(&vp->v_sysref);
	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_ops = NULL;
	vp->v_data = NULL;
	KKASSERT(vp->v_mount == NULL);

	return (vp);
}
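
/*
 * Usage sketch (illustrative only, not part of the original code): a
 * caller, typically a higher-level helper such as getnewvnode(),
 * finishes initializing the active, VX locked, referenced vnode that
 * allocvnode() returns:
 *
 *	vp = allocvnode(0, 0);		lktimeout/lkflags per filesystem
 *	vp->v_type = VREG;		example type
 *	vp->v_data = ip;		'ip' is hypothetical fs-private data
 *	...
 *	vput(vp);			when done with the lock and ref
 */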

int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		--n;
		if ((vp = allocfreevnode()) == NULL)
			break;
		vx_put(vp);
		--numvnodes;
		++count;	/* count vnodes actually freed */
	}
	return(count);
}