/*
 * Copyright (c) 2004,2013-2017 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External lock/ref-related vnode functions
 *
 * vs_state transition locking requirements:
 *
 *	INACTIVE -> CACHED|DYING	vx_lock(excl) + vi->spin
 *	DYING    -> CACHED		vx_lock(excl)
 *	ACTIVE   -> INACTIVE		(none)       + v_spin + vi->spin
 *	INACTIVE -> ACTIVE		vn_lock(any) + v_spin + vi->spin
 *	CACHED   -> ACTIVE		vn_lock(any) + v_spin + vi->spin
 *
 * NOTE: Switching to/from ACTIVE/INACTIVE requires v_spin and vi->spin.
 *
 *	 Switching into ACTIVE also requires a vref and vnode lock, however
 *	 the vnode lock is allowed to be SHARED.
 *
 *	 Switching into a CACHED or DYING state requires an exclusive vnode
 *	 lock or vx_lock (which is almost the same thing).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#define VACT_MAX	10
#define VACT_INC	2

static void vnode_terminate(struct vnode *vp);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");

/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point, and otherwise inserted
 * at the tail.
 *
 * The vnode code goes to great lengths to avoid moving vnodes between
 * lists, but sometimes it is unavoidable.  For this situation we try to
 * avoid lock contention but we do not try very hard to avoid cache line
 * congestion.  A modestly sized hash table is used.
 */
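/*
 * Hash a vnode pointer to a per-cpu list index: the xor with a fixed
 * constant and the reduction modulo a large prime scatter the (aligned,
 * and therefore low-entropy) pointer bits before the final reduction
 * modulo ncpus.
 */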
#define VLIST_PRIME2	123462047LU
#define VLIST_XOR	(uintptr_t)0xab4582fa8322fb71LLU

#define VLIST_HASH(vp)	(((uintptr_t)vp ^ VLIST_XOR) % \
			 VLIST_PRIME2 % (unsigned)ncpus)

static struct vnode_index *vnode_list_hash;

int activevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, activevnodes, CTLFLAG_RD,
	&activevnodes, 0, "Number of active nodes");
int cachedvnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, cachedvnodes, CTLFLAG_RD,
	&cachedvnodes, 0, "Number of total cached nodes");
int inactivevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, inactivevnodes, CTLFLAG_RD,
	&inactivevnodes, 0, "Number of inactive nodes");
static int batchfreevnodes = 5;
SYSCTL_INT(_debug, OID_AUTO, batchfreevnodes, CTLFLAG_RW,
	&batchfreevnodes, 0, "Number of vnodes to free at once");
#ifdef TRACKVNODE
static u_long trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
	&trackvnode, 0, "");
#endif

/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	int i;

	kmalloc_raise_limit(M_VNODE, 0);	/* unlimited */
	vnode_list_hash = kmalloc(sizeof(*vnode_list_hash) * ncpus,
				  M_VNODE, M_ZERO | M_WAITOK);
	for (i = 0; i < ncpus; ++i) {
		struct vnode_index *vi = &vnode_list_hash[i];

		TAILQ_INIT(&vi->inactive_list);
		TAILQ_INIT(&vi->active_list);
		TAILQ_INSERT_TAIL(&vi->active_list, &vi->active_rover, v_list);
		spin_init(&vi->spin, "vfslock");
	}
}

/*
 * Misc functions
 */
static __inline
void
_vsetflags(struct vnode *vp, int flags)
{
	atomic_set_int(&vp->v_flag, flags);
}

static __inline
void
_vclrflags(struct vnode *vp, int flags)
{
	atomic_clear_int(&vp->v_flag, flags);
}

void
vsetflags(struct vnode *vp, int flags)
{
	_vsetflags(vp, flags);
}

void
vclrflags(struct vnode *vp, int flags)
{
	_vclrflags(vp, flags);
}

/*
 * Place the vnode on the active list.
 *
 * Caller must hold vp->v_spin
 */
static __inline
void
_vactivate(struct vnode *vp)
{
	struct vnode_index *vi = &vnode_list_hash[VLIST_HASH(vp)];

#ifdef TRACKVNODE
	if ((u_long)vp == trackvnode)
		kprintf("_vactivate %p %08x\n", vp, vp->v_flag);
#endif
	spin_lock(&vi->spin);

	switch(vp->v_state) {
	case VS_ACTIVE:
		spin_unlock(&vi->spin);
		panic("_vactivate: already active");
		/* NOT REACHED */
		return;
	case VS_INACTIVE:
		TAILQ_REMOVE(&vi->inactive_list, vp, v_list);
		atomic_add_int(&mycpu->gd_inactivevnodes, -1);
		break;
	case VS_CACHED:
	case VS_DYING:
		break;
	}
	TAILQ_INSERT_TAIL(&vi->active_list, vp, v_list);
	vp->v_state = VS_ACTIVE;
	spin_unlock(&vi->spin);
	atomic_add_int(&mycpu->gd_activevnodes, 1);
}
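/*
 * NOTE: gd_activevnodes, gd_inactivevnodes and gd_cachedvnodes are
 *	 per-cpu accumulators adjusted with atomic ops; the system-wide
 *	 sysctl values above are only refreshed when
 *	 synchronizevnodecount() sums them.
 */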
/*
 * Put a vnode on the inactive list.
 *
 * Caller must hold v_spin
 */
static __inline
void
_vinactive(struct vnode *vp)
{
	struct vnode_index *vi = &vnode_list_hash[VLIST_HASH(vp)];

#ifdef TRACKVNODE
	if ((u_long)vp == trackvnode) {
		kprintf("_vinactive %p %08x\n", vp, vp->v_flag);
		print_backtrace(-1);
	}
#endif
	spin_lock(&vi->spin);

	/*
	 * Remove from active list if it is sitting on it
	 */
	switch(vp->v_state) {
	case VS_ACTIVE:
		TAILQ_REMOVE(&vi->active_list, vp, v_list);
		atomic_add_int(&mycpu->gd_activevnodes, -1);
		break;
	case VS_INACTIVE:
		spin_unlock(&vi->spin);
		panic("_vinactive: already inactive");
		/* NOT REACHED */
		return;
	case VS_CACHED:
	case VS_DYING:
		break;
	}

	/*
	 * Distinguish between basically dead vnodes, vnodes with cached
	 * data, and vnodes without cached data.  A rover will shift the
	 * vnodes around as their cache status is lost.
	 */
	if (vp->v_flag & VRECLAIMED) {
		TAILQ_INSERT_HEAD(&vi->inactive_list, vp, v_list);
	} else {
		TAILQ_INSERT_TAIL(&vi->inactive_list, vp, v_list);
	}
	vp->v_state = VS_INACTIVE;
	spin_unlock(&vi->spin);
	atomic_add_int(&mycpu->gd_inactivevnodes, 1);
}

/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead), but might be called
 * with other states.
 */
void
vref(struct vnode *vp)
{
	KASSERT((VREFCNT(vp) > 0 && vp->v_state != VS_INACTIVE),
		("vref: bad refcnt %08x %d", vp->v_refcnt, vp->v_state));
	atomic_add_int(&vp->v_refcnt, 1);
}

void
synchronizevnodecount(void)
{
	int nca = 0;
	int act = 0;
	int ina = 0;
	int i;

	for (i = 0; i < ncpus; ++i) {
		globaldata_t gd = globaldata_find(i);
		nca += gd->gd_cachedvnodes;
		act += gd->gd_activevnodes;
		ina += gd->gd_inactivevnodes;
	}
	cachedvnodes = nca;
	activevnodes = act;
	inactivevnodes = ina;
}

/*
 * Count number of cached vnodes.  This is middling expensive so be
 * careful not to make this call in the critical path.  Each cpu tracks
 * its own accumulator.  The individual accumulators must be summed
 * together to get an accurate value.
 */
int
countcachedvnodes(void)
{
	int i;
	int n = 0;

	for (i = 0; i < ncpus; ++i) {
		globaldata_t gd = globaldata_find(i);
		n += gd->gd_cachedvnodes;
	}
	return n;
}

int
countcachedandinactivevnodes(void)
{
	int i;
	int n = 0;

	for (i = 0; i < ncpus; ++i) {
		globaldata_t gd = globaldata_find(i);
		n += gd->gd_cachedvnodes + gd->gd_inactivevnodes;
	}
	return n;
}
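/*
 * Overview of v_refcnt as used below (a summary; the authoritative bit
 * definitions live in <sys/vnode.h>): the low VREF_MASK bits hold the
 * actual reference count, VREF_FINALIZE requests deactivation on the
 * next 1->0 transition, and VREF_TERMINATE marks a vnode whose last
 * real ref has been released and which only vget() may reactivate.
 */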
/*
 * Release a ref on an active or inactive vnode.
 *
 * Caller has no other requirements.
 *
 * If VREF_FINALIZE is set this will deactivate the vnode on the 1->0
 * transition, otherwise we leave the vnode in the active list and
 * do a lockless transition to 0, which is very important for the
 * critical path.
 *
 * (vrele() is not called when a vnode is being destroyed w/kfree)
 */
void
vrele(struct vnode *vp)
{
	for (;;) {
		int count = vp->v_refcnt;
		cpu_ccfence();
		KKASSERT((count & VREF_MASK) > 0);
		KKASSERT(vp->v_state == VS_ACTIVE ||
			 vp->v_state == VS_INACTIVE);

		/*
		 * 2+ case
		 */
		if ((count & VREF_MASK) > 1) {
			if (atomic_cmpset_int(&vp->v_refcnt, count, count - 1))
				break;
			continue;
		}

		/*
		 * 1->0 transition case must handle possible finalization.
		 * When finalizing we transition 1->0x40000000.  Note that
		 * cachedvnodes is only adjusted on transitions to ->0.
		 *
		 * WARNING! VREF_TERMINATE can be cleared at any point
		 *	    when the refcnt is non-zero (by vget()) and
		 *	    the vnode has not been reclaimed.  Thus
		 *	    transitions out of VREF_TERMINATE do not have
		 *	    to mess with cachedvnodes.
		 */
		if (count & VREF_FINALIZE) {
			vx_lock(vp);
			if (atomic_cmpset_int(&vp->v_refcnt,
					      count, VREF_TERMINATE)) {
				vnode_terminate(vp);
				break;
			}
			vx_unlock(vp);
		} else {
			if (atomic_cmpset_int(&vp->v_refcnt, count, 0)) {
				atomic_add_int(&mycpu->gd_cachedvnodes, 1);
				break;
			}
		}
		/* retry */
	}
}

/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent deactivation
 * or reclamation of the vnode, but will prevent the vnode from being
 * destroyed (kfree()'d).
 *
 * WARNING!  vhold() must not acquire v_spin.  The spinlock may or may not
 *	     already be held by the caller.  vdrop() will clean up the
 *	     free list state.
 */
void
vhold(struct vnode *vp)
{
	atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 */
void
vdrop(struct vnode *vp)
{
	atomic_add_int(&vp->v_auxrefs, -1);
}
/*
 * This function is called on the 1->0 transition (which is actually
 * 1->VREF_TERMINATE) when VREF_FINALIZE is set, forcing deactivation
 * of the vnode.
 *
 * Additional vrefs are allowed to race but will not result in a reentrant
 * call to vnode_terminate() due to refcnt being VREF_TERMINATE.  This
 * prevents additional 1->0 transitions.
 *
 * ONLY A VGET() CAN REACTIVATE THE VNODE.
 *
 * Caller must hold the VX lock.
 *
 * NOTE: v_mount may be NULL due to assignment to dead_vnode_vops
 *
 * NOTE: The vnode may be marked inactive with dirty buffers
 *	 or dirty pages in its cached VM object still present.
 *
 * NOTE: VS_FREE should not be set on entry (the vnode was expected to
 *	 previously be active).  We lose control of the vnode the instant
 *	 it is placed on the free list.
 *
 *	 The VX lock is required when transitioning to VS_CACHED but is
 *	 not sufficient for the vshouldfree() interlocked test or when
 *	 transitioning away from VS_CACHED.  v_spin is also required for
 *	 those cases.
 */
static
void
vnode_terminate(struct vnode *vp)
{
	KKASSERT(vp->v_state == VS_ACTIVE);

	if ((vp->v_flag & VINACTIVE) == 0) {
		_vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
	}
	spin_lock(&vp->v_spin);
	_vinactive(vp);
	spin_unlock(&vp->v_spin);

	vx_unlock(vp);
}

/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 */
void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

/****************************************************************
 *			VNODE ACQUISITION FUNCTIONS		*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode that has no
 * chance of being destroyed in a SMP race.  That means the caller will
 * usually either hold an auxiliary reference (such as the namecache)
 * or hold some other lock that ensures that the vnode cannot be destroyed.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget() can be called with LK_NOWAIT and will return EBUSY if the
 * lock cannot be immediately acquired.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
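/*
 * Typical usage (sketch): a caller that holds something preventing the
 * vnode from being destroyed (e.g. an auxiliary namecache reference)
 * reactivates and locks the vnode with vget() and releases it with the
 * matching vput(), which unlocks and then vrele()s:
 *
 *	error = vget(vp, LK_SHARED);
 *	if (error == 0) {
 *		... operate on the referenced, locked, active vnode ...
 *		vput(vp);
 *	}
 */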
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.
	 *
	 * NOTE: The requested lock might be a shared lock and does
	 *	 not protect our access to the refcnt or other fields.
	 */
	if ((atomic_fetchadd_int(&vp->v_refcnt, 1) & VREF_MASK) == 0)
		atomic_add_int(&mycpu->gd_cachedvnodes, -1);

	if ((error = vn_lock(vp, flags | LK_FAILRECLAIM)) != 0) {
		/*
		 * The lock failed, undo and return an error.  This will not
		 * normally trigger a termination.
		 */
		vrele(vp);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more, undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else if (vp->v_state == VS_ACTIVE) {
		/*
		 * A VS_ACTIVE vnode coupled with the fact that we have
		 * a vnode lock (even if shared) prevents v_state from
		 * changing.  Since the vnode is not in a VRECLAIMED state,
		 * we can safely clear VINACTIVE.
		 *
		 * It is possible for a shared lock to cause a race with
		 * another thread that is also in the process of clearing
		 * VREF_TERMINATE, meaning that we might return with it still
		 * set and then assert in a later vref().  The solution is to
		 * unconditionally clear VREF_TERMINATE here as well.
		 *
		 * NOTE! Multiple threads may clear VINACTIVE if this is
		 *	 a shared lock.  This race is allowed.
		 */
		_vclrflags(vp, VINACTIVE);	/* SMP race ok */
		vp->v_act += VACT_INC;
		if (vp->v_act > VACT_MAX)	/* SMP race ok */
			vp->v_act = VACT_MAX;
		error = 0;
		atomic_clear_int(&vp->v_refcnt, VREF_TERMINATE);
	} else {
		/*
		 * If the vnode is not VS_ACTIVE it must be reactivated
		 * in addition to clearing VINACTIVE.  An exclusive spin_lock
		 * is needed to manipulate the vnode's list.
		 *
		 * Because the lockmgr lock might be shared, we might race
		 * another reactivation, which we handle.  In this situation,
		 * however, the refcnt prevents other v_state races.
		 *
		 * As with above, clearing VINACTIVE is allowed to race other
		 * clearings of VINACTIVE.
		 *
		 * VREF_TERMINATE and VREF_FINALIZE can only be cleared when
		 * the refcnt is non-zero and the vnode has not been
		 * reclaimed.  This also means that the transitions do
		 * not affect cachedvnodes.
		 *
		 * It is possible for a shared lock to cause a race with
		 * another thread that is also in the process of clearing
		 * VREF_TERMINATE, meaning that we might return with it still
		 * set and then assert in a later vref().  The solution is to
		 * unconditionally clear VREF_TERMINATE here as well.
		 */
		_vclrflags(vp, VINACTIVE);
		vp->v_act += VACT_INC;
		if (vp->v_act > VACT_MAX)	/* SMP race ok */
			vp->v_act = VACT_MAX;
		spin_lock(&vp->v_spin);

		switch(vp->v_state) {
		case VS_INACTIVE:
			_vactivate(vp);
			atomic_clear_int(&vp->v_refcnt, VREF_TERMINATE |
							VREF_FINALIZE);
			spin_unlock(&vp->v_spin);
			break;
		case VS_CACHED:
			_vactivate(vp);
			atomic_clear_int(&vp->v_refcnt, VREF_TERMINATE |
							VREF_FINALIZE);
			spin_unlock(&vp->v_spin);
			break;
		case VS_ACTIVE:
			atomic_clear_int(&vp->v_refcnt, VREF_FINALIZE |
							VREF_TERMINATE);
			spin_unlock(&vp->v_spin);
			break;
		case VS_DYING:
			spin_unlock(&vp->v_spin);
			panic("Impossible VS_DYING state");
			break;
		}
		error = 0;
	}
	return(error);
}

#ifdef DEBUG_VPUT

void
debug_vput(struct vnode *vp, const char *filename, int line)
{
	kprintf("vput(%p) %s:%d\n", vp, filename, line);
	vn_unlock(vp);
	vrele(vp);
}

#else

void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}

#endif

/*
 * Acquire the vnode lock unguarded.
 *
 * The non-blocking version also uses a slightly different mechanic.
 * This function will explicitly fail not only if it cannot acquire
 * the lock normally, but also if the caller already holds a lock.
 *
 * The adjusted mechanic is used to close a loophole where complex
 * VOP_RECLAIM code can circle around recursively and allocate the
 * same vnode it is trying to destroy from the freelist.
 *
 * Any filesystem (aka UFS) which puts LK_CANRECURSE in lk_flags can
 * cause the incorrect behavior to occur.  If not for that lockmgr()
 * would do the right thing.
 *
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 */
void
vx_get(struct vnode *vp)
{
	if ((atomic_fetchadd_int(&vp->v_refcnt, 1) & VREF_MASK) == 0)
		atomic_add_int(&mycpu->gd_cachedvnodes, -1);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	if (lockinuse(&vp->v_lock))
		return(EBUSY);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error == 0) {
		if ((atomic_fetchadd_int(&vp->v_refcnt, 1) & VREF_MASK) == 0)
			atomic_add_int(&mycpu->gd_cachedvnodes, -1);
	}
	return(error);
}

/*
 * Release a VX lock that also held a ref on the vnode.  vrele() will handle
 * any needed state transitions.
 *
 * However, filesystems use this function to get rid of unwanted new vnodes
 * so try to get the vnode on the correct queue in that case.
 */
void
vx_put(struct vnode *vp)
{
	if (vp->v_type == VNON || vp->v_type == VBAD)
		atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
	lockmgr(&vp->v_lock, LK_RELEASE);
	vrele(vp);
}

/*
 * Try to reuse a vnode from the free list.  This function is somewhat
 * advisory in that NULL can be returned as a normal case, even if free
 * vnodes are present.
 *
 * The scan is limited because it can result in excessive CPU use during
 * periods of extreme vnode use.
 *
 * NOTE: The returned vnode is not completely initialized.
 */
static
struct vnode *
cleanfreevnode(int maxcount)
{
	struct vnode_index *vi;
	struct vnode *vp;
	int count;
	int trigger = (long)vmstats.v_page_count / (activevnodes * 2 + 1);
	int ri;
	int cpu_count;

	/*
	 * Try to deactivate some vnodes cached on the active list.
	 */
	if (countcachedvnodes() < inactivevnodes)
		goto skip;

	ri = vnode_list_hash[mycpu->gd_cpuid].deac_rover + 1;

	for (count = 0; count < maxcount * 2; ++count, ++ri) {
		vi = &vnode_list_hash[((unsigned)ri >> 4) % ncpus];

		spin_lock(&vi->spin);

		vp = TAILQ_NEXT(&vi->active_rover, v_list);
		TAILQ_REMOVE(&vi->active_list, &vi->active_rover, v_list);
		if (vp == NULL) {
			TAILQ_INSERT_HEAD(&vi->active_list,
					  &vi->active_rover, v_list);
		} else {
			TAILQ_INSERT_AFTER(&vi->active_list, vp,
					   &vi->active_rover, v_list);
		}
		if (vp == NULL) {
			spin_unlock(&vi->spin);
			continue;
		}
		if ((vp->v_refcnt & VREF_MASK) != 0) {
			spin_unlock(&vi->spin);
			vp->v_act += VACT_INC;
			if (vp->v_act > VACT_MAX)	/* SMP race ok */
				vp->v_act = VACT_MAX;
			continue;
		}

		/*
		 * decrement by less if the vnode's object has a lot of
		 * VM pages.  XXX possible SMP races.
		 */
		if (vp->v_act > 0) {
			vm_object_t obj;
			if ((obj = vp->v_object) != NULL &&
			    obj->resident_page_count >= trigger) {
				vp->v_act -= 1;
			} else {
				vp->v_act -= VACT_INC;
			}
			if (vp->v_act < 0)
				vp->v_act = 0;
			spin_unlock(&vi->spin);
			continue;
		}

		/*
		 * Try to deactivate the vnode.
		 */
		if ((atomic_fetchadd_int(&vp->v_refcnt, 1) & VREF_MASK) == 0)
			atomic_add_int(&mycpu->gd_cachedvnodes, -1);
		atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

		spin_unlock(&vi->spin);
		vrele(vp);
	}

	vnode_list_hash[mycpu->gd_cpuid].deac_rover = ri;
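	/*
	 * Second pass: the deactivation scan above feeds the per-cpu
	 * inactive lists over time; now try to pull a fully reclaimable
	 * vnode off one of them.
	 */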
skip:
	/*
	 * Loop trying to lock the first vnode on the free list.
	 * Cycle if we can't.
	 */
	cpu_count = ncpus;
	ri = vnode_list_hash[mycpu->gd_cpuid].free_rover + 1;

	for (count = 0; count < maxcount; ++count, ++ri) {
		vi = &vnode_list_hash[((unsigned)ri >> 4) % ncpus];

		spin_lock(&vi->spin);

		vp = TAILQ_FIRST(&vi->inactive_list);
		if (vp == NULL) {
			spin_unlock(&vi->spin);
			if (--cpu_count == 0)
				break;
			ri = (ri + 16) & ~15;
			--ri;
			continue;
		}

		/*
		 * non-blocking vx_get will also ref the vnode on success.
		 */
		if (vx_get_nonblock(vp)) {
			KKASSERT(vp->v_state == VS_INACTIVE);
			TAILQ_REMOVE(&vi->inactive_list, vp, v_list);
			TAILQ_INSERT_TAIL(&vi->inactive_list, vp, v_list);
			spin_unlock(&vi->spin);
			continue;
		}

		/*
		 * Because we are holding vi->spin the vnode should currently
		 * be inactive and VREF_TERMINATE should still be set.
		 *
		 * Once vi->spin is released the vnode's state should remain
		 * unmodified due to both the lock and ref on it.
		 */
		KKASSERT(vp->v_state == VS_INACTIVE);
		spin_unlock(&vi->spin);
#ifdef TRACKVNODE
		if ((u_long)vp == trackvnode)
			kprintf("cleanfreevnode %p %08x\n", vp, vp->v_flag);
#endif

		/*
		 * Do not reclaim/reuse a vnode while auxiliary refs exist.
		 * This includes namecache refs due to a related ncp being
		 * locked or having children, a VM object association, or
		 * other hold users.
		 *
		 * Do not reclaim/reuse a vnode if someone else has a real
		 * ref on it.  This can occur if a filesystem temporarily
		 * releases the vnode lock during VOP_RECLAIM.
		 */
		if (vp->v_auxrefs ||
		    (vp->v_refcnt & ~VREF_FINALIZE) != VREF_TERMINATE + 1) {
failed:
			if (vp->v_state == VS_INACTIVE) {
				spin_lock(&vi->spin);
				if (vp->v_state == VS_INACTIVE) {
					TAILQ_REMOVE(&vi->inactive_list,
						     vp, v_list);
					TAILQ_INSERT_TAIL(&vi->inactive_list,
							  vp, v_list);
				}
				spin_unlock(&vi->spin);
			}
			vx_put(vp);
			continue;
		}

		/*
		 * VINACTIVE and VREF_TERMINATE are expected to both be set
		 * for vnodes pulled from the inactive list, and cannot be
		 * changed while we hold the vx lock.
		 *
		 * Try to reclaim the vnode.
		 */
		KKASSERT(vp->v_flag & VINACTIVE);
		KKASSERT(vp->v_refcnt & VREF_TERMINATE);

		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp))
				goto failed;
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}

		/*
		 * At this point if there are no other refs or auxrefs on
		 * the vnode with the inactive list locked, and we remove
		 * the vnode from the inactive list, it should not be
		 * possible for anyone else to access the vnode any more.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made and the
		 * vnode should have already been removed from its mountlist.
		 *
		 * Since we hold a VX lock on the vnode it cannot have been
		 * reactivated (moved out of the inactive list).
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		spin_lock(&vi->spin);
		if (vp->v_auxrefs ||
		    (vp->v_refcnt & ~VREF_FINALIZE) != VREF_TERMINATE + 1) {
			spin_unlock(&vi->spin);
			goto failed;
		}
		KKASSERT(vp->v_state == VS_INACTIVE);
		TAILQ_REMOVE(&vi->inactive_list, vp, v_list);
		atomic_add_int(&mycpu->gd_inactivevnodes, -1);
		vp->v_state = VS_DYING;
		spin_unlock(&vi->spin);

		/*
		 * Nothing should have been able to access this vp.  Only
		 * our ref should remain now.
		 */
		atomic_clear_int(&vp->v_refcnt, VREF_TERMINATE|VREF_FINALIZE);
		KASSERT(vp->v_refcnt == 1,
			("vp %p badrefs %08x", vp, vp->v_refcnt));

		/*
		 * Return a VX locked vnode suitable for reuse.
		 */
		vnode_list_hash[mycpu->gd_cpuid].free_rover = ri;
		return(vp);
	}
	vnode_list_hash[mycpu->gd_cpuid].free_rover = ri;
	return(NULL);
}

/*
 * Obtain a new vnode.  The returned vnode is VX locked & vrefd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * We attempt to reuse an already-recycled vnode from our pcpu inactive
 * queue first, and allocate otherwise.  Attempting to recycle inactive
 * vnodes here can lead to numerous deadlocks, particularly with
 * softupdates.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;
	struct vnode_index *vi;

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;

	/*
	 * Do not flag for synchronous recyclement unless there are enough
	 * freeable vnodes to recycle and the number of vnodes has
	 * significantly exceeded our target.  We want the normal vnlru
	 * process to handle the cleaning (at 9/10's) before we are forced
	 * to flag it here at 11/10's for userexit path processing.
	 */
	if (numvnodes >= maxvnodes * 11 / 10 &&
	    cachedvnodes + inactivevnodes >= maxvnodes * 5 / 10) {
		struct thread *td = curthread;
		if (td->td_lwp)
			atomic_set_int(&td->td_lwp->lwp_mpflags, LWP_MP_VNLRU);
	}

	/*
	 * Try to trivially reuse a reclaimed vnode from the head of the
	 * inactive list for this cpu.  Any vnode cycling which occurs
	 * which terminates the vnode will cause it to be returned to the
	 * same pcpu structure (e.g. unlink calls).
	 */
	vi = &vnode_list_hash[mycpuid];
	spin_lock(&vi->spin);

	vp = TAILQ_FIRST(&vi->inactive_list);
	if (vp && (vp->v_flag & VRECLAIMED)) {
		/*
		 * non-blocking vx_get will also ref the vnode on success.
		 */
		if (vx_get_nonblock(vp)) {
			KKASSERT(vp->v_state == VS_INACTIVE);
			TAILQ_REMOVE(&vi->inactive_list, vp, v_list);
			TAILQ_INSERT_TAIL(&vi->inactive_list, vp, v_list);
			spin_unlock(&vi->spin);
			goto slower;
		}

		/*
		 * Because we are holding vi->spin the vnode should currently
		 * be inactive and VREF_TERMINATE should still be set.
		 *
		 * Once vi->spin is released the vnode's state should remain
		 * unmodified due to both the lock and ref on it.
		 */
		KKASSERT(vp->v_state == VS_INACTIVE);
#ifdef TRACKVNODE
		if ((u_long)vp == trackvnode)
			kprintf("allocvnode %p %08x\n", vp, vp->v_flag);
#endif

		/*
		 * Do not reclaim/reuse a vnode while auxiliary refs exist.
		 * This includes namecache refs due to a related ncp being
		 * locked or having children, a VM object association, or
		 * other hold users.
		 *
		 * Do not reclaim/reuse a vnode if someone else has a real
		 * ref on it.  This can occur if a filesystem temporarily
		 * releases the vnode lock during VOP_RECLAIM.
		 */
		if (vp->v_auxrefs ||
		    (vp->v_refcnt & ~VREF_FINALIZE) != VREF_TERMINATE + 1) {
			if (vp->v_state == VS_INACTIVE) {
				TAILQ_REMOVE(&vi->inactive_list,
					     vp, v_list);
				TAILQ_INSERT_TAIL(&vi->inactive_list,
						  vp, v_list);
			}
			spin_unlock(&vi->spin);
			vx_put(vp);
			goto slower;
		}

		/*
		 * VINACTIVE and VREF_TERMINATE are expected to both be set
		 * for vnodes pulled from the inactive list, and cannot be
		 * changed while we hold the vx lock.
		 *
		 * Try to reclaim the vnode.
		 */
		KKASSERT(vp->v_flag & VINACTIVE);
		KKASSERT(vp->v_refcnt & VREF_TERMINATE);

		if ((vp->v_flag & VRECLAIMED) == 0) {
			spin_unlock(&vi->spin);
			vx_put(vp);
			goto slower;
		}

		/*
		 * At this point if there are no other refs or auxrefs on
		 * the vnode with the inactive list locked, and we remove
		 * the vnode from the inactive list, it should not be
		 * possible for anyone else to access the vnode any more.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made and the
		 * vnode should have already been removed from its mountlist.
		 *
		 * Since we hold a VX lock on the vnode it cannot have been
		 * reactivated (moved out of the inactive list).
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		KKASSERT(vp->v_state == VS_INACTIVE);
		TAILQ_REMOVE(&vi->inactive_list, vp, v_list);
		atomic_add_int(&mycpu->gd_inactivevnodes, -1);
		vp->v_state = VS_DYING;
		spin_unlock(&vi->spin);

		/*
		 * Nothing should have been able to access this vp.  Only
		 * our ref should remain now.
		 *
		 * At this point we can kfree() the vnode if we want to.
		 * Instead, we reuse it for the allocation.
		 */
		atomic_clear_int(&vp->v_refcnt, VREF_TERMINATE|VREF_FINALIZE);
		KASSERT(vp->v_refcnt == 1,
			("vp %p badrefs %08x", vp, vp->v_refcnt));
		bzero(vp, sizeof(*vp));
	} else {
		spin_unlock(&vi->spin);
slower:
		vp = kmalloc(sizeof(*vp), M_VNODE, M_ZERO | M_WAITOK);
		atomic_add_int(&numvnodes, 1);
	}

	lwkt_token_init(&vp->v_token, "vnode");
	lockinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	spin_init(&vp->v_spin, "allocvnode");

	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
	vp->v_refcnt = 1;
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */

	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_state = VS_CACHED;
	_vactivate(vp);

	return (vp);
}
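/*
 * NOTE: A filesystem that decides not to use a vnode obtained from
 *	 allocvnode() can dispose of it with vx_put().  Because v_type
 *	 is still VNON at that point, vx_put() flags the vnode for
 *	 finalization and it is recycled through the normal paths above.
 */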
/*
 * Called after a process has allocated a vnode via allocvnode()
 * and we detected that too many vnodes were present.
 *
 * This function is called just prior to a return to userland if the
 * process at some point had to allocate a new vnode during the last
 * system call and the vnode count was found to be excessive.
 *
 * This is a synchronous path that we do not normally want to execute.
 *
 * Flagged at >= 11/10's, runs if >= 10/10, vnlru runs at 9/10.
 *
 * WARNING: Sometimes numvnodes can blow out due to children being
 *	    present under directory vnodes in the namecache.  For the
 *	    moment use an if() instead of a while() and note that if
 *	    we were to use a while() we would still have to break out
 *	    if freesomevnodes() returned 0.  vnlru will also be trying
 *	    hard to free vnodes at the same time (with a lower trigger
 *	    pointer).
 */
void
allocvnode_gc(void)
{
	if (numvnodes >= maxvnodes &&
	    countcachedandinactivevnodes() >= maxvnodes * 5 / 10) {
		freesomevnodes(batchfreevnodes);
	}
}

int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		if ((vp = cleanfreevnode(n)) == NULL)
			break;
		vx_unlock(vp);
		--n;
		++count;
		kfree(vp, M_VNODE);
		atomic_add_int(&numvnodes, -1);
	}
	return(count);
}