1 /* $NetBSD: vfs_vnode.c,v 1.73 2017/01/27 10:50:10 hannken Exp $ */ 2 3 /*- 4 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Copyright (c) 1989, 1993 35 * The Regents of the University of California. All rights reserved. 36 * (c) UNIX System Laboratories, Inc. 37 * All or some portions of this file are derived from material licensed 38 * to the University of California by American Telephone and Telegraph 39 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 40 * the permission of UNIX System Laboratories, Inc. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 3. Neither the name of the University nor the names of its contributors 51 * may be used to endorse or promote products derived from this software 52 * without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 * 66 * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94 67 */ 68 69 /* 70 * The vnode cache subsystem. 71 * 72 * Life-cycle 73 * 74 * Normally, there are two points where new vnodes are created: 75 * VOP_CREATE(9) and VOP_LOOKUP(9). The life-cycle of a vnode 76 * starts in one of the following ways: 77 * 78 * - Allocation, via vcache_get(9) or vcache_new(9). 79 * - Reclamation of inactive vnode, via vcache_vget(9). 80 * 81 * Recycle from a free list, via getnewvnode(9) -> getcleanvnode(9) 82 * was another, traditional way. Currently, only the draining thread 83 * recycles the vnodes. This behaviour might be revisited. 84 * 85 * The life-cycle ends when the last reference is dropped, usually 86 * in VOP_REMOVE(9). In such case, VOP_INACTIVE(9) is called to inform 87 * the file system that vnode is inactive. Via this call, file system 88 * indicates whether vnode can be recycled (usually, it checks its own 89 * references, e.g. count of links, whether the file was removed). 90 * 91 * Depending on indication, vnode can be put into a free list (cache), 92 * or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9) to 93 * disassociate underlying file system from the vnode, and finally 94 * destroyed. 95 * 96 * Vnode state 97 * 98 * Vnode is always in one of six states: 99 * - MARKER This is a marker vnode to help list traversal. It 100 * will never change its state. 101 * - LOADING Vnode is associating underlying file system and not 102 * yet ready to use. 103 * - ACTIVE Vnode has associated underlying file system and is 104 * ready to use. 105 * - BLOCKED Vnode is active but cannot get new references. 106 * - RECLAIMING Vnode is disassociating from the underlying file 107 * system. 108 * - RECLAIMED Vnode has disassociated from underlying file system 109 * and is dead. 110 * 111 * Valid state changes are: 112 * LOADING -> ACTIVE 113 * Vnode has been initialised in vcache_get() or 114 * vcache_new() and is ready to use. 115 * ACTIVE -> RECLAIMING 116 * Vnode starts disassociation from underlying file 117 * system in vcache_reclaim(). 118 * RECLAIMING -> RECLAIMED 119 * Vnode finished disassociation from underlying file 120 * system in vcache_reclaim(). 121 * ACTIVE -> BLOCKED 122 * Either vcache_rekey*() is changing the vnode key or 123 * vrelel() is about to call VOP_INACTIVE(). 124 * BLOCKED -> ACTIVE 125 * The block condition is over. 126 * LOADING -> RECLAIMED 127 * Either vcache_get() or vcache_new() failed to 128 * associate the underlying file system or vcache_rekey*() 129 * drops a vnode used as placeholder. 130 * 131 * Of these states LOADING, BLOCKED and RECLAIMING are intermediate 132 * and it is possible to wait for state change. 133 * 134 * State is protected with v_interlock with one exception: 135 * to change from LOADING both v_interlock and vcache_lock must be held 136 * so it is possible to check "state == LOADING" without holding 137 * v_interlock. See vcache_get() for details. 138 * 139 * Reference counting 140 * 141 * Vnode is considered active, if reference count (vnode_t::v_usecount) 142 * is non-zero. It is maintained using: vref(9) and vrele(9), as well 143 * as vput(9), routines. Common points holding references are e.g. 144 * file openings, current working directory, mount points, etc. 145 * 146 * Note on v_usecount and its locking 147 * 148 * At nearly all points it is known that v_usecount could be zero, 149 * the vnode_t::v_interlock will be held. To change v_usecount away 150 * from zero, the interlock must be held. To change from a non-zero 151 * value to zero, again the interlock must be held. 152 * 153 * Changing the usecount from a non-zero value to a non-zero value can 154 * safely be done using atomic operations, without the interlock held. 155 * 156 */ 157 158 #include <sys/cdefs.h> 159 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.73 2017/01/27 10:50:10 hannken Exp $"); 160 161 #include <sys/param.h> 162 #include <sys/kernel.h> 163 164 #include <sys/atomic.h> 165 #include <sys/buf.h> 166 #include <sys/conf.h> 167 #include <sys/device.h> 168 #include <sys/hash.h> 169 #include <sys/kauth.h> 170 #include <sys/kmem.h> 171 #include <sys/kthread.h> 172 #include <sys/module.h> 173 #include <sys/mount.h> 174 #include <sys/namei.h> 175 #include <sys/syscallargs.h> 176 #include <sys/sysctl.h> 177 #include <sys/systm.h> 178 #include <sys/vnode_impl.h> 179 #include <sys/wapbl.h> 180 #include <sys/fstrans.h> 181 182 #include <uvm/uvm.h> 183 #include <uvm/uvm_readahead.h> 184 185 /* Flags to vrelel. */ 186 #define VRELEL_ASYNC_RELE 0x0001 /* Always defer to vrele thread. */ 187 188 u_int numvnodes __cacheline_aligned; 189 190 /* 191 * There are three lru lists: one holds vnodes waiting for async release, 192 * one is for vnodes which have no buffer/page references and 193 * one for those which do (i.e. v_holdcnt is non-zero). 194 */ 195 static vnodelst_t lru_vrele_list __cacheline_aligned; 196 static vnodelst_t lru_free_list __cacheline_aligned; 197 static vnodelst_t lru_hold_list __cacheline_aligned; 198 static kmutex_t vdrain_lock __cacheline_aligned; 199 static kcondvar_t vdrain_cv __cacheline_aligned; 200 static int vdrain_gen; 201 static kcondvar_t vdrain_gen_cv; 202 static bool vdrain_retry; 203 static lwp_t * vdrain_lwp; 204 SLIST_HEAD(hashhead, vnode_impl); 205 static kmutex_t vcache_lock __cacheline_aligned; 206 static kcondvar_t vcache_cv __cacheline_aligned; 207 static u_int vcache_hashsize; 208 static u_long vcache_hashmask; 209 static struct hashhead *vcache_hashtab __cacheline_aligned; 210 static pool_cache_t vcache_pool; 211 static void lru_requeue(vnode_t *, vnodelst_t *); 212 static vnodelst_t * lru_which(vnode_t *); 213 static vnode_impl_t * vcache_alloc(void); 214 static void vcache_free(vnode_impl_t *); 215 static void vcache_init(void); 216 static void vcache_reinit(void); 217 static void vcache_reclaim(vnode_t *); 218 static void vrelel(vnode_t *, int); 219 static void vdrain_thread(void *); 220 static void vnpanic(vnode_t *, const char *, ...) 221 __printflike(2, 3); 222 223 /* Routines having to do with the management of the vnode table. */ 224 extern struct mount *dead_rootmount; 225 extern int (**dead_vnodeop_p)(void *); 226 extern struct vfsops dead_vfsops; 227 228 /* Vnode state operations and diagnostics. */ 229 230 #if defined(DIAGNOSTIC) 231 232 #define VSTATE_GET(vp) \ 233 vstate_assert_get((vp), __func__, __LINE__) 234 #define VSTATE_CHANGE(vp, from, to) \ 235 vstate_assert_change((vp), (from), (to), __func__, __LINE__) 236 #define VSTATE_WAIT_STABLE(vp) \ 237 vstate_assert_wait_stable((vp), __func__, __LINE__) 238 #define VSTATE_ASSERT(vp, state) \ 239 vstate_assert((vp), (state), __func__, __LINE__) 240 241 static void 242 vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line) 243 { 244 vnode_impl_t *vip = VNODE_TO_VIMPL(vp); 245 246 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line); 247 248 if (__predict_true(vip->vi_state == state)) 249 return; 250 vnpanic(vp, "state is %s, expected %s at %s:%d", 251 vstate_name(vip->vi_state), vstate_name(state), func, line); 252 } 253 254 static enum vnode_state 255 vstate_assert_get(vnode_t *vp, const char *func, int line) 256 { 257 vnode_impl_t *vip = VNODE_TO_VIMPL(vp); 258 259 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line); 260 if (vip->vi_state == VS_MARKER) 261 vnpanic(vp, "state is %s at %s:%d", 262 vstate_name(vip->vi_state), func, line); 263 264 return vip->vi_state; 265 } 266 267 static void 268 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line) 269 { 270 vnode_impl_t *vip = VNODE_TO_VIMPL(vp); 271 272 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line); 273 if (vip->vi_state == VS_MARKER) 274 vnpanic(vp, "state is %s at %s:%d", 275 vstate_name(vip->vi_state), func, line); 276 277 while (vip->vi_state != VS_ACTIVE && vip->vi_state != VS_RECLAIMED) 278 cv_wait(&vp->v_cv, vp->v_interlock); 279 280 if (vip->vi_state == VS_MARKER) 281 vnpanic(vp, "state is %s at %s:%d", 282 vstate_name(vip->vi_state), func, line); 283 } 284 285 static void 286 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to, 287 const char *func, int line) 288 { 289 vnode_impl_t *vip = VNODE_TO_VIMPL(vp); 290 291 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line); 292 if (from == VS_LOADING) 293 KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line); 294 295 if (from == VS_MARKER) 296 vnpanic(vp, "from is %s at %s:%d", 297 vstate_name(from), func, line); 298 if (to == VS_MARKER) 299 vnpanic(vp, "to is %s at %s:%d", 300 vstate_name(to), func, line); 301 if (vip->vi_state != from) 302 vnpanic(vp, "from is %s, expected %s at %s:%d\n", 303 vstate_name(vip->vi_state), vstate_name(from), func, line); 304 if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1) 305 vnpanic(vp, "%s to %s with usecount %d at %s:%d", 306 vstate_name(from), vstate_name(to), vp->v_usecount, 307 func, line); 308 309 vip->vi_state = to; 310 if (from == VS_LOADING) 311 cv_broadcast(&vcache_cv); 312 if (to == VS_ACTIVE || to == VS_RECLAIMED) 313 cv_broadcast(&vp->v_cv); 314 } 315 316 #else /* defined(DIAGNOSTIC) */ 317 318 #define VSTATE_GET(vp) \ 319 (VNODE_TO_VIMPL((vp))->vi_state) 320 #define VSTATE_CHANGE(vp, from, to) \ 321 vstate_change((vp), (from), (to)) 322 #define VSTATE_WAIT_STABLE(vp) \ 323 vstate_wait_stable((vp)) 324 #define VSTATE_ASSERT(vp, state) 325 326 static void 327 vstate_wait_stable(vnode_t *vp) 328 { 329 vnode_impl_t *vip = VNODE_TO_VIMPL(vp); 330 331 while (vip->vi_state != VS_ACTIVE && vip->vi_state != VS_RECLAIMED) 332 cv_wait(&vp->v_cv, vp->v_interlock); 333 } 334 335 static void 336 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to) 337 { 338 vnode_impl_t *vip = VNODE_TO_VIMPL(vp); 339 340 vip->vi_state = to; 341 if (from == VS_LOADING) 342 cv_broadcast(&vcache_cv); 343 if (to == VS_ACTIVE || to == VS_RECLAIMED) 344 cv_broadcast(&vp->v_cv); 345 } 346 347 #endif /* defined(DIAGNOSTIC) */ 348 349 void 350 vfs_vnode_sysinit(void) 351 { 352 int error __diagused; 353 354 dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL); 355 KASSERT(dead_rootmount != NULL); 356 dead_rootmount->mnt_iflag = IMNT_MPSAFE; 357 358 mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE); 359 TAILQ_INIT(&lru_free_list); 360 TAILQ_INIT(&lru_hold_list); 361 TAILQ_INIT(&lru_vrele_list); 362 363 vcache_init(); 364 365 cv_init(&vdrain_cv, "vdrain"); 366 cv_init(&vdrain_gen_cv, "vdrainwt"); 367 error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread, 368 NULL, &vdrain_lwp, "vdrain"); 369 KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error); 370 } 371 372 /* 373 * Allocate a new marker vnode. 374 */ 375 vnode_t * 376 vnalloc_marker(struct mount *mp) 377 { 378 vnode_impl_t *vip; 379 vnode_t *vp; 380 381 vip = pool_cache_get(vcache_pool, PR_WAITOK); 382 memset(vip, 0, sizeof(*vip)); 383 vp = VIMPL_TO_VNODE(vip); 384 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0); 385 vp->v_mount = mp; 386 vp->v_type = VBAD; 387 vip->vi_state = VS_MARKER; 388 389 return vp; 390 } 391 392 /* 393 * Free a marker vnode. 394 */ 395 void 396 vnfree_marker(vnode_t *vp) 397 { 398 vnode_impl_t *vip; 399 400 vip = VNODE_TO_VIMPL(vp); 401 KASSERT(vip->vi_state == VS_MARKER); 402 uvm_obj_destroy(&vp->v_uobj, true); 403 pool_cache_put(vcache_pool, vip); 404 } 405 406 /* 407 * Test a vnode for being a marker vnode. 408 */ 409 bool 410 vnis_marker(vnode_t *vp) 411 { 412 413 return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER); 414 } 415 416 /* 417 * Return the lru list this node should be on. 418 */ 419 static vnodelst_t * 420 lru_which(vnode_t *vp) 421 { 422 423 KASSERT(mutex_owned(vp->v_interlock)); 424 425 if (vp->v_holdcnt > 0) 426 return &lru_hold_list; 427 else 428 return &lru_free_list; 429 } 430 431 /* 432 * Put vnode to end of given list. 433 * Both the current and the new list may be NULL, used on vnode alloc/free. 434 * Adjust numvnodes and signal vdrain thread if there is work. 435 */ 436 static void 437 lru_requeue(vnode_t *vp, vnodelst_t *listhd) 438 { 439 vnode_impl_t *vip; 440 441 mutex_enter(&vdrain_lock); 442 vip = VNODE_TO_VIMPL(vp); 443 if (vip->vi_lrulisthd != NULL) 444 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist); 445 else 446 numvnodes++; 447 vip->vi_lrulisthd = listhd; 448 if (vip->vi_lrulisthd != NULL) 449 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist); 450 else 451 numvnodes--; 452 if (numvnodes > desiredvnodes || listhd == &lru_vrele_list) 453 cv_broadcast(&vdrain_cv); 454 mutex_exit(&vdrain_lock); 455 } 456 457 /* 458 * Reclaim a cached vnode. Used from vdrain_thread only. 459 */ 460 static __inline void 461 vdrain_remove(vnode_t *vp) 462 { 463 struct mount *mp; 464 465 KASSERT(mutex_owned(&vdrain_lock)); 466 467 /* Probe usecount (unlocked). */ 468 if (vp->v_usecount > 0) 469 return; 470 /* Try v_interlock -- we lock the wrong direction! */ 471 if (!mutex_tryenter(vp->v_interlock)) 472 return; 473 /* Probe usecount and state. */ 474 if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_ACTIVE) { 475 mutex_exit(vp->v_interlock); 476 return; 477 } 478 mp = vp->v_mount; 479 if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) { 480 mutex_exit(vp->v_interlock); 481 return; 482 } 483 vdrain_retry = true; 484 mutex_exit(&vdrain_lock); 485 486 if (vcache_vget(vp) == 0) { 487 if (!vrecycle(vp)) 488 vrele(vp); 489 } 490 fstrans_done(mp); 491 492 mutex_enter(&vdrain_lock); 493 } 494 495 /* 496 * Release a cached vnode. Used from vdrain_thread only. 497 */ 498 static __inline void 499 vdrain_vrele(vnode_t *vp) 500 { 501 vnode_impl_t *vip = VNODE_TO_VIMPL(vp); 502 struct mount *mp; 503 504 KASSERT(mutex_owned(&vdrain_lock)); 505 506 mp = vp->v_mount; 507 if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0) 508 return; 509 510 /* 511 * First remove the vnode from the vrele list. 512 * Put it on the last lru list, the last vrele() 513 * will put it back onto the right list before 514 * its v_usecount reaches zero. 515 */ 516 KASSERT(vip->vi_lrulisthd == &lru_vrele_list); 517 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist); 518 vip->vi_lrulisthd = &lru_hold_list; 519 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist); 520 521 vdrain_retry = true; 522 mutex_exit(&vdrain_lock); 523 524 mutex_enter(vp->v_interlock); 525 vrelel(vp, 0); 526 fstrans_done(mp); 527 528 mutex_enter(&vdrain_lock); 529 } 530 531 /* 532 * Helper thread to keep the number of vnodes below desiredvnodes 533 * and release vnodes from asynchronous vrele. 534 */ 535 static void 536 vdrain_thread(void *cookie) 537 { 538 vnodelst_t *listhd[] = { 539 &lru_vrele_list, &lru_free_list, &lru_hold_list 540 }; 541 int i; 542 u_int target; 543 vnode_impl_t *vip, *marker; 544 545 marker = VNODE_TO_VIMPL(vnalloc_marker(NULL)); 546 547 mutex_enter(&vdrain_lock); 548 549 for (;;) { 550 vdrain_retry = false; 551 target = desiredvnodes - desiredvnodes/10; 552 553 for (i = 0; i < __arraycount(listhd); i++) { 554 TAILQ_INSERT_HEAD(listhd[i], marker, vi_lrulist); 555 while ((vip = TAILQ_NEXT(marker, vi_lrulist))) { 556 TAILQ_REMOVE(listhd[i], marker, vi_lrulist); 557 TAILQ_INSERT_AFTER(listhd[i], vip, marker, 558 vi_lrulist); 559 if (listhd[i] == &lru_vrele_list) 560 vdrain_vrele(VIMPL_TO_VNODE(vip)); 561 else if (numvnodes < target) 562 break; 563 else 564 vdrain_remove(VIMPL_TO_VNODE(vip)); 565 } 566 TAILQ_REMOVE(listhd[i], marker, vi_lrulist); 567 } 568 569 if (vdrain_retry) { 570 mutex_exit(&vdrain_lock); 571 yield(); 572 mutex_enter(&vdrain_lock); 573 } else { 574 vdrain_gen++; 575 cv_broadcast(&vdrain_gen_cv); 576 cv_wait(&vdrain_cv, &vdrain_lock); 577 } 578 } 579 } 580 581 /* 582 * vput: unlock and release the reference. 583 */ 584 void 585 vput(vnode_t *vp) 586 { 587 588 VOP_UNLOCK(vp); 589 vrele(vp); 590 } 591 592 /* 593 * Try to drop reference on a vnode. Abort if we are releasing the 594 * last reference. Note: this _must_ succeed if not the last reference. 595 */ 596 static inline bool 597 vtryrele(vnode_t *vp) 598 { 599 u_int use, next; 600 601 for (use = vp->v_usecount;; use = next) { 602 if (use == 1) { 603 return false; 604 } 605 KASSERT(use > 1); 606 next = atomic_cas_uint(&vp->v_usecount, use, use - 1); 607 if (__predict_true(next == use)) { 608 return true; 609 } 610 } 611 } 612 613 /* 614 * Vnode release. If reference count drops to zero, call inactive 615 * routine and either return to freelist or free to the pool. 616 */ 617 static void 618 vrelel(vnode_t *vp, int flags) 619 { 620 bool recycle, defer; 621 int error; 622 623 KASSERT(mutex_owned(vp->v_interlock)); 624 625 if (__predict_false(vp->v_op == dead_vnodeop_p && 626 VSTATE_GET(vp) != VS_RECLAIMED)) { 627 vnpanic(vp, "dead but not clean"); 628 } 629 630 /* 631 * If not the last reference, just drop the reference count 632 * and unlock. 633 */ 634 if (vtryrele(vp)) { 635 mutex_exit(vp->v_interlock); 636 return; 637 } 638 if (vp->v_usecount <= 0 || vp->v_writecount != 0) { 639 vnpanic(vp, "%s: bad ref count", __func__); 640 } 641 642 #ifdef DIAGNOSTIC 643 if ((vp->v_type == VBLK || vp->v_type == VCHR) && 644 vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) { 645 vprint("vrelel: missing VOP_CLOSE()", vp); 646 } 647 #endif 648 649 /* 650 * If not clean, deactivate the vnode, but preserve 651 * our reference across the call to VOP_INACTIVE(). 652 */ 653 if (VSTATE_GET(vp) != VS_RECLAIMED) { 654 recycle = false; 655 656 /* 657 * XXX This ugly block can be largely eliminated if 658 * locking is pushed down into the file systems. 659 * 660 * Defer vnode release to vdrain_thread if caller 661 * requests it explicitly or is the pagedaemon. 662 */ 663 if ((curlwp == uvm.pagedaemon_lwp) || 664 (flags & VRELEL_ASYNC_RELE) != 0) { 665 defer = true; 666 } else if (curlwp == vdrain_lwp) { 667 /* 668 * We have to try harder. 669 */ 670 mutex_exit(vp->v_interlock); 671 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 672 KASSERTMSG((error == 0), "vn_lock failed: %d", error); 673 mutex_enter(vp->v_interlock); 674 defer = false; 675 } else { 676 /* If we can't acquire the lock, then defer. */ 677 mutex_exit(vp->v_interlock); 678 error = vn_lock(vp, 679 LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT); 680 defer = (error != 0); 681 mutex_enter(vp->v_interlock); 682 } 683 684 KASSERT(mutex_owned(vp->v_interlock)); 685 KASSERT(! (curlwp == vdrain_lwp && defer)); 686 687 if (defer) { 688 /* 689 * Defer reclaim to the kthread; it's not safe to 690 * clean it here. We donate it our last reference. 691 */ 692 lru_requeue(vp, &lru_vrele_list); 693 mutex_exit(vp->v_interlock); 694 return; 695 } 696 697 /* 698 * If the node got another reference while we 699 * released the interlock, don't try to inactivate it yet. 700 */ 701 if (__predict_false(vtryrele(vp))) { 702 VOP_UNLOCK(vp); 703 mutex_exit(vp->v_interlock); 704 return; 705 } 706 VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED); 707 mutex_exit(vp->v_interlock); 708 709 /* 710 * The vnode must not gain another reference while being 711 * deactivated. If VOP_INACTIVE() indicates that 712 * the described file has been deleted, then recycle 713 * the vnode. 714 * 715 * Note that VOP_INACTIVE() will drop the vnode lock. 716 */ 717 VOP_INACTIVE(vp, &recycle); 718 if (recycle) { 719 /* vcache_reclaim() below will drop the lock. */ 720 if (vn_lock(vp, LK_EXCLUSIVE) != 0) 721 recycle = false; 722 } 723 mutex_enter(vp->v_interlock); 724 VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE); 725 if (!recycle) { 726 if (vtryrele(vp)) { 727 mutex_exit(vp->v_interlock); 728 return; 729 } 730 } 731 732 /* Take care of space accounting. */ 733 if (vp->v_iflag & VI_EXECMAP) { 734 atomic_add_int(&uvmexp.execpages, 735 -vp->v_uobj.uo_npages); 736 atomic_add_int(&uvmexp.filepages, 737 vp->v_uobj.uo_npages); 738 } 739 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP); 740 vp->v_vflag &= ~VV_MAPPED; 741 742 /* 743 * Recycle the vnode if the file is now unused (unlinked), 744 * otherwise just free it. 745 */ 746 if (recycle) { 747 VSTATE_ASSERT(vp, VS_ACTIVE); 748 vcache_reclaim(vp); 749 } 750 KASSERT(vp->v_usecount > 0); 751 } 752 753 if (atomic_dec_uint_nv(&vp->v_usecount) != 0) { 754 /* Gained another reference while being reclaimed. */ 755 mutex_exit(vp->v_interlock); 756 return; 757 } 758 759 if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) { 760 /* 761 * It's clean so destroy it. It isn't referenced 762 * anywhere since it has been reclaimed. 763 */ 764 vcache_free(VNODE_TO_VIMPL(vp)); 765 } else { 766 /* 767 * Otherwise, put it back onto the freelist. It 768 * can't be destroyed while still associated with 769 * a file system. 770 */ 771 lru_requeue(vp, lru_which(vp)); 772 mutex_exit(vp->v_interlock); 773 } 774 } 775 776 void 777 vrele(vnode_t *vp) 778 { 779 780 if (vtryrele(vp)) { 781 return; 782 } 783 mutex_enter(vp->v_interlock); 784 vrelel(vp, 0); 785 } 786 787 /* 788 * Asynchronous vnode release, vnode is released in different context. 789 */ 790 void 791 vrele_async(vnode_t *vp) 792 { 793 794 if (vtryrele(vp)) { 795 return; 796 } 797 mutex_enter(vp->v_interlock); 798 vrelel(vp, VRELEL_ASYNC_RELE); 799 } 800 801 /* 802 * Vnode reference, where a reference is already held by some other 803 * object (for example, a file structure). 804 */ 805 void 806 vref(vnode_t *vp) 807 { 808 809 KASSERT(vp->v_usecount != 0); 810 811 atomic_inc_uint(&vp->v_usecount); 812 } 813 814 /* 815 * Page or buffer structure gets a reference. 816 * Called with v_interlock held. 817 */ 818 void 819 vholdl(vnode_t *vp) 820 { 821 822 KASSERT(mutex_owned(vp->v_interlock)); 823 824 if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) 825 lru_requeue(vp, lru_which(vp)); 826 } 827 828 /* 829 * Page or buffer structure frees a reference. 830 * Called with v_interlock held. 831 */ 832 void 833 holdrelel(vnode_t *vp) 834 { 835 836 KASSERT(mutex_owned(vp->v_interlock)); 837 838 if (vp->v_holdcnt <= 0) { 839 vnpanic(vp, "%s: holdcnt vp %p", __func__, vp); 840 } 841 842 vp->v_holdcnt--; 843 if (vp->v_holdcnt == 0 && vp->v_usecount == 0) 844 lru_requeue(vp, lru_which(vp)); 845 } 846 847 /* 848 * Recycle an unused vnode if caller holds the last reference. 849 */ 850 bool 851 vrecycle(vnode_t *vp) 852 { 853 int error __diagused; 854 855 mutex_enter(vp->v_interlock); 856 857 /* Make sure we hold the last reference. */ 858 VSTATE_WAIT_STABLE(vp); 859 if (vp->v_usecount != 1) { 860 mutex_exit(vp->v_interlock); 861 return false; 862 } 863 864 /* If the vnode is already clean we're done. */ 865 if (VSTATE_GET(vp) != VS_ACTIVE) { 866 VSTATE_ASSERT(vp, VS_RECLAIMED); 867 vrelel(vp, 0); 868 return true; 869 } 870 871 /* Prevent further references until the vnode is locked. */ 872 VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED); 873 mutex_exit(vp->v_interlock); 874 875 /* 876 * On a leaf file system this lock will always succeed as we hold 877 * the last reference and prevent further references. 878 * On layered file systems waiting for the lock would open a can of 879 * deadlocks as the lower vnodes may have other active references. 880 */ 881 error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT); 882 883 mutex_enter(vp->v_interlock); 884 VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE); 885 886 if (error) { 887 mutex_exit(vp->v_interlock); 888 return false; 889 } 890 891 KASSERT(vp->v_usecount == 1); 892 vcache_reclaim(vp); 893 vrelel(vp, 0); 894 895 return true; 896 } 897 898 /* 899 * Eliminate all activity associated with the requested vnode 900 * and with all vnodes aliased to the requested vnode. 901 */ 902 void 903 vrevoke(vnode_t *vp) 904 { 905 vnode_t *vq; 906 enum vtype type; 907 dev_t dev; 908 909 KASSERT(vp->v_usecount > 0); 910 911 mutex_enter(vp->v_interlock); 912 VSTATE_WAIT_STABLE(vp); 913 if (VSTATE_GET(vp) == VS_RECLAIMED) { 914 mutex_exit(vp->v_interlock); 915 return; 916 } else if (vp->v_type != VBLK && vp->v_type != VCHR) { 917 atomic_inc_uint(&vp->v_usecount); 918 mutex_exit(vp->v_interlock); 919 vgone(vp); 920 return; 921 } else { 922 dev = vp->v_rdev; 923 type = vp->v_type; 924 mutex_exit(vp->v_interlock); 925 } 926 927 while (spec_node_lookup_by_dev(type, dev, &vq) == 0) { 928 vgone(vq); 929 } 930 } 931 932 /* 933 * Eliminate all activity associated with a vnode in preparation for 934 * reuse. Drops a reference from the vnode. 935 */ 936 void 937 vgone(vnode_t *vp) 938 { 939 940 if (vn_lock(vp, LK_EXCLUSIVE) != 0) { 941 VSTATE_ASSERT(vp, VS_RECLAIMED); 942 vrele(vp); 943 } 944 945 mutex_enter(vp->v_interlock); 946 vcache_reclaim(vp); 947 vrelel(vp, 0); 948 } 949 950 static inline uint32_t 951 vcache_hash(const struct vcache_key *key) 952 { 953 uint32_t hash = HASH32_BUF_INIT; 954 955 hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash); 956 hash = hash32_buf(key->vk_key, key->vk_key_len, hash); 957 return hash; 958 } 959 960 static void 961 vcache_init(void) 962 { 963 964 vcache_pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0, 965 "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL); 966 KASSERT(vcache_pool != NULL); 967 mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE); 968 cv_init(&vcache_cv, "vcache"); 969 vcache_hashsize = desiredvnodes; 970 vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true, 971 &vcache_hashmask); 972 } 973 974 static void 975 vcache_reinit(void) 976 { 977 int i; 978 uint32_t hash; 979 u_long oldmask, newmask; 980 struct hashhead *oldtab, *newtab; 981 vnode_impl_t *vip; 982 983 newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask); 984 mutex_enter(&vcache_lock); 985 oldtab = vcache_hashtab; 986 oldmask = vcache_hashmask; 987 vcache_hashsize = desiredvnodes; 988 vcache_hashtab = newtab; 989 vcache_hashmask = newmask; 990 for (i = 0; i <= oldmask; i++) { 991 while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) { 992 SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash); 993 hash = vcache_hash(&vip->vi_key); 994 SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask], 995 vip, vi_hash); 996 } 997 } 998 mutex_exit(&vcache_lock); 999 hashdone(oldtab, HASH_SLIST, oldmask); 1000 } 1001 1002 static inline vnode_impl_t * 1003 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash) 1004 { 1005 struct hashhead *hashp; 1006 vnode_impl_t *vip; 1007 1008 KASSERT(mutex_owned(&vcache_lock)); 1009 1010 hashp = &vcache_hashtab[hash & vcache_hashmask]; 1011 SLIST_FOREACH(vip, hashp, vi_hash) { 1012 if (key->vk_mount != vip->vi_key.vk_mount) 1013 continue; 1014 if (key->vk_key_len != vip->vi_key.vk_key_len) 1015 continue; 1016 if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len)) 1017 continue; 1018 return vip; 1019 } 1020 return NULL; 1021 } 1022 1023 /* 1024 * Allocate a new, uninitialized vcache node. 1025 */ 1026 static vnode_impl_t * 1027 vcache_alloc(void) 1028 { 1029 vnode_impl_t *vip; 1030 vnode_t *vp; 1031 1032 vip = pool_cache_get(vcache_pool, PR_WAITOK); 1033 memset(vip, 0, sizeof(*vip)); 1034 1035 rw_init(&vip->vi_lock); 1036 /* SLIST_INIT(&vip->vi_hash); */ 1037 /* LIST_INIT(&vip->vi_nclist); */ 1038 /* LIST_INIT(&vip->vi_dnclist); */ 1039 1040 vp = VIMPL_TO_VNODE(vip); 1041 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0); 1042 cv_init(&vp->v_cv, "vnode"); 1043 1044 vp->v_usecount = 1; 1045 vp->v_type = VNON; 1046 vp->v_size = vp->v_writesize = VSIZENOTSET; 1047 1048 vip->vi_state = VS_LOADING; 1049 1050 lru_requeue(vp, &lru_free_list); 1051 1052 return vip; 1053 } 1054 1055 /* 1056 * Free an unused, unreferenced vcache node. 1057 * v_interlock locked on entry. 1058 */ 1059 static void 1060 vcache_free(vnode_impl_t *vip) 1061 { 1062 vnode_t *vp; 1063 1064 vp = VIMPL_TO_VNODE(vip); 1065 KASSERT(mutex_owned(vp->v_interlock)); 1066 1067 KASSERT(vp->v_usecount == 0); 1068 KASSERT(vp->v_holdcnt == 0); 1069 KASSERT(vp->v_writecount == 0); 1070 lru_requeue(vp, NULL); 1071 mutex_exit(vp->v_interlock); 1072 1073 vfs_insmntque(vp, NULL); 1074 if (vp->v_type == VBLK || vp->v_type == VCHR) 1075 spec_node_destroy(vp); 1076 1077 rw_destroy(&vip->vi_lock); 1078 uvm_obj_destroy(&vp->v_uobj, true); 1079 cv_destroy(&vp->v_cv); 1080 pool_cache_put(vcache_pool, vip); 1081 } 1082 1083 /* 1084 * Try to get an initial reference on this cached vnode. 1085 * Returns zero on success, ENOENT if the vnode has been reclaimed and 1086 * EBUSY if the vnode state is unstable. 1087 * 1088 * v_interlock locked on entry and unlocked on exit. 1089 */ 1090 int 1091 vcache_tryvget(vnode_t *vp) 1092 { 1093 int error = 0; 1094 1095 KASSERT(mutex_owned(vp->v_interlock)); 1096 1097 if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) 1098 error = ENOENT; 1099 else if (__predict_false(VSTATE_GET(vp) != VS_ACTIVE)) 1100 error = EBUSY; 1101 else if (vp->v_usecount == 0) 1102 vp->v_usecount = 1; 1103 else 1104 atomic_inc_uint(&vp->v_usecount); 1105 1106 mutex_exit(vp->v_interlock); 1107 1108 return error; 1109 } 1110 1111 /* 1112 * Try to get an initial reference on this cached vnode. 1113 * Returns zero on success and ENOENT if the vnode has been reclaimed. 1114 * Will wait for the vnode state to be stable. 1115 * 1116 * v_interlock locked on entry and unlocked on exit. 1117 */ 1118 int 1119 vcache_vget(vnode_t *vp) 1120 { 1121 1122 KASSERT(mutex_owned(vp->v_interlock)); 1123 1124 /* Increment hold count to prevent vnode from disappearing. */ 1125 vp->v_holdcnt++; 1126 VSTATE_WAIT_STABLE(vp); 1127 vp->v_holdcnt--; 1128 1129 /* If this was the last reference to a reclaimed vnode free it now. */ 1130 if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) { 1131 if (vp->v_holdcnt == 0 && vp->v_usecount == 0) 1132 vcache_free(VNODE_TO_VIMPL(vp)); 1133 else 1134 mutex_exit(vp->v_interlock); 1135 return ENOENT; 1136 } 1137 VSTATE_ASSERT(vp, VS_ACTIVE); 1138 if (vp->v_usecount == 0) 1139 vp->v_usecount = 1; 1140 else 1141 atomic_inc_uint(&vp->v_usecount); 1142 1143 mutex_exit(vp->v_interlock); 1144 1145 return 0; 1146 } 1147 1148 /* 1149 * Get a vnode / fs node pair by key and return it referenced through vpp. 1150 */ 1151 int 1152 vcache_get(struct mount *mp, const void *key, size_t key_len, 1153 struct vnode **vpp) 1154 { 1155 int error; 1156 uint32_t hash; 1157 const void *new_key; 1158 struct vnode *vp; 1159 struct vcache_key vcache_key; 1160 vnode_impl_t *vip, *new_vip; 1161 1162 new_key = NULL; 1163 *vpp = NULL; 1164 1165 vcache_key.vk_mount = mp; 1166 vcache_key.vk_key = key; 1167 vcache_key.vk_key_len = key_len; 1168 hash = vcache_hash(&vcache_key); 1169 1170 again: 1171 mutex_enter(&vcache_lock); 1172 vip = vcache_hash_lookup(&vcache_key, hash); 1173 1174 /* If found, take a reference or retry. */ 1175 if (__predict_true(vip != NULL)) { 1176 /* 1177 * If the vnode is loading we cannot take the v_interlock 1178 * here as it might change during load (see uvm_obj_setlock()). 1179 * As changing state from VS_LOADING requires both vcache_lock 1180 * and v_interlock it is safe to test with vcache_lock held. 1181 * 1182 * Wait for vnodes changing state from VS_LOADING and retry. 1183 */ 1184 if (__predict_false(vip->vi_state == VS_LOADING)) { 1185 cv_wait(&vcache_cv, &vcache_lock); 1186 mutex_exit(&vcache_lock); 1187 goto again; 1188 } 1189 vp = VIMPL_TO_VNODE(vip); 1190 mutex_enter(vp->v_interlock); 1191 mutex_exit(&vcache_lock); 1192 error = vcache_vget(vp); 1193 if (error == ENOENT) 1194 goto again; 1195 if (error == 0) 1196 *vpp = vp; 1197 KASSERT((error != 0) == (*vpp == NULL)); 1198 return error; 1199 } 1200 mutex_exit(&vcache_lock); 1201 1202 /* Allocate and initialize a new vcache / vnode pair. */ 1203 error = vfs_busy(mp, NULL); 1204 if (error) 1205 return error; 1206 new_vip = vcache_alloc(); 1207 new_vip->vi_key = vcache_key; 1208 vp = VIMPL_TO_VNODE(new_vip); 1209 mutex_enter(&vcache_lock); 1210 vip = vcache_hash_lookup(&vcache_key, hash); 1211 if (vip == NULL) { 1212 SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask], 1213 new_vip, vi_hash); 1214 vip = new_vip; 1215 } 1216 1217 /* If another thread beat us inserting this node, retry. */ 1218 if (vip != new_vip) { 1219 mutex_enter(vp->v_interlock); 1220 VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED); 1221 mutex_exit(&vcache_lock); 1222 vrelel(vp, 0); 1223 vfs_unbusy(mp, false, NULL); 1224 goto again; 1225 } 1226 mutex_exit(&vcache_lock); 1227 1228 /* Load the fs node. Exclusive as new_node is VS_LOADING. */ 1229 error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key); 1230 if (error) { 1231 mutex_enter(&vcache_lock); 1232 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask], 1233 new_vip, vnode_impl, vi_hash); 1234 mutex_enter(vp->v_interlock); 1235 VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED); 1236 mutex_exit(&vcache_lock); 1237 vrelel(vp, 0); 1238 vfs_unbusy(mp, false, NULL); 1239 KASSERT(*vpp == NULL); 1240 return error; 1241 } 1242 KASSERT(new_key != NULL); 1243 KASSERT(memcmp(key, new_key, key_len) == 0); 1244 KASSERT(vp->v_op != NULL); 1245 vfs_insmntque(vp, mp); 1246 if ((mp->mnt_iflag & IMNT_MPSAFE) != 0) 1247 vp->v_vflag |= VV_MPSAFE; 1248 vfs_unbusy(mp, true, NULL); 1249 1250 /* Finished loading, finalize node. */ 1251 mutex_enter(&vcache_lock); 1252 new_vip->vi_key.vk_key = new_key; 1253 mutex_enter(vp->v_interlock); 1254 VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE); 1255 mutex_exit(vp->v_interlock); 1256 mutex_exit(&vcache_lock); 1257 *vpp = vp; 1258 return 0; 1259 } 1260 1261 /* 1262 * Create a new vnode / fs node pair and return it referenced through vpp. 1263 */ 1264 int 1265 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap, 1266 kauth_cred_t cred, struct vnode **vpp) 1267 { 1268 int error; 1269 uint32_t hash; 1270 struct vnode *vp, *ovp; 1271 vnode_impl_t *vip, *ovip; 1272 1273 *vpp = NULL; 1274 1275 /* Allocate and initialize a new vcache / vnode pair. */ 1276 error = vfs_busy(mp, NULL); 1277 if (error) 1278 return error; 1279 vip = vcache_alloc(); 1280 vip->vi_key.vk_mount = mp; 1281 vp = VIMPL_TO_VNODE(vip); 1282 1283 /* Create and load the fs node. */ 1284 error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, 1285 &vip->vi_key.vk_key_len, &vip->vi_key.vk_key); 1286 if (error) { 1287 mutex_enter(&vcache_lock); 1288 mutex_enter(vp->v_interlock); 1289 VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED); 1290 mutex_exit(&vcache_lock); 1291 vrelel(vp, 0); 1292 vfs_unbusy(mp, false, NULL); 1293 KASSERT(*vpp == NULL); 1294 return error; 1295 } 1296 KASSERT(vip->vi_key.vk_key != NULL); 1297 KASSERT(vp->v_op != NULL); 1298 hash = vcache_hash(&vip->vi_key); 1299 1300 /* Wait for previous instance to be reclaimed, then insert new node. */ 1301 mutex_enter(&vcache_lock); 1302 while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) { 1303 ovp = VIMPL_TO_VNODE(ovip); 1304 mutex_enter(ovp->v_interlock); 1305 mutex_exit(&vcache_lock); 1306 error = vcache_vget(ovp); 1307 KASSERT(error == ENOENT); 1308 mutex_enter(&vcache_lock); 1309 } 1310 SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask], 1311 vip, vi_hash); 1312 mutex_exit(&vcache_lock); 1313 vfs_insmntque(vp, mp); 1314 if ((mp->mnt_iflag & IMNT_MPSAFE) != 0) 1315 vp->v_vflag |= VV_MPSAFE; 1316 vfs_unbusy(mp, true, NULL); 1317 1318 /* Finished loading, finalize node. */ 1319 mutex_enter(&vcache_lock); 1320 mutex_enter(vp->v_interlock); 1321 VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE); 1322 mutex_exit(&vcache_lock); 1323 mutex_exit(vp->v_interlock); 1324 *vpp = vp; 1325 return 0; 1326 } 1327 1328 /* 1329 * Prepare key change: update old cache nodes key and lock new cache node. 1330 * Return an error if the new node already exists. 1331 */ 1332 int 1333 vcache_rekey_enter(struct mount *mp, struct vnode *vp, 1334 const void *old_key, size_t old_key_len, 1335 const void *new_key, size_t new_key_len) 1336 { 1337 uint32_t old_hash, new_hash; 1338 struct vcache_key old_vcache_key, new_vcache_key; 1339 vnode_impl_t *vip, *new_vip; 1340 struct vnode *new_vp; 1341 1342 old_vcache_key.vk_mount = mp; 1343 old_vcache_key.vk_key = old_key; 1344 old_vcache_key.vk_key_len = old_key_len; 1345 old_hash = vcache_hash(&old_vcache_key); 1346 1347 new_vcache_key.vk_mount = mp; 1348 new_vcache_key.vk_key = new_key; 1349 new_vcache_key.vk_key_len = new_key_len; 1350 new_hash = vcache_hash(&new_vcache_key); 1351 1352 new_vip = vcache_alloc(); 1353 new_vip->vi_key = new_vcache_key; 1354 new_vp = VIMPL_TO_VNODE(new_vip); 1355 1356 /* Insert locked new node used as placeholder. */ 1357 mutex_enter(&vcache_lock); 1358 vip = vcache_hash_lookup(&new_vcache_key, new_hash); 1359 if (vip != NULL) { 1360 mutex_enter(new_vp->v_interlock); 1361 VSTATE_CHANGE(new_vp, VS_LOADING, VS_RECLAIMED); 1362 mutex_exit(&vcache_lock); 1363 vrelel(new_vp, 0); 1364 return EEXIST; 1365 } 1366 SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask], 1367 new_vip, vi_hash); 1368 1369 /* Replace old nodes key with the temporary copy. */ 1370 vip = vcache_hash_lookup(&old_vcache_key, old_hash); 1371 KASSERT(vip != NULL); 1372 KASSERT(VIMPL_TO_VNODE(vip) == vp); 1373 KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key); 1374 vip->vi_key = old_vcache_key; 1375 mutex_exit(&vcache_lock); 1376 return 0; 1377 } 1378 1379 /* 1380 * Key change complete: update old node and remove placeholder. 1381 */ 1382 void 1383 vcache_rekey_exit(struct mount *mp, struct vnode *vp, 1384 const void *old_key, size_t old_key_len, 1385 const void *new_key, size_t new_key_len) 1386 { 1387 uint32_t old_hash, new_hash; 1388 struct vcache_key old_vcache_key, new_vcache_key; 1389 vnode_impl_t *vip, *new_vip; 1390 struct vnode *new_vp; 1391 1392 old_vcache_key.vk_mount = mp; 1393 old_vcache_key.vk_key = old_key; 1394 old_vcache_key.vk_key_len = old_key_len; 1395 old_hash = vcache_hash(&old_vcache_key); 1396 1397 new_vcache_key.vk_mount = mp; 1398 new_vcache_key.vk_key = new_key; 1399 new_vcache_key.vk_key_len = new_key_len; 1400 new_hash = vcache_hash(&new_vcache_key); 1401 1402 mutex_enter(&vcache_lock); 1403 1404 /* Lookup old and new node. */ 1405 vip = vcache_hash_lookup(&old_vcache_key, old_hash); 1406 KASSERT(vip != NULL); 1407 KASSERT(VIMPL_TO_VNODE(vip) == vp); 1408 1409 new_vip = vcache_hash_lookup(&new_vcache_key, new_hash); 1410 KASSERT(new_vip != NULL); 1411 KASSERT(new_vip->vi_key.vk_key_len == new_key_len); 1412 new_vp = VIMPL_TO_VNODE(new_vip); 1413 mutex_enter(new_vp->v_interlock); 1414 VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING); 1415 1416 /* Rekey old node and put it onto its new hashlist. */ 1417 vip->vi_key = new_vcache_key; 1418 if (old_hash != new_hash) { 1419 SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask], 1420 vip, vnode_impl, vi_hash); 1421 SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask], 1422 vip, vi_hash); 1423 } 1424 1425 /* Remove new node used as placeholder. */ 1426 SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask], 1427 new_vip, vnode_impl, vi_hash); 1428 VSTATE_CHANGE(new_vp, VS_LOADING, VS_RECLAIMED); 1429 mutex_exit(&vcache_lock); 1430 vrelel(new_vp, 0); 1431 } 1432 1433 /* 1434 * Disassociate the underlying file system from a vnode. 1435 * 1436 * Must be called with vnode locked and will return unlocked. 1437 * Must be called with the interlock held, and will return with it held. 1438 */ 1439 static void 1440 vcache_reclaim(vnode_t *vp) 1441 { 1442 lwp_t *l = curlwp; 1443 vnode_impl_t *vip = VNODE_TO_VIMPL(vp); 1444 uint32_t hash; 1445 uint8_t temp_buf[64], *temp_key; 1446 size_t temp_key_len; 1447 bool recycle, active; 1448 int error; 1449 1450 KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 || 1451 VOP_ISLOCKED(vp) == LK_EXCLUSIVE); 1452 KASSERT(mutex_owned(vp->v_interlock)); 1453 KASSERT(vp->v_usecount != 0); 1454 1455 active = (vp->v_usecount > 1); 1456 temp_key_len = vip->vi_key.vk_key_len; 1457 /* 1458 * Prevent the vnode from being recycled or brought into use 1459 * while we clean it out. 1460 */ 1461 VSTATE_CHANGE(vp, VS_ACTIVE, VS_RECLAIMING); 1462 if (vp->v_iflag & VI_EXECMAP) { 1463 atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages); 1464 atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages); 1465 } 1466 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP); 1467 mutex_exit(vp->v_interlock); 1468 1469 /* Replace the vnode key with a temporary copy. */ 1470 if (vip->vi_key.vk_key_len > sizeof(temp_buf)) { 1471 temp_key = kmem_alloc(temp_key_len, KM_SLEEP); 1472 } else { 1473 temp_key = temp_buf; 1474 } 1475 mutex_enter(&vcache_lock); 1476 memcpy(temp_key, vip->vi_key.vk_key, temp_key_len); 1477 vip->vi_key.vk_key = temp_key; 1478 mutex_exit(&vcache_lock); 1479 1480 /* 1481 * Clean out any cached data associated with the vnode. 1482 * If purging an active vnode, it must be closed and 1483 * deactivated before being reclaimed. 1484 */ 1485 error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0); 1486 if (error != 0) { 1487 if (wapbl_vphaswapbl(vp)) 1488 WAPBL_DISCARD(wapbl_vptomp(vp)); 1489 error = vinvalbuf(vp, 0, NOCRED, l, 0, 0); 1490 } 1491 KASSERTMSG((error == 0), "vinvalbuf failed: %d", error); 1492 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0); 1493 if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) { 1494 spec_node_revoke(vp); 1495 } 1496 1497 /* 1498 * Disassociate the underlying file system from the vnode. 1499 * Note that the VOP_INACTIVE will unlock the vnode. 1500 */ 1501 VOP_INACTIVE(vp, &recycle); 1502 if (VOP_RECLAIM(vp)) { 1503 vnpanic(vp, "%s: cannot reclaim", __func__); 1504 } 1505 1506 KASSERT(vp->v_data == NULL); 1507 KASSERT(vp->v_uobj.uo_npages == 0); 1508 1509 if (vp->v_type == VREG && vp->v_ractx != NULL) { 1510 uvm_ra_freectx(vp->v_ractx); 1511 vp->v_ractx = NULL; 1512 } 1513 1514 /* Purge name cache. */ 1515 cache_purge(vp); 1516 1517 /* Move to dead mount. */ 1518 vp->v_vflag &= ~VV_ROOT; 1519 atomic_inc_uint(&dead_rootmount->mnt_refcnt); 1520 vfs_insmntque(vp, dead_rootmount); 1521 1522 /* Remove from vnode cache. */ 1523 hash = vcache_hash(&vip->vi_key); 1524 mutex_enter(&vcache_lock); 1525 KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash)); 1526 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask], 1527 vip, vnode_impl, vi_hash); 1528 mutex_exit(&vcache_lock); 1529 if (temp_key != temp_buf) 1530 kmem_free(temp_key, temp_key_len); 1531 1532 /* Done with purge, notify sleepers of the grim news. */ 1533 mutex_enter(vp->v_interlock); 1534 vp->v_op = dead_vnodeop_p; 1535 vp->v_vflag |= VV_LOCKSWORK; 1536 VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED); 1537 vp->v_tag = VT_NON; 1538 KNOTE(&vp->v_klist, NOTE_REVOKE); 1539 1540 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0); 1541 } 1542 1543 /* 1544 * Update outstanding I/O count and do wakeup if requested. 1545 */ 1546 void 1547 vwakeup(struct buf *bp) 1548 { 1549 vnode_t *vp; 1550 1551 if ((vp = bp->b_vp) == NULL) 1552 return; 1553 1554 KASSERT(bp->b_objlock == vp->v_interlock); 1555 KASSERT(mutex_owned(bp->b_objlock)); 1556 1557 if (--vp->v_numoutput < 0) 1558 vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp); 1559 if (vp->v_numoutput == 0) 1560 cv_broadcast(&vp->v_cv); 1561 } 1562 1563 /* 1564 * Test a vnode for being or becoming dead. Returns one of: 1565 * EBUSY: vnode is becoming dead, with "flags == VDEAD_NOWAIT" only. 1566 * ENOENT: vnode is dead. 1567 * 0: otherwise. 1568 * 1569 * Whenever this function returns a non-zero value all future 1570 * calls will also return a non-zero value. 1571 */ 1572 int 1573 vdead_check(struct vnode *vp, int flags) 1574 { 1575 1576 KASSERT(mutex_owned(vp->v_interlock)); 1577 1578 if (! ISSET(flags, VDEAD_NOWAIT)) 1579 VSTATE_WAIT_STABLE(vp); 1580 1581 if (VSTATE_GET(vp) == VS_RECLAIMING) { 1582 KASSERT(ISSET(flags, VDEAD_NOWAIT)); 1583 return EBUSY; 1584 } else if (VSTATE_GET(vp) == VS_RECLAIMED) { 1585 return ENOENT; 1586 } 1587 1588 return 0; 1589 } 1590 1591 int 1592 vfs_drainvnodes(void) 1593 { 1594 int i, gen; 1595 1596 mutex_enter(&vdrain_lock); 1597 for (i = 0; i < 2; i++) { 1598 gen = vdrain_gen; 1599 while (gen == vdrain_gen) { 1600 cv_broadcast(&vdrain_cv); 1601 cv_wait(&vdrain_gen_cv, &vdrain_lock); 1602 } 1603 } 1604 mutex_exit(&vdrain_lock); 1605 1606 if (numvnodes >= desiredvnodes) 1607 return EBUSY; 1608 1609 if (vcache_hashsize != desiredvnodes) 1610 vcache_reinit(); 1611 1612 return 0; 1613 } 1614 1615 void 1616 vnpanic(vnode_t *vp, const char *fmt, ...) 1617 { 1618 va_list ap; 1619 1620 #ifdef DIAGNOSTIC 1621 vprint(NULL, vp); 1622 #endif 1623 va_start(ap, fmt); 1624 vpanic(fmt, ap); 1625 va_end(ap); 1626 } 1627