/*	$NetBSD: vfs_vnode.c,v 1.125 2020/06/14 00:20:17 ad Exp $	*/

/*-
 * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vcache_vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) ->
 *	getcleanvnode(9), was another, traditional way.  Currently, only
 *	the draining thread recycles vnodes.  This behaviour might be
 *	revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to
 *	inform the file system that the vnode is inactive.  Via this
 *	call, the file system indicates whether the vnode can be
 *	recycled (usually, it checks its own references, e.g. the link
 *	count, or whether the file was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free
 *	list (cache), or cleaned via vcache_reclaim, which calls
 *	VOP_RECLAIM(9) to disassociate the underlying file system from
 *	the vnode, and finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file
 *			system and is not yet ready to use.
 *	- LOADED	Vnode has associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from underlying file system
 *			and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> LOADED
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	BLOCKED -> RECLAIMING
 *			Vnode starts disassociation from underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from underlying file
 *			system in vcache_reclaim().
 *	LOADED -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> LOADED
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system or vcache_rekey*()
 *			drops a vnode used as placeholder.
 *
 *	Of these states LOADING, BLOCKED and RECLAIMING are intermediate
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock with one exception:
 *	to change from LOADING both v_interlock and vcache_lock must be held
 *	so it is possible to check "state == LOADING" without holding
 *	v_interlock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  It is maintained with the
 *	vref(9) and vrele(9) routines, as well as vput(9).  Common
 *	points holding references are e.g. open files, the current
 *	working directory, mount points, etc.
 *
 *	v_usecount is adjusted with atomic operations; however, to
 *	change from a non-zero value to zero the interlock must also
 *	be held.
 */
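/*
 * Illustrative sketch (added commentary, not used by the code below):
 * the common life-cycle as seen from a file system, where "mp" is a
 * mount point and "ino" a hypothetical file system specific key:
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error == 0) {
 *		...use the referenced, unlocked vnode...
 *		vrele(vp);
 *	}
 *
 * Dropping the last reference in vrele() leads to VOP_INACTIVE(9)
 * and, if the file system asks for it, to vcache_reclaim().
 */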
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.125 2020/06/14 00:20:17 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_pax.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pax.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_stat.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */

#define	LRU_VRELE	0
#define	LRU_FREE	1
#define	LRU_HOLD	2
#define	LRU_COUNT	3

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and one for those
 * which do (i.e. v_holdcnt is non-zero).  We put the lists into a single,
 * private cache line as vnodes migrate between them while under the same
 * lock (vdrain_lock).
 */
u_int			numvnodes		__cacheline_aligned;
static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t		vcache_lock		__cacheline_aligned;
static kcondvar_t	vcache_cv;
static u_int		vcache_hashsize;
static u_long		vcache_hashmask;
static struct hashhead	*vcache_hashtab;
static pool_cache_t	vcache_pool;
static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_dealloc(vnode_impl_t *);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern int		(**spec_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/*
 * The high bit of v_usecount is a gate for vcache_tryvget().  It's set
 * only when the vnode state is LOADED.
 */
#define	VUSECOUNT_MASK	0x7fffffff
#define	VUSECOUNT_GATE	0x80000000
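/*
 * Added commentary: sketch of the v_usecount layout implied by the
 * masks above, assuming a 32 bit v_usecount:
 *
 *	 31                                  0
 *	+------+------------------------------+
 *	| gate |       reference count        |
 *	+------+------------------------------+
 *
 * vcache_tryvget() can only succeed while the gate bit is set, that
 * is while the vnode state is LOADED.
 */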
/*
 * Return the current usecount of a vnode.
 */
inline int
vrefcnt(struct vnode *vp)
{

	return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
}

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_VALID(state) \
	((state) != VS_ACTIVE && (state) != VS_MARKER)
#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)

void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	int refcnt = vrefcnt(vp);

	if (!has_lock) {
		/*
		 * Prevent predictive loads from the CPU, but check the state
		 * without locking first.
		 */
		membar_enter();
		if (state == VS_ACTIVE && refcnt > 0 &&
		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
			return;
		if (vip->vi_state == state)
			return;
		mutex_enter((vp)->v_interlock);
	}

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if ((state == VS_ACTIVE && refcnt > 0 &&
	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
	    vip->vi_state == state) {
		if (!has_lock)
			mutex_exit((vp)->v_interlock);
		return;
	}
	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
	    vstate_name(vip->vi_state), refcnt,
	    vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	return vip->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);
}
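/*
 * Added commentary: vstate_assert_change() below additionally
 * cross-checks the VUSECOUNT_GATE bit against the state machine.
 * On every state change the gate must be set exactly when the state
 * being left is VS_LOADED, matching the rule stated for
 * VUSECOUNT_GATE above.
 */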
static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

	if (! VSTATE_VALID(from))
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (! VSTATE_VALID(to))
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (vip->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(vip->vi_state), vstate_name(from), func, line);
	if ((from == VS_LOADED) != gated)
		vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
		    vstate_name(vip->vi_state), gated, func, line);

	/* Open/close the gate for vcache_tryvget(). */
	if (to == VS_LOADED)
		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
	else
		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{

}

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	/* Open/close the gate for vcache_tryvget(). */
	if (to == VS_LOADED)
		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
	else
		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused, i;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	for (i = 0; i < LRU_COUNT; i++) {
		TAILQ_INIT(&lru_list[i]);
	}
	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));
	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	vip->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *vip;

	vip = VNODE_TO_VIMPL(vp);
	KASSERT(vip->vi_state == VS_MARKER);
	mutex_obj_free(vp->v_interlock);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_list[LRU_HOLD];
	else
		return &lru_list[LRU_FREE];
}

/*
 * Put vnode to end of given list.
 * Both the current and the new list may be NULL, used on vnode alloc/free.
 * Adjust numvnodes and signal vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *vip;
	int d;

	/*
	 * If the vnode is on the correct list, and was put there recently,
	 * then leave it be, thus avoiding huge cache and lock contention.
	 */
	vip = VNODE_TO_VIMPL(vp);
	if (listhd == vip->vi_lrulisthd &&
	    (getticks() - vip->vi_lrulisttm) < hz) {
		return;
	}

	mutex_enter(&vdrain_lock);
	d = 0;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d++;
	vip->vi_lrulisthd = listhd;
	vip->vi_lrulisttm = getticks();
	if (vip->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d--;
	if (d != 0) {
		/*
		 * Looks strange?  This is not a bug.  Don't store
		 * numvnodes unless there is a change - avoid false
		 * sharing on MP.
		 */
		numvnodes += d;
	}
	if ((d > 0 && numvnodes > desiredvnodes) ||
	    listhd == &lru_list[LRU_VRELE])
		cv_signal(&vdrain_cv);
	mutex_exit(&vdrain_lock);
}

/*
 * Release deferred vrele vnodes for this mount.
 * Called with file system suspended.
 */
void
vrele_flush(struct mount *mp)
{
	vnode_impl_t *vip, *marker;
	vnode_t *vp;
	int when = 0;

	KASSERT(fstrans_is_owner(mp));

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);
	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);

	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
		    vi_lrulist);
		vp = VIMPL_TO_VNODE(vip);
		if (vnis_marker(vp))
			continue;

		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
		vip->vi_lrulisttm = getticks();
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
		mutex_exit(&vdrain_lock);

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0, LK_EXCLUSIVE);

		if (getticks() > when) {
			yield();
			when = getticks() + hz / 10;
		}

		mutex_enter(&vdrain_lock);
	}

	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
	mutex_exit(&vdrain_lock);

	vnfree_marker(VIMPL_TO_VNODE(marker));
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vrefcnt(vp) > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vcache_vget(vp) == 0) {
		if (!vrecycle(vp)) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			mutex_enter(vp->v_interlock);
			vrelel(vp, 0, LK_EXCLUSIVE);
		}
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it on the last lru list; the last vrele()
	 * will put it back onto the right list before
	 * its usecount reaches zero.
	 */
	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
	vip->vi_lrulisttm = getticks();
	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_EXCLUSIVE);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	int i;
	u_int target;
	vnode_impl_t *vip, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes/10;

		for (i = 0; i < LRU_COUNT; i++) {
			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
				    vi_lrulist);
				if (vnis_marker(VIMPL_TO_VNODE(vip)))
					continue;
				if (i == LRU_VRELE)
					vdrain_vrele(VIMPL_TO_VNODE(vip));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(vip));
			}
			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			kpause("vdrainrt", false, 1, &vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
			return false;
		}
		KASSERT((use & VUSECOUNT_MASK) > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{
	int lktype;

	/*
	 * Do an unlocked check of the usecount.  If it looks like we're not
	 * about to drop the last reference, then unlock the vnode and try
	 * to drop the reference.  If it ends up being the last reference
	 * after all, vrelel() can fix it all up.  Most of the time this
	 * will all go to plan.
	 */
	if (vrefcnt(vp) > 1) {
		VOP_UNLOCK(vp);
		if (vtryrele(vp)) {
			return;
		}
		lktype = LK_NONE;
	} else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
		lktype = LK_EXCLUSIVE;
	} else {
		lktype = VOP_ISLOCKED(vp);
		KASSERT(lktype != LK_NONE);
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, lktype);
}
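/*
 * Illustrative sketch (added commentary) of the vput() contract,
 * assuming the caller holds both a reference and the vnode lock:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	...operate on the locked vnode...
 *	vput(vp);
 *
 * This is equivalent to VOP_UNLOCK(vp) followed by vrele(vp), but
 * lets vrelel() reuse the held lock when the last reference is
 * being dropped.
 */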
/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags, int lktype)
{
	const bool async = ((flags & VRELEL_ASYNC) != 0);
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count and
	 * unlock.  VOP_UNLOCK() is called here without a vnode reference
	 * held, but that is ok as the hold of v_interlock will stop the
	 * vnode from disappearing.
	 */
	if (vtryrele(vp)) {
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * First try to get the vnode locked for VOP_INACTIVE().
	 * Defer vnode release to vdrain_thread if the caller requests
	 * it explicitly, if the caller is the pagedaemon, or if the
	 * lock attempt failed.
	 */
	defer = false;
	if ((curlwp == uvm.pagedaemon_lwp) || async) {
		defer = true;
	} else if (lktype == LK_SHARED) {
		/* Excellent chance of getting the lock, if the last ref. */
		error = vn_lock(vp, LK_UPGRADE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	} else if (lktype == LK_NONE) {
		/* Excellent chance of getting the lock, if the last ref. */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	}
	KASSERT(mutex_owned(vp->v_interlock));
	if (defer) {
		/*
		 * Defer reclaim to the kthread; it's not safe to
		 * clean it here.  We donate it our last reference.
		 */
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		lru_requeue(vp, &lru_list[LRU_VRELE]);
		mutex_exit(vp->v_interlock);
		return;
	}
	KASSERT(lktype == LK_EXCLUSIVE);

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		VOP_UNLOCK(vp);
	} else {
		/*
		 * If VOP_INACTIVE() indicates that the file has been
		 * deleted, then recycle the vnode.
		 *
		 * Note that VOP_INACTIVE() will not drop the vnode lock.
		 */
		mutex_exit(vp->v_interlock);
		recycle = false;
		VOP_INACTIVE(vp, &recycle);
		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
		mutex_enter(vp->v_interlock);

		for (;;) {
			/*
			 * If no longer the last reference, try to shed it.
			 * On success, drop the interlock last thereby
			 * preventing the vnode being freed behind us.
			 */
			if (vtryrele(vp)) {
				VOP_UNLOCK(vp);
				rw_exit(vp->v_uobj.vmobjlock);
				mutex_exit(vp->v_interlock);
				return;
			}
			/*
			 * Block new references then check again to see if a
			 * new reference was acquired in the meantime.  If
			 * it was, restore the vnode state and try again.
887 */ 888 if (recycle) { 889 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED); 890 if (vrefcnt(vp) != 1) { 891 VSTATE_CHANGE(vp, VS_BLOCKED, 892 VS_LOADED); 893 continue; 894 } 895 } 896 break; 897 } 898 899 /* Take care of space accounting. */ 900 if ((vp->v_iflag & VI_EXECMAP) != 0) { 901 cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages); 902 } 903 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP); 904 vp->v_vflag &= ~VV_MAPPED; 905 rw_exit(vp->v_uobj.vmobjlock); 906 907 /* 908 * Recycle the vnode if the file is now unused (unlinked), 909 * otherwise just free it. 910 */ 911 if (recycle) { 912 VSTATE_ASSERT(vp, VS_BLOCKED); 913 /* vcache_reclaim drops the lock. */ 914 vcache_reclaim(vp); 915 } else { 916 VOP_UNLOCK(vp); 917 } 918 KASSERT(vrefcnt(vp) > 0); 919 } 920 921 if ((atomic_dec_uint_nv(&vp->v_usecount) & VUSECOUNT_MASK) != 0) { 922 /* Gained another reference while being reclaimed. */ 923 mutex_exit(vp->v_interlock); 924 return; 925 } 926 927 if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) { 928 /* 929 * It's clean so destroy it. It isn't referenced 930 * anywhere since it has been reclaimed. 931 */ 932 vcache_free(VNODE_TO_VIMPL(vp)); 933 } else { 934 /* 935 * Otherwise, put it back onto the freelist. It 936 * can't be destroyed while still associated with 937 * a file system. 938 */ 939 lru_requeue(vp, lru_which(vp)); 940 mutex_exit(vp->v_interlock); 941 } 942 } 943 944 void 945 vrele(vnode_t *vp) 946 { 947 948 if (vtryrele(vp)) { 949 return; 950 } 951 mutex_enter(vp->v_interlock); 952 vrelel(vp, 0, LK_NONE); 953 } 954 955 /* 956 * Asynchronous vnode release, vnode is released in different context. 957 */ 958 void 959 vrele_async(vnode_t *vp) 960 { 961 962 if (vtryrele(vp)) { 963 return; 964 } 965 mutex_enter(vp->v_interlock); 966 vrelel(vp, VRELEL_ASYNC, LK_NONE); 967 } 968 969 /* 970 * Vnode reference, where a reference is already held by some other 971 * object (for example, a file structure). 972 * 973 * NB: lockless code sequences may rely on this not blocking. 974 */ 975 void 976 vref(vnode_t *vp) 977 { 978 979 KASSERT(vrefcnt(vp) > 0); 980 981 atomic_inc_uint(&vp->v_usecount); 982 } 983 984 /* 985 * Page or buffer structure gets a reference. 986 * Called with v_interlock held. 987 */ 988 void 989 vholdl(vnode_t *vp) 990 { 991 992 KASSERT(mutex_owned(vp->v_interlock)); 993 994 if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0) 995 lru_requeue(vp, lru_which(vp)); 996 } 997 998 /* 999 * Page or buffer structure gets a reference. 1000 */ 1001 void 1002 vhold(vnode_t *vp) 1003 { 1004 1005 mutex_enter(vp->v_interlock); 1006 vholdl(vp); 1007 mutex_exit(vp->v_interlock); 1008 } 1009 1010 /* 1011 * Page or buffer structure frees a reference. 1012 * Called with v_interlock held. 1013 */ 1014 void 1015 holdrelel(vnode_t *vp) 1016 { 1017 1018 KASSERT(mutex_owned(vp->v_interlock)); 1019 1020 if (vp->v_holdcnt <= 0) { 1021 vnpanic(vp, "%s: holdcnt vp %p", __func__, vp); 1022 } 1023 1024 vp->v_holdcnt--; 1025 if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0) 1026 lru_requeue(vp, lru_which(vp)); 1027 } 1028 1029 /* 1030 * Page or buffer structure frees a reference. 1031 */ 1032 void 1033 holdrele(vnode_t *vp) 1034 { 1035 1036 mutex_enter(vp->v_interlock); 1037 holdrelel(vp); 1038 mutex_exit(vp->v_interlock); 1039 } 1040 1041 /* 1042 * Recycle an unused vnode if caller holds the last reference. 1043 */ 1044 bool 1045 vrecycle(vnode_t *vp) 1046 { 1047 int error __diagused; 1048 1049 mutex_enter(vp->v_interlock); 1050 1051 /* If the vnode is already clean we're done. 
/*
 * Recycle an unused vnode if the caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* If the vnode is already clean we're done. */
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) != VS_LOADED) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0, LK_NONE);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);

	/* Make sure we hold the last reference. */
	if (vrefcnt(vp) != 1) {
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
		mutex_exit(vp->v_interlock);
		return false;
	}

	mutex_exit(vp->v_interlock);

	/*
	 * On a leaf file system this lock will always succeed as we hold
	 * the last reference and prevent further references.
	 * On layered file systems waiting for the lock would open a can of
	 * deadlocks as the lower vnodes may have other active references.
	 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);

	mutex_enter(vp->v_interlock);
	if (error) {
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
		mutex_exit(vp->v_interlock);
		return false;
	}

	KASSERT(vrefcnt(vp) == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0, LK_NONE);

	return true;
}

/*
 * Helper for vrevoke() to propagate suspension from lastmp
 * to thismp.  Both args may be NULL.
 * Returns the currently suspended file system or NULL.
 */
static struct mount *
vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
{
	int error;

	if (lastmp == thismp)
		return thismp;

	if (lastmp != NULL)
		vfs_resume(lastmp);

	if (thismp == NULL)
		return NULL;

	do {
		error = vfs_suspend(thismp, 0);
	} while (error == EINTR || error == ERESTART);

	if (error == 0)
		return thismp;

	KASSERT(error == EOPNOTSUPP);
	return NULL;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	struct mount *mp;
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vrefcnt(vp) > 0);

	mp = vrevoke_suspend_next(NULL, vp->v_mount);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);

		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
			mp = vrevoke_suspend_next(mp, vq->v_mount);
			vgone(vq);
		}
	}
	vrevoke_suspend_next(mp, NULL);
}
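/*
 * Added commentary on the three reclaim entry points: vrecycle()
 * above backs off unless the caller holds the last reference,
 * vgone() below takes the vnode to VS_RECLAIMED unconditionally,
 * and vrevoke() builds on vgone() to eliminate every alias of a
 * device vnode, as required by revoke(2).
 */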
/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{
	int lktype;

	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	lktype = LK_EXCLUSIVE;
	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_LOADED) {
		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
		vcache_reclaim(vp);
		lktype = LK_NONE;
	}
	VSTATE_ASSERT(vp, VS_RECLAIMED);
	vrelel(vp, 0, lktype);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	KASSERT(key->vk_key_len > 0);

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static void
vcache_init(void)
{

	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache_pool != NULL);
	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache_cv, "vcache");
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache_hashmask);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *vip;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache_lock);
	oldtab = vcache_hashtab;
	oldmask = vcache_hashmask;
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = newtab;
	vcache_hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
			hash = vcache_hash(&vip->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
			    vip, vi_hash);
		}
	}
	mutex_exit(&vcache_lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *vip;

	KASSERT(mutex_owned(&vcache_lock));

	hashp = &vcache_hashtab[hash & vcache_hashmask];
	SLIST_FOREACH(vip, hashp, vi_hash) {
		if (key->vk_mount != vip->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != vip->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
			continue;
		return vip;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	vp = VIMPL_TO_VNODE(vip);
	memset(vip, 0, sizeof(*vip));

	rw_init(&vip->vi_lock);
	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
	cv_init(&vp->v_cv, "vnode");
	cache_vnode_init(vp);

	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	vip->vi_state = VS_LOADING;

	lru_requeue(vp, &lru_list[LRU_FREE]);

	return vip;
}

/*
 * Deallocate a vcache node in state VS_LOADING.
 *
 * vcache_lock held on entry and released on return.
 */
static void
vcache_dealloc(vnode_impl_t *vip)
{
	vnode_t *vp;

	KASSERT(mutex_owned(&vcache_lock));

	vp = VIMPL_TO_VNODE(vip);
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache_lock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Free an unused, unreferenced vcache node.
 * v_interlock locked on entry.
 */
static void
vcache_free(vnode_impl_t *vip)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(vip);
	KASSERT(mutex_owned(vp->v_interlock));

	KASSERT(vrefcnt(vp) == 0);
	KASSERT(vp->v_holdcnt == 0);
	KASSERT(vp->v_writecount == 0);
	lru_requeue(vp, NULL);
	mutex_exit(vp->v_interlock);

	vfs_insmntque(vp, NULL);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_destroy(vp);

	mutex_obj_free(vp->v_interlock);
	rw_destroy(&vip->vi_lock);
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	cache_vnode_fini(vp);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success or EBUSY if the vnode state is not LOADED.
 *
 * NB: lockless code sequences may rely on this not blocking.
 */
int
vcache_tryvget(vnode_t *vp)
{
	u_int use, next;

	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
			return EBUSY;
		}
		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
		if (__predict_true(next == use)) {
			return 0;
		}
	}
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success and ENOENT if the vnode has been reclaimed.
 * Will wait for the vnode state to be stable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_vget(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	/* Increment hold count to prevent vnode from disappearing. */
	vp->v_holdcnt++;
	VSTATE_WAIT_STABLE(vp);
	vp->v_holdcnt--;

	/* If this was the last reference to a reclaimed vnode, free it now. */
	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
		if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
			vcache_free(VNODE_TO_VIMPL(vp));
		else
			mutex_exit(vp->v_interlock);
		return ENOENT;
	}
	VSTATE_ASSERT(vp, VS_LOADED);
	atomic_inc_uint(&vp->v_usecount);
	mutex_exit(vp->v_interlock);

	return 0;
}
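/*
 * Illustrative sketch (added commentary) contrasting the two
 * routines above, assuming vp was found by some lockless lookup:
 *
 *	error = vcache_tryvget(vp);	(cheap, never blocks)
 *	if (error == EBUSY) {
 *		mutex_enter(vp->v_interlock);
 *		error = vcache_vget(vp);	(may sleep)
 *	}
 */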
/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *vip, *new_vip;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(vip != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache_lock
		 * and v_interlock it is safe to test with vcache_lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(vip->vi_state == VS_LOADING)) {
			cv_wait(&vcache_cv, &vcache_lock);
			mutex_exit(&vcache_lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(vip);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(vp);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache_lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	new_vip = vcache_alloc();
	new_vip->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);
	if (vip == NULL) {
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vi_hash);
		vip = new_vip;
	}

	/* If another thread beat us inserting this node, retry. */
	if (vip != new_vip) {
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		goto again;
	}
	mutex_exit(&vcache_lock);

	/* Load the fs node.  Exclusive as new_node is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache_lock);
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vnode_impl, vi_hash);
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	new_vip->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache_lock);
	*vpp = vp;
	return 0;
}
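/*
 * Illustrative sketch (added commentary, hypothetical file system
 * "foofs"): looking up a file by inode number reduces to a
 * vcache_get() call with the number as key, with VFS_LOADVNODE(9)
 * supplying the file system specific initialisation:
 *
 *	int
 *	foofs_ihashget(struct mount *mp, ino_t ino, struct vnode **vpp)
 *	{
 *
 *		return vcache_get(mp, &ino, sizeof(ino), vpp);
 *	}
 */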
/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, void *extra, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *vp, *ovp;
	vnode_impl_t *vip, *ovip;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	vip = vcache_alloc();
	vip->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(vip);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache_lock);
		vcache_dealloc(vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(vp->v_op != NULL);
	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
	if (vip->vi_key.vk_key_len > 0) {
		KASSERT(vip->vi_key.vk_key != NULL);
		hash = vcache_hash(&vip->vi_key);

		/*
		 * Wait for previous instance to be reclaimed,
		 * then insert new node.
		 */
		mutex_enter(&vcache_lock);
		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
			ovp = VIMPL_TO_VNODE(ovip);
			mutex_enter(ovp->v_interlock);
			mutex_exit(&vcache_lock);
			error = vcache_vget(ovp);
			KASSERT(error == ENOENT);
			mutex_enter(&vcache_lock);
		}
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vi_hash);
		mutex_exit(&vcache_lock);
	}
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(&vcache_lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}

/*
 * Prepare key change: update the old cache node's key and lock the
 * new cache node.
 * Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_vip = vcache_alloc();
	new_vip->vi_key = new_vcache_key;

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (vip != NULL) {
		vcache_dealloc(new_vip);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vi_hash);

	/* Replace the old node's key with the temporary copy. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);
	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
	vip->vi_key = old_vcache_key;
	mutex_exit(&vcache_lock);
	return 0;
}

/*
 * Key change complete: update the old node and remove the placeholder.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;
	struct vnode *new_vp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache_lock);

	/* Lookup old and new node. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);

	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_vip != NULL);
	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
	new_vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(new_vp->v_interlock);
	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
	mutex_exit(new_vp->v_interlock);

	/* Rekey old node and put it onto its new hashlist. */
	vip->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
		    vip, vi_hash);
	}

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vnode_impl, vi_hash);
	vcache_dealloc(new_vip);
}
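/*
 * Illustrative sketch (added commentary) of the rekey protocol, with
 * hypothetical keys "okey" and "nkey": a file system changing a
 * node's key brackets the change with the enter/exit pair, so a
 * concurrent vcache_get() always finds the node under exactly one
 * of the two keys:
 *
 *	error = vcache_rekey_enter(mp, vp, &okey, sizeof(okey),
 *	    &nkey, sizeof(nkey));
 *	if (error)
 *		...EEXIST, the new key is already present...
 *	...commit the key change in the file system...
 *	vcache_rekey_exit(mp, vp, &okey, sizeof(okey),
 *	    &nkey, sizeof(nkey));
 */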
/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp = vp->v_mount;
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vrefcnt(vp) != 0);

	active = (vrefcnt(vp) > 1);
	temp_key_len = vip->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
	mutex_exit(vp->v_interlock);

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_EXECMAP) != 0) {
		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);

	/*
	 * With the vnode state set to reclaiming, purge the name cache
	 * immediately to prevent new handles on the vnode, and wait for
	 * existing threads trying to get a handle to notice the
	 * VS_RECLAIMED status and abort.
	 */
	cache_purge(vp);

	/* Replace the vnode key with a temporary copy. */
	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	if (vip->vi_key.vk_key_len > 0) {
		mutex_enter(&vcache_lock);
		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
		vip->vi_key.vk_key = temp_key;
		mutex_exit(&vcache_lock);
	}

	fstrans_start(mp);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT((vp->v_iflag & VI_PAGES) == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	if (vip->vi_key.vk_key_len > 0) {
		/* Remove from vnode cache. */
		hash = vcache_hash(&vip->vi_key);
		mutex_enter(&vcache_lock);
		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		mutex_exit(&vcache_lock);
	}
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vp->v_vflag &= ~VV_ROOT;
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

#ifdef PAX_SEGVGUARD
	pax_segvguard_cleanup(vp);
#endif /* PAX_SEGVGUARD */

	mutex_enter(vp->v_interlock);
	fstrans_done(mp);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}
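/*
 * Added commentary: after vcache_reclaim() the vnode is fully
 * anonymous: its operations vector points to dead_vnodeop_p, it
 * hangs off dead_rootmount, and any waiters have been notified via
 * VS_RECLAIMED.  vcache_make_anon() below performs a similar
 * disassociation for an open device vnode, but leaves it usable
 * through spec_vnodeop_p instead of dead.
 */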
/*
 * Disassociate the underlying file system from an open device vnode
 * and make it anonymous.
 *
 * Vnode unlocked on entry, drops a reference to the vnode.
 */
void
vcache_make_anon(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	uint32_t hash;
	bool recycle;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);

	/* Remove from vnode cache. */
	hash = vcache_hash(&vip->vi_key);
	mutex_enter(&vcache_lock);
	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
	    vip, vnode_impl, vi_hash);
	vip->vi_key.vk_mount = dead_rootmount;
	vip->vi_key.vk_key_len = 0;
	vip->vi_key.vk_key = NULL;
	mutex_exit(&vcache_lock);

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	if (vn_lock(vp, LK_EXCLUSIVE)) {
		vnpanic(vp, "%s: cannot lock", __func__);
	}
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Done with purge, change operations vector. */
	mutex_enter(vp->v_interlock);
	vp->v_op = spec_vnodeop_p;
	vp->v_vflag |= VV_MPSAFE;
	vp->v_vflag &= ~VV_LOCKSWORK;
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

	vrele(vp);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}

int
vfs_drainvnodes(void)
{
	int i, gen;

	mutex_enter(&vdrain_lock);
	for (i = 0; i < 2; i++) {
		gen = vdrain_gen;
		while (gen == vdrain_gen) {
			cv_broadcast(&vdrain_cv);
			cv_wait(&vdrain_gen_cv, &vdrain_lock);
		}
	}
	mutex_exit(&vdrain_lock);

	if (numvnodes >= desiredvnodes)
		return EBUSY;

	if (vcache_hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}

void
vshareilock(vnode_t *tvp, vnode_t *fvp)
{
	kmutex_t *oldlock;

	oldlock = tvp->v_interlock;
	mutex_obj_hold(fvp->v_interlock);
	tvp->v_interlock = fvp->v_interlock;
	mutex_obj_free(oldlock);
}