1 /* 2 * Copyright (c) 2003-2020 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * Copyright (c) 1989, 1993, 1995 35 * The Regents of the University of California. All rights reserved. 36 * 37 * This code is derived from software contributed to Berkeley by 38 * Poul-Henning Kamp of the FreeBSD Project. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 3. Neither the name of the University nor the names of its contributors 49 * may be used to endorse or promote products derived from this software 50 * without specific prior written permission. 51 * 52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * SUCH DAMAGE. 
63 */ 64 65 #include <sys/param.h> 66 #include <sys/systm.h> 67 #include <sys/uio.h> 68 #include <sys/kernel.h> 69 #include <sys/sysctl.h> 70 #include <sys/mount.h> 71 #include <sys/vnode.h> 72 #include <sys/malloc.h> 73 #include <sys/sysproto.h> 74 #include <sys/spinlock.h> 75 #include <sys/proc.h> 76 #include <sys/namei.h> 77 #include <sys/nlookup.h> 78 #include <sys/filedesc.h> 79 #include <sys/fnv_hash.h> 80 #include <sys/globaldata.h> 81 #include <sys/kern_syscall.h> 82 #include <sys/dirent.h> 83 #include <ddb/ddb.h> 84 85 #include <sys/spinlock2.h> 86 87 #define MAX_RECURSION_DEPTH 64 88 89 /* 90 * Random lookups in the cache are accomplished with a hash table using 91 * a hash key of (nc_src_vp, name). Each hash chain has its own spin lock, 92 * but we use the ncp->update counter trick to avoid acquiring any 93 * contestable spin-locks during a lookup. 94 * 95 * Negative entries may exist and correspond to resolved namecache 96 * structures where nc_vp is NULL. In a negative entry, NCF_WHITEOUT 97 * will be set if the entry corresponds to a whited-out directory entry 98 * (verses simply not finding the entry at all). pcpu_ncache[n].neg_list 99 * is locked via pcpu_ncache[n].neg_spin; 100 * 101 * MPSAFE RULES: 102 * 103 * (1) ncp's typically have at least a nc_refs of 1, and usually 2. One 104 * is applicable to direct lookups via the hash table nchpp or via 105 * nc_list (the two are added or removed together). Removal of the ncp 106 * from the hash table drops this reference. The second is applicable 107 * to vp->v_namecache linkages (or negative list linkages), and removal 108 * of the ncp from these lists drops this reference. 109 * 110 * On the 1->0 transition of nc_refs the ncp can no longer be referenced 111 * and must be destroyed. No other thread should have access to it at 112 * this point so it can be safely locked and freed without any deadlock 113 * fears. 114 * 115 * The 1->0 transition can occur at almost any juncture and so cache_drop() 116 * deals with it directly. 117 * 118 * (2) Once the 1->0 transition occurs, the entity that caused the transition 119 * will be responsible for destroying the ncp. The ncp cannot be on any 120 * list or hash at this time, or be held by anyone other than the caller 121 * responsible for the transition. 122 * 123 * (3) A ncp must be locked in order to modify it. 124 * 125 * (5) ncp locks are ordered, child-to-parent. Child first, then parent. 126 * This may seem backwards but forward-scans use the hash table and thus 127 * can hold the parent unlocked while traversing downward. Deletions, 128 * on the other-hand, tend to propagate bottom-up since the ref on the 129 * is dropped as the children go away. 130 * 131 * (6) Both parent and child must be locked in order to enter the child onto 132 * the parent's nc_list. 133 */ 134 135 /* 136 * Structures associated with name cacheing. 137 */ 138 #define NCHHASH(hash) (&nchashtbl[(hash) & nchash]) 139 #define MINNEG 1024 140 #define MINPOS 1024 141 #define NCMOUNT_NUMCACHE (16384) /* power of 2 */ 142 #define NCMOUNT_SET (8) /* power of 2 */ 143 144 MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries"); 145 146 TAILQ_HEAD(nchash_list, namecache); 147 148 /* 149 * Don't cachealign, but at least pad to 32 bytes so entries 150 * don't cross a cache line. 
151 */ 152 struct nchash_head { 153 struct nchash_list list; /* 16 bytes */ 154 struct spinlock spin; /* 8 bytes */ 155 long pad01; /* 8 bytes */ 156 }; 157 158 struct ncmount_cache { 159 struct spinlock spin; 160 struct namecache *ncp; 161 struct mount *mp; 162 struct mount *mp_target; 163 int isneg; 164 int ticks; 165 int updating; 166 int unused01; 167 }; 168 169 struct pcpu_ncache { 170 struct spinlock umount_spin; /* cache_findmount/interlock */ 171 struct spinlock neg_spin; /* for neg_list and neg_count */ 172 struct namecache_list neg_list; 173 long neg_count; 174 long vfscache_negs; 175 long vfscache_count; 176 long vfscache_leafs; 177 long numdefered; 178 } __cachealign; 179 180 __read_mostly static struct nchash_head *nchashtbl; 181 __read_mostly static struct pcpu_ncache *pcpu_ncache; 182 static struct ncmount_cache ncmount_cache[NCMOUNT_NUMCACHE]; 183 184 /* 185 * ncvp_debug - debug cache_fromvp(). This is used by the NFS server 186 * to create the namecache infrastructure leading to a dangling vnode. 187 * 188 * 0 Only errors are reported 189 * 1 Successes are reported 190 * 2 Successes + the whole directory scan is reported 191 * 3 Force the directory scan code run as if the parent vnode did not 192 * have a namecache record, even if it does have one. 193 */ 194 __read_mostly static int ncvp_debug; 195 SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, 196 "Namecache debug level (0-3)"); 197 198 __read_mostly static u_long nchash; /* size of hash table */ 199 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, 200 "Size of namecache hash table"); 201 202 __read_mostly static int ncnegflush = 10; /* burst for negative flush */ 203 SYSCTL_INT(_debug, OID_AUTO, ncnegflush, CTLFLAG_RW, &ncnegflush, 0, 204 "Batch flush negative entries"); 205 206 __read_mostly static int ncposflush = 10; /* burst for positive flush */ 207 SYSCTL_INT(_debug, OID_AUTO, ncposflush, CTLFLAG_RW, &ncposflush, 0, 208 "Batch flush positive entries"); 209 210 __read_mostly static int ncnegfactor = 16; /* ratio of negative entries */ 211 SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, 212 "Ratio of namecache negative entries"); 213 214 __read_mostly static int nclockwarn; /* warn on locked entries in ticks */ 215 SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, 216 "Warn on locked namecache entries in ticks"); 217 218 __read_mostly static int ncposlimit; /* number of cache entries allocated */ 219 SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0, 220 "Number of cache entries allocated"); 221 222 __read_mostly static int ncp_shared_lock_disable = 0; 223 SYSCTL_INT(_debug, OID_AUTO, ncp_shared_lock_disable, CTLFLAG_RW, 224 &ncp_shared_lock_disable, 0, "Disable shared namecache locks"); 225 226 SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), 227 "sizeof(struct vnode)"); 228 SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), 229 "sizeof(struct namecache)"); 230 231 __read_mostly static int ncmount_cache_enable = 1; 232 SYSCTL_INT(_debug, OID_AUTO, ncmount_cache_enable, CTLFLAG_RW, 233 &ncmount_cache_enable, 0, "mount point cache"); 234 235 static __inline void _cache_drop(struct namecache *ncp); 236 static int cache_resolve_mp(struct mount *mp); 237 static int cache_findmount_callback(struct mount *mp, void *data); 238 static void _cache_setunresolved(struct namecache *ncp); 239 static void _cache_cleanneg(long count); 240 static void _cache_cleanpos(long count); 241 
static void _cache_cleandefered(void); 242 static void _cache_unlink(struct namecache *ncp); 243 244 /* 245 * The new name cache statistics (these are rolled up globals and not 246 * modified in the critical path, see struct pcpu_ncache). 247 */ 248 SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics"); 249 static long vfscache_negs; 250 SYSCTL_LONG(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &vfscache_negs, 0, 251 "Number of negative namecache entries"); 252 static long vfscache_count; 253 SYSCTL_LONG(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &vfscache_count, 0, 254 "Number of namecaches entries"); 255 static long vfscache_leafs; 256 SYSCTL_LONG(_vfs_cache, OID_AUTO, numleafs, CTLFLAG_RD, &vfscache_leafs, 0, 257 "Number of namecaches entries"); 258 static long numdefered; 259 SYSCTL_LONG(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0, 260 "Number of cache entries allocated"); 261 262 263 struct nchstats nchstats[SMP_MAXCPU]; 264 /* 265 * Export VFS cache effectiveness statistics to user-land. 266 * 267 * The statistics are left for aggregation to user-land so 268 * neat things can be achieved, like observing per-CPU cache 269 * distribution. 270 */ 271 static int 272 sysctl_nchstats(SYSCTL_HANDLER_ARGS) 273 { 274 struct globaldata *gd; 275 int i, error; 276 277 error = 0; 278 for (i = 0; i < ncpus; ++i) { 279 gd = globaldata_find(i); 280 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats), 281 sizeof(struct nchstats)))) 282 break; 283 } 284 285 return (error); 286 } 287 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD, 288 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics"); 289 290 static void cache_zap(struct namecache *ncp); 291 292 /* 293 * Cache mount points and namecache records in order to avoid unnecessary 294 * atomic ops on mnt_refs and ncp->refs. This improves concurrent SMP 295 * performance and is particularly important on multi-socket systems to 296 * reduce cache-line ping-ponging. 297 * 298 * Try to keep the pcpu structure within one cache line (~64 bytes). 
299 */ 300 #define MNTCACHE_COUNT 32 /* power of 2, multiple of SET */ 301 #define MNTCACHE_SET 8 /* set associativity */ 302 303 struct mntcache_elm { 304 struct namecache *ncp; 305 struct mount *mp; 306 int ticks; 307 int unused01; 308 }; 309 310 struct mntcache { 311 struct mntcache_elm array[MNTCACHE_COUNT]; 312 } __cachealign; 313 314 static struct mntcache pcpu_mntcache[MAXCPU]; 315 316 static __inline 317 struct mntcache_elm * 318 _cache_mntcache_hash(void *ptr) 319 { 320 struct mntcache_elm *elm; 321 int hv; 322 323 hv = iscsi_crc32(&ptr, sizeof(ptr)) & (MNTCACHE_COUNT - 1); 324 elm = &pcpu_mntcache[mycpu->gd_cpuid].array[hv & ~(MNTCACHE_SET - 1)]; 325 326 return elm; 327 } 328 329 static 330 void 331 _cache_mntref(struct mount *mp) 332 { 333 struct mntcache_elm *elm; 334 struct mount *mpr; 335 int i; 336 337 elm = _cache_mntcache_hash(mp); 338 for (i = 0; i < MNTCACHE_SET; ++i) { 339 if (elm->mp == mp) { 340 mpr = atomic_swap_ptr((void *)&elm->mp, NULL); 341 if (__predict_true(mpr == mp)) 342 return; 343 if (mpr) 344 atomic_add_int(&mpr->mnt_refs, -1); 345 } 346 ++elm; 347 } 348 atomic_add_int(&mp->mnt_refs, 1); 349 } 350 351 static 352 void 353 _cache_mntrel(struct mount *mp) 354 { 355 struct mntcache_elm *elm; 356 struct mntcache_elm *best; 357 struct mount *mpr; 358 int delta1; 359 int delta2; 360 int i; 361 362 elm = _cache_mntcache_hash(mp); 363 best = elm; 364 for (i = 0; i < MNTCACHE_SET; ++i) { 365 if (elm->mp == NULL) { 366 mpr = atomic_swap_ptr((void *)&elm->mp, mp); 367 if (__predict_false(mpr != NULL)) { 368 atomic_add_int(&mpr->mnt_refs, -1); 369 } 370 elm->ticks = ticks; 371 return; 372 } 373 delta1 = ticks - best->ticks; 374 delta2 = ticks - elm->ticks; 375 if (delta2 > delta1 || delta1 < -1 || delta2 < -1) 376 best = elm; 377 ++elm; 378 } 379 mpr = atomic_swap_ptr((void *)&best->mp, mp); 380 best->ticks = ticks; 381 if (mpr) 382 atomic_add_int(&mpr->mnt_refs, -1); 383 } 384 385 /* 386 * Clears all cached mount points on all cpus. This routine should only 387 * be called when we are waiting for a mount to clear, e.g. so we can 388 * unmount. 389 */ 390 void 391 cache_clearmntcache(struct mount *target __unused) 392 { 393 int n; 394 395 for (n = 0; n < ncpus; ++n) { 396 struct mntcache *cache = &pcpu_mntcache[n]; 397 struct mntcache_elm *elm; 398 struct namecache *ncp; 399 struct mount *mp; 400 int i; 401 402 for (i = 0; i < MNTCACHE_COUNT; ++i) { 403 elm = &cache->array[i]; 404 if (elm->mp) { 405 mp = atomic_swap_ptr((void *)&elm->mp, NULL); 406 if (mp) 407 atomic_add_int(&mp->mnt_refs, -1); 408 } 409 if (elm->ncp) { 410 ncp = atomic_swap_ptr((void *)&elm->ncp, NULL); 411 if (ncp) 412 _cache_drop(ncp); 413 } 414 } 415 } 416 } 417 418 /* 419 * Namespace locking. The caller must already hold a reference to the 420 * namecache structure in order to lock/unlock it. The controlling entity 421 * in a 1->0 transition does not need to lock the ncp to dispose of it, 422 * as nobody else will have visiblity to it at that point. 423 * 424 * Note that holding a locked namecache structure prevents other threads 425 * from making namespace changes (e.g. deleting or creating), prevents 426 * vnode association state changes by other threads, and prevents the 427 * namecache entry from being resolved or unresolved by other threads. 428 * 429 * An exclusive lock owner has full authority to associate/disassociate 430 * vnodes and resolve/unresolve the locked ncp. 431 * 432 * A shared lock owner only has authority to acquire the underlying vnode, 433 * if any. 
434 * 435 * The primary lock field is nc_lockstatus. nc_locktd is set after the 436 * fact (when locking) or cleared prior to unlocking. 437 * 438 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed 439 * or recycled, but it does NOT help you if the vnode had already 440 * initiated a recyclement. If this is important, use cache_get() 441 * rather then cache_lock() (and deal with the differences in the 442 * way the refs counter is handled). Or, alternatively, make an 443 * unconditional call to cache_validate() or cache_resolve() 444 * after cache_lock() returns. 445 */ 446 static __inline 447 void 448 _cache_lock(struct namecache *ncp) 449 { 450 int didwarn = 0; 451 int error; 452 453 error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE); 454 while (__predict_false(error == EWOULDBLOCK)) { 455 if (didwarn == 0) { 456 didwarn = ticks - nclockwarn; 457 kprintf("[diagnostic] cache_lock: " 458 "%s blocked on %p " 459 "\"%*.*s\"\n", 460 curthread->td_comm, ncp, 461 ncp->nc_nlen, ncp->nc_nlen, 462 ncp->nc_name); 463 } 464 error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE | LK_TIMELOCK); 465 } 466 if (__predict_false(didwarn)) { 467 kprintf("[diagnostic] cache_lock: " 468 "%s unblocked %*.*s after %d secs\n", 469 curthread->td_comm, 470 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name, 471 (int)(ticks - didwarn) / hz); 472 } 473 } 474 475 /* 476 * Release a previously acquired lock. 477 * 478 * A concurrent shared-lock acquisition or acquisition/release can 479 * race bit 31 so only drop the ncp if bit 31 was set. 480 */ 481 static __inline 482 void 483 _cache_unlock(struct namecache *ncp) 484 { 485 lockmgr(&ncp->nc_lock, LK_RELEASE); 486 } 487 488 /* 489 * Lock ncp exclusively, non-blocking. Return 0 on success. 490 */ 491 static __inline 492 int 493 _cache_lock_nonblock(struct namecache *ncp) 494 { 495 int error; 496 497 error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE | LK_NOWAIT); 498 if (__predict_false(error != 0)) { 499 return(EWOULDBLOCK); 500 } 501 return 0; 502 } 503 504 /* 505 * This is a special form of _cache_lock() which only succeeds if 506 * it can get a pristine, non-recursive lock. The caller must have 507 * already ref'd the ncp. 508 * 509 * On success the ncp will be locked, on failure it will not. The 510 * ref count does not change either way. 511 * 512 * We want _cache_lock_special() (on success) to return a definitively 513 * usable vnode or a definitively unresolved ncp. 514 */ 515 static __inline 516 int 517 _cache_lock_special(struct namecache *ncp) 518 { 519 if (_cache_lock_nonblock(ncp) == 0) { 520 if (lockmgr_oneexcl(&ncp->nc_lock)) { 521 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 522 _cache_setunresolved(ncp); 523 return 0; 524 } 525 _cache_unlock(ncp); 526 } 527 return EWOULDBLOCK; 528 } 529 530 /* 531 * Shared lock, guarantees vp held 532 * 533 * The shared lock holds vp on the 0->1 transition. It is possible to race 534 * another shared lock release, preventing the other release from dropping 535 * the vnode and clearing bit 31. 536 * 537 * If it is not set then we are responsible for setting it, and this 538 * responsibility does not race with anyone else. 
539 */ 540 static __inline 541 void 542 _cache_lock_shared(struct namecache *ncp) 543 { 544 int didwarn = 0; 545 int error; 546 547 error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_TIMELOCK); 548 while (__predict_false(error == EWOULDBLOCK)) { 549 if (didwarn == 0) { 550 didwarn = ticks - nclockwarn; 551 kprintf("[diagnostic] cache_lock_shared: " 552 "%s blocked on %p " 553 "\"%*.*s\"\n", 554 curthread->td_comm, ncp, 555 ncp->nc_nlen, ncp->nc_nlen, 556 ncp->nc_name); 557 } 558 error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_TIMELOCK); 559 } 560 if (__predict_false(didwarn)) { 561 kprintf("[diagnostic] cache_lock_shared: " 562 "%s unblocked %*.*s after %d secs\n", 563 curthread->td_comm, 564 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name, 565 (int)(ticks - didwarn) / hz); 566 } 567 } 568 569 /* 570 * Shared lock, guarantees vp held. Non-blocking. Returns 0 on success 571 */ 572 static __inline 573 int 574 _cache_lock_shared_nonblock(struct namecache *ncp) 575 { 576 int error; 577 578 error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_NOWAIT); 579 if (__predict_false(error != 0)) { 580 return(EWOULDBLOCK); 581 } 582 return 0; 583 } 584 585 /* 586 * This function tries to get a shared lock but will back-off to an 587 * exclusive lock if: 588 * 589 * (1) Some other thread is trying to obtain an exclusive lock 590 * (to prevent the exclusive requester from getting livelocked out 591 * by many shared locks). 592 * 593 * (2) The current thread already owns an exclusive lock (to avoid 594 * deadlocking). 595 * 596 * WARNING! On machines with lots of cores we really want to try hard to 597 * get a shared lock or concurrent path lookups can chain-react 598 * into a very high-latency exclusive lock. 599 * 600 * This is very evident in dsynth's initial scans. 601 */ 602 static __inline 603 int 604 _cache_lock_shared_special(struct namecache *ncp) 605 { 606 /* 607 * Only honor a successful shared lock (returning 0) if there is 608 * no exclusive request pending and the vnode, if present, is not 609 * in a reclaimed state. 610 */ 611 if (_cache_lock_shared_nonblock(ncp) == 0) { 612 if (__predict_true(!lockmgr_exclpending(&ncp->nc_lock))) { 613 if (ncp->nc_vp == NULL || 614 (ncp->nc_vp->v_flag & VRECLAIMED) == 0) { 615 return(0); 616 } 617 } 618 _cache_unlock(ncp); 619 return(EWOULDBLOCK); 620 } 621 622 /* 623 * Non-blocking shared lock failed. If we already own the exclusive 624 * lock just acquire another exclusive lock (instead of deadlocking). 625 * Otherwise acquire a shared lock. 626 */ 627 if (lockstatus(&ncp->nc_lock, curthread) == LK_EXCLUSIVE) { 628 _cache_lock(ncp); 629 return(0); 630 } 631 _cache_lock_shared(ncp); 632 return(0); 633 } 634 635 static __inline 636 int 637 _cache_lockstatus(struct namecache *ncp) 638 { 639 int status; 640 641 status = lockstatus(&ncp->nc_lock, curthread); 642 if (status == 0 || status == LK_EXCLOTHER) 643 status = -1; 644 return status; 645 } 646 647 /* 648 * cache_hold() and cache_drop() prevent the premature deletion of a 649 * namecache entry but do not prevent operations (such as zapping) on 650 * that namecache entry. 651 * 652 * This routine may only be called from outside this source module if 653 * nc_refs is already deterministically at least 1, such as being 654 * associated with e.g. a process, file descriptor, or some other entity. 655 * 656 * Only the above situations, similar situations within this module where 657 * the ref count is deterministically at least 1, or when the ncp is found 658 * via the nchpp (hash table) lookup, can bump nc_refs. 
659 * 660 * Very specifically, a ncp found via nc_list CANNOT bump nc_refs. It 661 * can still be removed from the nc_list, however, as long as the caller 662 * can acquire its lock (in the wrong order). 663 * 664 * This is a rare case where callers are allowed to hold a spinlock, 665 * so we can't ourselves. 666 */ 667 static __inline 668 struct namecache * 669 _cache_hold(struct namecache *ncp) 670 { 671 KKASSERT(ncp->nc_refs > 0); 672 atomic_add_int(&ncp->nc_refs, 1); 673 674 return(ncp); 675 } 676 677 /* 678 * Drop a cache entry. 679 * 680 * The 1->0 transition is special and requires the caller to destroy the 681 * entry. It means that the ncp is no longer on a nchpp list (since that 682 * would mean there was stilla ref). The ncp could still be on a nc_list 683 * but will not have any child of its own, again because nc_refs is now 0 684 * and children would have a ref to their parent. 685 * 686 * Once the 1->0 transition is made, nc_refs cannot be incremented again. 687 */ 688 static __inline 689 void 690 _cache_drop(struct namecache *ncp) 691 { 692 if (atomic_fetchadd_int(&ncp->nc_refs, -1) == 1) { 693 /* 694 * Executed unlocked (no need to lock on last drop) 695 */ 696 _cache_setunresolved(ncp); 697 698 /* 699 * Scrap it. 700 */ 701 ncp->nc_refs = -1; /* safety */ 702 if (ncp->nc_name) 703 kfree(ncp->nc_name, M_VFSCACHE); 704 kfree(ncp, M_VFSCACHE); 705 } 706 } 707 708 /* 709 * Link a new namecache entry to its parent and to the hash table. Be 710 * careful to avoid races if vhold() blocks in the future. 711 * 712 * Both ncp and par must be referenced and locked. The reference is 713 * transfered to the nchpp (and, most notably, NOT to the parent list). 714 * 715 * NOTE: The hash table spinlock is held across this call, we can't do 716 * anything fancy. 717 */ 718 static void 719 _cache_link_parent(struct namecache *ncp, struct namecache *par, 720 struct nchash_head *nchpp) 721 { 722 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid]; 723 724 KKASSERT(ncp->nc_parent == NULL); 725 ncp->nc_parent = par; 726 ncp->nc_head = nchpp; 727 728 /* 729 * Set inheritance flags. Note that the parent flags may be 730 * stale due to getattr potentially not having been run yet 731 * (it gets run during nlookup()'s). 732 */ 733 ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE); 734 if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) 735 ncp->nc_flag |= NCF_SF_PNOCACHE; 736 if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE)) 737 ncp->nc_flag |= NCF_UF_PCACHE; 738 739 /* 740 * Add to hash table and parent, adjust accounting 741 */ 742 TAILQ_INSERT_HEAD(&nchpp->list, ncp, nc_hash); 743 atomic_add_long(&pn->vfscache_count, 1); 744 if (TAILQ_EMPTY(&ncp->nc_list)) 745 atomic_add_long(&pn->vfscache_leafs, 1); 746 747 if (TAILQ_EMPTY(&par->nc_list)) { 748 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry); 749 atomic_add_long(&pn->vfscache_leafs, -1); 750 /* 751 * Any vp associated with an ncp which has children must 752 * be held to prevent it from being recycled. 753 */ 754 if (par->nc_vp) 755 vhold(par->nc_vp); 756 } else { 757 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry); 758 } 759 _cache_hold(par); /* add nc_parent ref */ 760 } 761 762 /* 763 * Remove the parent and hash associations from a namecache structure. 764 * Drop the ref-count on the parent. The caller receives the ref 765 * from the ncp's nchpp linkage that was removed and may forward that 766 * ref to a new linkage. 767 768 * The caller usually holds an additional ref * on the ncp so the unlink 769 * cannot be the final drop. 
XXX should not be necessary now since the 770 * caller receives the ref from the nchpp linkage, assuming the ncp 771 * was linked in the first place. 772 * 773 * ncp must be locked, which means that there won't be any nc_parent 774 * removal races. This routine will acquire a temporary lock on 775 * the parent as well as the appropriate hash chain. 776 */ 777 static void 778 _cache_unlink_parent(struct namecache *ncp) 779 { 780 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid]; 781 struct namecache *par; 782 struct vnode *dropvp; 783 struct nchash_head *nchpp; 784 785 if ((par = ncp->nc_parent) != NULL) { 786 cpu_ccfence(); 787 KKASSERT(ncp->nc_parent == par); 788 789 /* don't add a ref, we drop the nchpp ref later */ 790 _cache_lock(par); 791 nchpp = ncp->nc_head; 792 spin_lock(&nchpp->spin); 793 794 /* 795 * Remove from hash table and parent, adjust accounting 796 */ 797 TAILQ_REMOVE(&ncp->nc_head->list, ncp, nc_hash); 798 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry); 799 atomic_add_long(&pn->vfscache_count, -1); 800 if (TAILQ_EMPTY(&ncp->nc_list)) 801 atomic_add_long(&pn->vfscache_leafs, -1); 802 803 dropvp = NULL; 804 if (TAILQ_EMPTY(&par->nc_list)) { 805 atomic_add_long(&pn->vfscache_leafs, 1); 806 if (par->nc_vp) 807 dropvp = par->nc_vp; 808 } 809 ncp->nc_parent = NULL; 810 ncp->nc_head = NULL; 811 spin_unlock(&nchpp->spin); 812 _cache_unlock(par); 813 _cache_drop(par); /* drop nc_parent ref */ 814 815 /* 816 * We can only safely vdrop with no spinlocks held. 817 */ 818 if (dropvp) 819 vdrop(dropvp); 820 } 821 } 822 823 /* 824 * Allocate a new namecache structure. Most of the code does not require 825 * zero-termination of the string but it makes vop_compat_ncreate() easier. 826 * 827 * The returned ncp will be locked and referenced. The ref is generally meant 828 * to be transfered to the nchpp linkage. 829 */ 830 static struct namecache * 831 cache_alloc(int nlen) 832 { 833 struct namecache *ncp; 834 835 ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO); 836 if (nlen) 837 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK); 838 ncp->nc_nlen = nlen; 839 ncp->nc_flag = NCF_UNRESOLVED; 840 ncp->nc_error = ENOTCONN; /* needs to be resolved */ 841 ncp->nc_refs = 1; 842 TAILQ_INIT(&ncp->nc_list); 843 lockinit(&ncp->nc_lock, "ncplk", hz, LK_CANRECURSE); 844 lockmgr(&ncp->nc_lock, LK_EXCLUSIVE); 845 846 return(ncp); 847 } 848 849 /* 850 * Can only be called for the case where the ncp has never been 851 * associated with anything (so no spinlocks are needed). 852 */ 853 static void 854 _cache_free(struct namecache *ncp) 855 { 856 KKASSERT(ncp->nc_refs == 1); 857 if (ncp->nc_name) 858 kfree(ncp->nc_name, M_VFSCACHE); 859 kfree(ncp, M_VFSCACHE); 860 } 861 862 /* 863 * [re]initialize a nchandle. 864 */ 865 void 866 cache_zero(struct nchandle *nch) 867 { 868 nch->ncp = NULL; 869 nch->mount = NULL; 870 } 871 872 /* 873 * Ref and deref a nchandle structure (ncp + mp) 874 * 875 * The caller must specify a stable ncp pointer, typically meaning the 876 * ncp is already referenced but this can also occur indirectly through 877 * e.g. holding a lock on a direct child. 878 * 879 * WARNING: Caller may hold an unrelated read spinlock, which means we can't 880 * use read spinlocks here. 881 */ 882 struct nchandle * 883 cache_hold(struct nchandle *nch) 884 { 885 _cache_hold(nch->ncp); 886 _cache_mntref(nch->mount); 887 return(nch); 888 } 889 890 /* 891 * Create a copy of a namecache handle for an already-referenced 892 * entry. 
893 */ 894 void 895 cache_copy(struct nchandle *nch, struct nchandle *target) 896 { 897 struct namecache *ncp; 898 struct mount *mp; 899 struct mntcache_elm *elm; 900 struct namecache *ncpr; 901 int i; 902 903 ncp = nch->ncp; 904 mp = nch->mount; 905 target->ncp = ncp; 906 target->mount = mp; 907 908 elm = _cache_mntcache_hash(ncp); 909 for (i = 0; i < MNTCACHE_SET; ++i) { 910 if (elm->ncp == ncp) { 911 ncpr = atomic_swap_ptr((void *)&elm->ncp, NULL); 912 if (ncpr == ncp) { 913 _cache_mntref(mp); 914 return; 915 } 916 if (ncpr) 917 _cache_drop(ncpr); 918 } 919 ++elm; 920 } 921 if (ncp) 922 _cache_hold(ncp); 923 _cache_mntref(mp); 924 } 925 926 /* 927 * Drop the nchandle, but try to cache the ref to avoid global atomic 928 * ops. This is typically done on the system root and jail root nchandles. 929 */ 930 void 931 cache_drop_and_cache(struct nchandle *nch, int elmno) 932 { 933 struct mntcache_elm *elm; 934 struct mntcache_elm *best; 935 struct namecache *ncpr; 936 int delta1; 937 int delta2; 938 int i; 939 940 if (elmno > 4) { 941 if (nch->ncp) { 942 _cache_drop(nch->ncp); 943 nch->ncp = NULL; 944 } 945 if (nch->mount) { 946 _cache_mntrel(nch->mount); 947 nch->mount = NULL; 948 } 949 return; 950 } 951 952 elm = _cache_mntcache_hash(nch->ncp); 953 best = elm; 954 for (i = 0; i < MNTCACHE_SET; ++i) { 955 if (elm->ncp == NULL) { 956 ncpr = atomic_swap_ptr((void *)&elm->ncp, nch->ncp); 957 _cache_mntrel(nch->mount); 958 elm->ticks = ticks; 959 nch->mount = NULL; 960 nch->ncp = NULL; 961 if (ncpr) 962 _cache_drop(ncpr); 963 return; 964 } 965 delta1 = ticks - best->ticks; 966 delta2 = ticks - elm->ticks; 967 if (delta2 > delta1 || delta1 < -1 || delta2 < -1) 968 best = elm; 969 ++elm; 970 } 971 ncpr = atomic_swap_ptr((void *)&best->ncp, nch->ncp); 972 _cache_mntrel(nch->mount); 973 best->ticks = ticks; 974 nch->mount = NULL; 975 nch->ncp = NULL; 976 if (ncpr) 977 _cache_drop(ncpr); 978 } 979 980 void 981 cache_changemount(struct nchandle *nch, struct mount *mp) 982 { 983 _cache_mntref(mp); 984 _cache_mntrel(nch->mount); 985 nch->mount = mp; 986 } 987 988 void 989 cache_drop(struct nchandle *nch) 990 { 991 _cache_mntrel(nch->mount); 992 _cache_drop(nch->ncp); 993 nch->ncp = NULL; 994 nch->mount = NULL; 995 } 996 997 int 998 cache_lockstatus(struct nchandle *nch) 999 { 1000 return(_cache_lockstatus(nch->ncp)); 1001 } 1002 1003 void 1004 cache_lock(struct nchandle *nch) 1005 { 1006 _cache_lock(nch->ncp); 1007 } 1008 1009 void 1010 cache_lock_maybe_shared(struct nchandle *nch, int excl) 1011 { 1012 struct namecache *ncp = nch->ncp; 1013 1014 if (ncp_shared_lock_disable || excl || 1015 (ncp->nc_flag & NCF_UNRESOLVED)) { 1016 _cache_lock(ncp); 1017 } else { 1018 _cache_lock_shared(ncp); 1019 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 1020 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) { 1021 _cache_unlock(ncp); 1022 _cache_lock(ncp); 1023 } 1024 } else { 1025 _cache_unlock(ncp); 1026 _cache_lock(ncp); 1027 } 1028 } 1029 } 1030 1031 /* 1032 * Relock nch1 given an unlocked nch1 and a locked nch2. The caller 1033 * is responsible for checking both for validity on return as they 1034 * may have become invalid. 1035 * 1036 * We have to deal with potential deadlocks here, just ping pong 1037 * the lock until we get it (we will always block somewhere when 1038 * looping so this is not cpu-intensive). 
1039 * 1040 * which = 0 nch1 not locked, nch2 is locked 1041 * which = 1 nch1 is locked, nch2 is not locked 1042 */ 1043 void 1044 cache_relock(struct nchandle *nch1, struct ucred *cred1, 1045 struct nchandle *nch2, struct ucred *cred2) 1046 { 1047 int which; 1048 1049 which = 0; 1050 1051 for (;;) { 1052 if (which == 0) { 1053 if (cache_lock_nonblock(nch1) == 0) { 1054 cache_resolve(nch1, cred1); 1055 break; 1056 } 1057 cache_unlock(nch2); 1058 cache_lock(nch1); 1059 cache_resolve(nch1, cred1); 1060 which = 1; 1061 } else { 1062 if (cache_lock_nonblock(nch2) == 0) { 1063 cache_resolve(nch2, cred2); 1064 break; 1065 } 1066 cache_unlock(nch1); 1067 cache_lock(nch2); 1068 cache_resolve(nch2, cred2); 1069 which = 0; 1070 } 1071 } 1072 } 1073 1074 int 1075 cache_lock_nonblock(struct nchandle *nch) 1076 { 1077 return(_cache_lock_nonblock(nch->ncp)); 1078 } 1079 1080 void 1081 cache_unlock(struct nchandle *nch) 1082 { 1083 _cache_unlock(nch->ncp); 1084 } 1085 1086 /* 1087 * ref-and-lock, unlock-and-deref functions. 1088 * 1089 * This function is primarily used by nlookup. Even though cache_lock 1090 * holds the vnode, it is possible that the vnode may have already 1091 * initiated a recyclement. 1092 * 1093 * We want cache_get() to return a definitively usable vnode or a 1094 * definitively unresolved ncp. 1095 */ 1096 static 1097 struct namecache * 1098 _cache_get(struct namecache *ncp) 1099 { 1100 _cache_hold(ncp); 1101 _cache_lock(ncp); 1102 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 1103 _cache_setunresolved(ncp); 1104 return(ncp); 1105 } 1106 1107 /* 1108 * Attempt to obtain a shared lock on the ncp. A shared lock will only 1109 * be obtained if the ncp is resolved and the vnode (if not ENOENT) is 1110 * valid. Otherwise an exclusive lock will be acquired instead. 1111 */ 1112 static 1113 struct namecache * 1114 _cache_get_maybe_shared(struct namecache *ncp, int excl) 1115 { 1116 if (ncp_shared_lock_disable || excl || 1117 (ncp->nc_flag & NCF_UNRESOLVED)) { 1118 return(_cache_get(ncp)); 1119 } 1120 _cache_hold(ncp); 1121 _cache_lock_shared(ncp); 1122 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 1123 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) { 1124 _cache_unlock(ncp); 1125 ncp = _cache_get(ncp); 1126 _cache_drop(ncp); 1127 } 1128 } else { 1129 _cache_unlock(ncp); 1130 ncp = _cache_get(ncp); 1131 _cache_drop(ncp); 1132 } 1133 return(ncp); 1134 } 1135 1136 /* 1137 * NOTE: The same nchandle can be passed for both arguments. 1138 */ 1139 void 1140 cache_get(struct nchandle *nch, struct nchandle *target) 1141 { 1142 KKASSERT(nch->ncp->nc_refs > 0); 1143 target->mount = nch->mount; 1144 target->ncp = _cache_get(nch->ncp); 1145 _cache_mntref(target->mount); 1146 } 1147 1148 void 1149 cache_get_maybe_shared(struct nchandle *nch, struct nchandle *target, int excl) 1150 { 1151 KKASSERT(nch->ncp->nc_refs > 0); 1152 target->mount = nch->mount; 1153 target->ncp = _cache_get_maybe_shared(nch->ncp, excl); 1154 _cache_mntref(target->mount); 1155 } 1156 1157 /* 1158 * Release a held and locked ncp 1159 */ 1160 static __inline 1161 void 1162 _cache_put(struct namecache *ncp) 1163 { 1164 _cache_unlock(ncp); 1165 _cache_drop(ncp); 1166 } 1167 1168 void 1169 cache_put(struct nchandle *nch) 1170 { 1171 _cache_mntrel(nch->mount); 1172 _cache_put(nch->ncp); 1173 nch->ncp = NULL; 1174 nch->mount = NULL; 1175 } 1176 1177 /* 1178 * Resolve an unresolved ncp by associating a vnode with it. If the 1179 * vnode is NULL, a negative cache entry is created. 
1180 * 1181 * The ncp should be locked on entry and will remain locked on return. 1182 */ 1183 static 1184 void 1185 _cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp) 1186 { 1187 KKASSERT((ncp->nc_flag & NCF_UNRESOLVED) && 1188 (_cache_lockstatus(ncp) == LK_EXCLUSIVE) && 1189 ncp->nc_vp == NULL); 1190 1191 if (vp) { 1192 /* 1193 * Any vp associated with an ncp which has children must 1194 * be held. Any vp associated with a locked ncp must be held. 1195 */ 1196 if (!TAILQ_EMPTY(&ncp->nc_list)) 1197 vhold(vp); 1198 spin_lock(&vp->v_spin); 1199 ncp->nc_vp = vp; 1200 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode); 1201 ++vp->v_namecache_count; 1202 _cache_hold(ncp); /* v_namecache assoc */ 1203 spin_unlock(&vp->v_spin); 1204 vhold(vp); /* nc_vp */ 1205 1206 /* 1207 * Set auxiliary flags 1208 */ 1209 switch(vp->v_type) { 1210 case VDIR: 1211 ncp->nc_flag |= NCF_ISDIR; 1212 break; 1213 case VLNK: 1214 ncp->nc_flag |= NCF_ISSYMLINK; 1215 /* XXX cache the contents of the symlink */ 1216 break; 1217 default: 1218 break; 1219 } 1220 1221 ncp->nc_error = 0; 1222 1223 /* 1224 * XXX: this is a hack to work-around the lack of a real pfs vfs 1225 * implementation 1226 */ 1227 if (mp) { 1228 if (strncmp(mp->mnt_stat.f_fstypename, "null", 5) == 0) 1229 vp->v_pfsmp = mp; 1230 } 1231 } else { 1232 /* 1233 * When creating a negative cache hit we set the 1234 * namecache_gen. A later resolve will clean out the 1235 * negative cache hit if the mount point's namecache_gen 1236 * has changed. Used by devfs, could also be used by 1237 * other remote FSs. 1238 */ 1239 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid]; 1240 1241 ncp->nc_vp = NULL; 1242 ncp->nc_negcpu = mycpu->gd_cpuid; 1243 spin_lock(&pn->neg_spin); 1244 TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode); 1245 _cache_hold(ncp); /* neg_list assoc */ 1246 ++pn->neg_count; 1247 spin_unlock(&pn->neg_spin); 1248 atomic_add_long(&pn->vfscache_negs, 1); 1249 1250 ncp->nc_error = ENOENT; 1251 if (mp) 1252 VFS_NCPGEN_SET(mp, ncp); 1253 } 1254 ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP); 1255 } 1256 1257 void 1258 cache_setvp(struct nchandle *nch, struct vnode *vp) 1259 { 1260 _cache_setvp(nch->mount, nch->ncp, vp); 1261 } 1262 1263 /* 1264 * Used for NFS 1265 */ 1266 void 1267 cache_settimeout(struct nchandle *nch, int nticks) 1268 { 1269 struct namecache *ncp = nch->ncp; 1270 1271 if ((ncp->nc_timeout = ticks + nticks) == 0) 1272 ncp->nc_timeout = 1; 1273 } 1274 1275 /* 1276 * Disassociate the vnode or negative-cache association and mark a 1277 * namecache entry as unresolved again. Note that the ncp is still 1278 * left in the hash table and still linked to its parent. 1279 * 1280 * The ncp should be locked and refd on entry and will remain locked and refd 1281 * on return. 1282 * 1283 * This routine is normally never called on a directory containing children. 1284 * However, NFS often does just that in its rename() code as a cop-out to 1285 * avoid complex namespace operations. This disconnects a directory vnode 1286 * from its namecache and can cause the OLDAPI and NEWAPI to get out of 1287 * sync. 
1288 * 1289 */ 1290 static 1291 void 1292 _cache_setunresolved(struct namecache *ncp) 1293 { 1294 struct vnode *vp; 1295 1296 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 1297 ncp->nc_flag |= NCF_UNRESOLVED; 1298 ncp->nc_timeout = 0; 1299 ncp->nc_error = ENOTCONN; 1300 if ((vp = ncp->nc_vp) != NULL) { 1301 spin_lock(&vp->v_spin); 1302 ncp->nc_vp = NULL; 1303 TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode); 1304 --vp->v_namecache_count; 1305 spin_unlock(&vp->v_spin); 1306 1307 /* 1308 * Any vp associated with an ncp with children is 1309 * held by that ncp. Any vp associated with ncp 1310 * is held by that ncp. These conditions must be 1311 * undone when the vp is cleared out from the ncp. 1312 */ 1313 if (!TAILQ_EMPTY(&ncp->nc_list)) 1314 vdrop(vp); 1315 vdrop(vp); 1316 } else { 1317 struct pcpu_ncache *pn; 1318 1319 pn = &pcpu_ncache[ncp->nc_negcpu]; 1320 1321 atomic_add_long(&pn->vfscache_negs, -1); 1322 spin_lock(&pn->neg_spin); 1323 TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode); 1324 --pn->neg_count; 1325 spin_unlock(&pn->neg_spin); 1326 } 1327 ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK); 1328 _cache_drop(ncp); /* from v_namecache or neg_list */ 1329 } 1330 } 1331 1332 /* 1333 * The cache_nresolve() code calls this function to automatically 1334 * set a resolved cache element to unresolved if it has timed out 1335 * or if it is a negative cache hit and the mount point namecache_gen 1336 * has changed. 1337 */ 1338 static __inline int 1339 _cache_auto_unresolve_test(struct mount *mp, struct namecache *ncp) 1340 { 1341 /* 1342 * Try to zap entries that have timed out. We have 1343 * to be careful here because locked leafs may depend 1344 * on the vnode remaining intact in a parent, so only 1345 * do this under very specific conditions. 1346 */ 1347 if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 && 1348 TAILQ_EMPTY(&ncp->nc_list)) { 1349 return 1; 1350 } 1351 1352 /* 1353 * If a resolved negative cache hit is invalid due to 1354 * the mount's namecache generation being bumped, zap it. 1355 */ 1356 if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) { 1357 return 1; 1358 } 1359 1360 /* 1361 * Otherwise we are good 1362 */ 1363 return 0; 1364 } 1365 1366 static __inline void 1367 _cache_auto_unresolve(struct mount *mp, struct namecache *ncp) 1368 { 1369 /* 1370 * Already in an unresolved state, nothing to do. 1371 */ 1372 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 1373 if (_cache_auto_unresolve_test(mp, ncp)) 1374 _cache_setunresolved(ncp); 1375 } 1376 } 1377 1378 void 1379 cache_setunresolved(struct nchandle *nch) 1380 { 1381 _cache_setunresolved(nch->ncp); 1382 } 1383 1384 /* 1385 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist 1386 * looking for matches. This flag tells the lookup code when it must 1387 * check for a mount linkage and also prevents the directories in question 1388 * from being deleted or renamed. 1389 */ 1390 static 1391 int 1392 cache_clrmountpt_callback(struct mount *mp, void *data) 1393 { 1394 struct nchandle *nch = data; 1395 1396 if (mp->mnt_ncmounton.ncp == nch->ncp) 1397 return(1); 1398 if (mp->mnt_ncmountpt.ncp == nch->ncp) 1399 return(1); 1400 return(0); 1401 } 1402 1403 /* 1404 * Clear NCF_ISMOUNTPT on nch->ncp if it is no longer associated 1405 * with a mount point. 
1406 */ 1407 void 1408 cache_clrmountpt(struct nchandle *nch) 1409 { 1410 int count; 1411 1412 count = mountlist_scan(cache_clrmountpt_callback, nch, 1413 MNTSCAN_FORWARD | MNTSCAN_NOBUSY | 1414 MNTSCAN_NOUNLOCK); 1415 if (count == 0) 1416 nch->ncp->nc_flag &= ~NCF_ISMOUNTPT; 1417 } 1418 1419 /* 1420 * Invalidate portions of the namecache topology given a starting entry. 1421 * The passed ncp is set to an unresolved state and: 1422 * 1423 * The passed ncp must be referenced and locked. The routine may unlock 1424 * and relock ncp several times, and will recheck the children and loop 1425 * to catch races. When done the passed ncp will be returned with the 1426 * reference and lock intact. 1427 * 1428 * CINV_DESTROY - Set a flag in the passed ncp entry indicating 1429 * that the physical underlying nodes have been 1430 * destroyed... as in deleted. For example, when 1431 * a directory is removed. This will cause record 1432 * lookups on the name to no longer be able to find 1433 * the record and tells the resolver to return failure 1434 * rather then trying to resolve through the parent. 1435 * 1436 * The topology itself, including ncp->nc_name, 1437 * remains intact. 1438 * 1439 * This only applies to the passed ncp, if CINV_CHILDREN 1440 * is specified the children are not flagged. 1441 * 1442 * CINV_CHILDREN - Set all children (recursively) to an unresolved 1443 * state as well. 1444 * 1445 * Note that this will also have the side effect of 1446 * cleaning out any unreferenced nodes in the topology 1447 * from the leaves up as the recursion backs out. 1448 * 1449 * Note that the topology for any referenced nodes remains intact, but 1450 * the nodes will be marked as having been destroyed and will be set 1451 * to an unresolved state. 1452 * 1453 * It is possible for cache_inval() to race a cache_resolve(), meaning that 1454 * the namecache entry may not actually be invalidated on return if it was 1455 * revalidated while recursing down into its children. This code guarentees 1456 * that the node(s) will go through an invalidation cycle, but does not 1457 * guarentee that they will remain in an invalidated state. 1458 * 1459 * Returns non-zero if a revalidation was detected during the invalidation 1460 * recursion, zero otherwise. Note that since only the original ncp is 1461 * locked the revalidation ultimately can only indicate that the original ncp 1462 * *MIGHT* no have been reresolved. 1463 * 1464 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we 1465 * have to avoid blowing out the kernel stack. We do this by saving the 1466 * deep namecache node and aborting the recursion, then re-recursing at that 1467 * node using a depth-first algorithm in order to allow multiple deep 1468 * recursions to chain through each other, then we restart the invalidation 1469 * from scratch. 
1470 */ 1471 1472 struct cinvtrack { 1473 struct namecache *resume_ncp; 1474 int depth; 1475 }; 1476 1477 static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *); 1478 1479 static 1480 int 1481 _cache_inval(struct namecache *ncp, int flags) 1482 { 1483 struct cinvtrack track; 1484 struct namecache *ncp2; 1485 int r; 1486 1487 track.depth = 0; 1488 track.resume_ncp = NULL; 1489 1490 for (;;) { 1491 r = _cache_inval_internal(ncp, flags, &track); 1492 if (track.resume_ncp == NULL) 1493 break; 1494 _cache_unlock(ncp); 1495 while ((ncp2 = track.resume_ncp) != NULL) { 1496 track.resume_ncp = NULL; 1497 _cache_lock(ncp2); 1498 _cache_inval_internal(ncp2, flags & ~CINV_DESTROY, 1499 &track); 1500 /*_cache_put(ncp2);*/ 1501 cache_zap(ncp2); 1502 } 1503 _cache_lock(ncp); 1504 } 1505 return(r); 1506 } 1507 1508 int 1509 cache_inval(struct nchandle *nch, int flags) 1510 { 1511 return(_cache_inval(nch->ncp, flags)); 1512 } 1513 1514 /* 1515 * Helper for _cache_inval(). The passed ncp is refd and locked and 1516 * remains that way on return, but may be unlocked/relocked multiple 1517 * times by the routine. 1518 */ 1519 static int 1520 _cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track) 1521 { 1522 struct namecache *nextkid; 1523 int rcnt = 0; 1524 1525 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE); 1526 1527 _cache_setunresolved(ncp); 1528 if (flags & CINV_DESTROY) { 1529 ncp->nc_flag |= NCF_DESTROYED; 1530 ++ncp->nc_generation; 1531 } 1532 1533 while ((flags & CINV_CHILDREN) && 1534 (nextkid = TAILQ_FIRST(&ncp->nc_list)) != NULL 1535 ) { 1536 struct namecache *kid; 1537 int restart; 1538 1539 restart = 0; 1540 _cache_hold(nextkid); 1541 if (++track->depth > MAX_RECURSION_DEPTH) { 1542 track->resume_ncp = ncp; 1543 _cache_hold(ncp); 1544 ++rcnt; 1545 } 1546 while ((kid = nextkid) != NULL) { 1547 /* 1548 * Parent (ncp) must be locked for the iteration. 1549 */ 1550 nextkid = NULL; 1551 if (kid->nc_parent != ncp) { 1552 _cache_drop(kid); 1553 kprintf("cache_inval_internal restartA %s\n", 1554 ncp->nc_name); 1555 restart = 1; 1556 break; 1557 } 1558 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL) 1559 _cache_hold(nextkid); 1560 1561 /* 1562 * Parent unlocked for this section to avoid 1563 * deadlocks. Then lock the kid and check for 1564 * races. 1565 */ 1566 _cache_unlock(ncp); 1567 if (track->resume_ncp) { 1568 _cache_drop(kid); 1569 _cache_lock(ncp); 1570 break; 1571 } 1572 _cache_lock(kid); 1573 if (kid->nc_parent != ncp) { 1574 kprintf("cache_inval_internal " 1575 "restartB %s\n", 1576 ncp->nc_name); 1577 restart = 1; 1578 _cache_unlock(kid); 1579 _cache_drop(kid); 1580 _cache_lock(ncp); 1581 break; 1582 } 1583 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 || 1584 TAILQ_FIRST(&kid->nc_list) 1585 ) { 1586 1587 rcnt += _cache_inval_internal(kid, 1588 flags & ~CINV_DESTROY, track); 1589 /*_cache_unlock(kid);*/ 1590 /*_cache_drop(kid);*/ 1591 cache_zap(kid); 1592 } else { 1593 cache_zap(kid); 1594 } 1595 1596 /* 1597 * Relock parent to continue scan 1598 */ 1599 _cache_lock(ncp); 1600 } 1601 if (nextkid) 1602 _cache_drop(nextkid); 1603 --track->depth; 1604 if (restart == 0) 1605 break; 1606 } 1607 1608 /* 1609 * Someone could have gotten in there while ncp was unlocked, 1610 * retry if so. 1611 */ 1612 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) 1613 ++rcnt; 1614 return (rcnt); 1615 } 1616 1617 /* 1618 * Invalidate a vnode's namecache associations. 
To avoid races against 1619 * the resolver we do not invalidate a node which we previously invalidated 1620 * but which was then re-resolved while we were in the invalidation loop. 1621 * 1622 * Returns non-zero if any namecache entries remain after the invalidation 1623 * loop completed. 1624 * 1625 * NOTE: Unlike the namecache topology which guarentees that ncp's will not 1626 * be ripped out of the topology while held, the vnode's v_namecache 1627 * list has no such restriction. NCP's can be ripped out of the list 1628 * at virtually any time if not locked, even if held. 1629 * 1630 * In addition, the v_namecache list itself must be locked via 1631 * the vnode's spinlock. 1632 */ 1633 int 1634 cache_inval_vp(struct vnode *vp, int flags) 1635 { 1636 struct namecache *ncp; 1637 struct namecache *next; 1638 1639 restart: 1640 spin_lock(&vp->v_spin); 1641 ncp = TAILQ_FIRST(&vp->v_namecache); 1642 if (ncp) 1643 _cache_hold(ncp); 1644 while (ncp) { 1645 /* loop entered with ncp held and vp spin-locked */ 1646 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL) 1647 _cache_hold(next); 1648 spin_unlock(&vp->v_spin); 1649 _cache_lock(ncp); 1650 if (ncp->nc_vp != vp) { 1651 kprintf("Warning: cache_inval_vp: race-A detected on " 1652 "%s\n", ncp->nc_name); 1653 _cache_put(ncp); 1654 if (next) 1655 _cache_drop(next); 1656 goto restart; 1657 } 1658 _cache_inval(ncp, flags); 1659 _cache_put(ncp); /* also releases reference */ 1660 ncp = next; 1661 spin_lock(&vp->v_spin); 1662 if (ncp && ncp->nc_vp != vp) { 1663 spin_unlock(&vp->v_spin); 1664 kprintf("Warning: cache_inval_vp: race-B detected on " 1665 "%s\n", ncp->nc_name); 1666 _cache_drop(ncp); 1667 goto restart; 1668 } 1669 } 1670 spin_unlock(&vp->v_spin); 1671 return(TAILQ_FIRST(&vp->v_namecache) != NULL); 1672 } 1673 1674 /* 1675 * This routine is used instead of the normal cache_inval_vp() when we 1676 * are trying to recycle otherwise good vnodes. 1677 * 1678 * Return 0 on success, non-zero if not all namecache records could be 1679 * disassociated from the vnode (for various reasons). 1680 */ 1681 int 1682 cache_inval_vp_nonblock(struct vnode *vp) 1683 { 1684 struct namecache *ncp; 1685 struct namecache *next; 1686 1687 spin_lock(&vp->v_spin); 1688 ncp = TAILQ_FIRST(&vp->v_namecache); 1689 if (ncp) 1690 _cache_hold(ncp); 1691 while (ncp) { 1692 /* loop entered with ncp held */ 1693 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL) 1694 _cache_hold(next); 1695 spin_unlock(&vp->v_spin); 1696 if (_cache_lock_nonblock(ncp)) { 1697 _cache_drop(ncp); 1698 if (next) 1699 _cache_drop(next); 1700 goto done; 1701 } 1702 if (ncp->nc_vp != vp) { 1703 kprintf("Warning: cache_inval_vp: race-A detected on " 1704 "%s\n", ncp->nc_name); 1705 _cache_put(ncp); 1706 if (next) 1707 _cache_drop(next); 1708 goto done; 1709 } 1710 _cache_inval(ncp, 0); 1711 _cache_put(ncp); /* also releases reference */ 1712 ncp = next; 1713 spin_lock(&vp->v_spin); 1714 if (ncp && ncp->nc_vp != vp) { 1715 spin_unlock(&vp->v_spin); 1716 kprintf("Warning: cache_inval_vp: race-B detected on " 1717 "%s\n", ncp->nc_name); 1718 _cache_drop(ncp); 1719 goto done; 1720 } 1721 } 1722 spin_unlock(&vp->v_spin); 1723 done: 1724 return(TAILQ_FIRST(&vp->v_namecache) != NULL); 1725 } 1726 1727 /* 1728 * Clears the universal directory search 'ok' flag. This flag allows 1729 * nlookup() to bypass normal vnode checks. This flag is a cached flag 1730 * so clearing it simply forces revalidation. 
1731 */ 1732 void 1733 cache_inval_wxok(struct vnode *vp) 1734 { 1735 struct namecache *ncp; 1736 1737 spin_lock(&vp->v_spin); 1738 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) { 1739 if (ncp->nc_flag & (NCF_WXOK | NCF_NOTX)) 1740 atomic_clear_short(&ncp->nc_flag, NCF_WXOK | NCF_NOTX); 1741 } 1742 spin_unlock(&vp->v_spin); 1743 } 1744 1745 /* 1746 * The source ncp has been renamed to the target ncp. Both fncp and tncp 1747 * must be locked. The target ncp is destroyed (as a normal rename-over 1748 * would destroy the target file or directory). 1749 * 1750 * Because there may be references to the source ncp we cannot copy its 1751 * contents to the target. Instead the source ncp is relinked as the target 1752 * and the target ncp is removed from the namecache topology. 1753 */ 1754 void 1755 cache_rename(struct nchandle *fnch, struct nchandle *tnch) 1756 { 1757 struct namecache *fncp = fnch->ncp; 1758 struct namecache *tncp = tnch->ncp; 1759 struct namecache *tncp_par; 1760 struct nchash_head *nchpp; 1761 u_int32_t hash; 1762 char *oname; 1763 char *nname; 1764 1765 ++fncp->nc_generation; 1766 ++tncp->nc_generation; 1767 if (tncp->nc_nlen) { 1768 nname = kmalloc(tncp->nc_nlen + 1, M_VFSCACHE, M_WAITOK); 1769 bcopy(tncp->nc_name, nname, tncp->nc_nlen); 1770 nname[tncp->nc_nlen] = 0; 1771 } else { 1772 nname = NULL; 1773 } 1774 1775 /* 1776 * Rename fncp (unlink) 1777 */ 1778 _cache_unlink_parent(fncp); 1779 oname = fncp->nc_name; 1780 fncp->nc_name = nname; 1781 fncp->nc_nlen = tncp->nc_nlen; 1782 if (oname) 1783 kfree(oname, M_VFSCACHE); 1784 1785 tncp_par = tncp->nc_parent; 1786 _cache_hold(tncp_par); 1787 _cache_lock(tncp_par); 1788 1789 /* 1790 * Rename fncp (relink) 1791 */ 1792 hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT); 1793 hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash); 1794 nchpp = NCHHASH(hash); 1795 1796 spin_lock(&nchpp->spin); 1797 _cache_link_parent(fncp, tncp_par, nchpp); 1798 spin_unlock(&nchpp->spin); 1799 1800 _cache_put(tncp_par); 1801 1802 /* 1803 * Get rid of the overwritten tncp (unlink) 1804 */ 1805 _cache_unlink(tncp); 1806 } 1807 1808 /* 1809 * Perform actions consistent with unlinking a file. The passed-in ncp 1810 * must be locked. 1811 * 1812 * The ncp is marked DESTROYED so it no longer shows up in searches, 1813 * and will be physically deleted when the vnode goes away. 1814 * 1815 * If the related vnode has no refs then we cycle it through vget()/vput() 1816 * to (possibly if we don't have a ref race) trigger a deactivation, 1817 * allowing the VFS to trivially detect and recycle the deleted vnode 1818 * via VOP_INACTIVE(). 1819 * 1820 * NOTE: _cache_rename() will automatically call _cache_unlink() on the 1821 * target ncp. 1822 */ 1823 void 1824 cache_unlink(struct nchandle *nch) 1825 { 1826 _cache_unlink(nch->ncp); 1827 } 1828 1829 static void 1830 _cache_unlink(struct namecache *ncp) 1831 { 1832 struct vnode *vp; 1833 1834 /* 1835 * Causes lookups to fail and allows another ncp with the same 1836 * name to be created under ncp->nc_parent. 1837 */ 1838 ncp->nc_flag |= NCF_DESTROYED; 1839 ++ncp->nc_generation; 1840 1841 /* 1842 * Attempt to trigger a deactivation. Set VREF_FINALIZE to 1843 * force action on the 1->0 transition. 
1844 */ 1845 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 && 1846 (vp = ncp->nc_vp) != NULL) { 1847 atomic_set_int(&vp->v_refcnt, VREF_FINALIZE); 1848 if (VREFCNT(vp) <= 0) { 1849 if (vget(vp, LK_SHARED) == 0) 1850 vput(vp); 1851 } 1852 } 1853 } 1854 1855 /* 1856 * Return non-zero if the nch might be associated with an open and/or mmap()'d 1857 * file. The easy solution is to just return non-zero if the vnode has refs. 1858 * Used to interlock hammer2 reclaims (VREF_FINALIZE should already be set to 1859 * force the reclaim). 1860 */ 1861 int 1862 cache_isopen(struct nchandle *nch) 1863 { 1864 struct vnode *vp; 1865 struct namecache *ncp = nch->ncp; 1866 1867 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 && 1868 (vp = ncp->nc_vp) != NULL && 1869 VREFCNT(vp)) { 1870 return 1; 1871 } 1872 return 0; 1873 } 1874 1875 1876 /* 1877 * vget the vnode associated with the namecache entry. Resolve the namecache 1878 * entry if necessary. The passed ncp must be referenced and locked. If 1879 * the ncp is resolved it might be locked shared. 1880 * 1881 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked 1882 * (depending on the passed lk_type) will be returned in *vpp with an error 1883 * of 0, or NULL will be returned in *vpp with a non-0 error code. The 1884 * most typical error is ENOENT, meaning that the ncp represents a negative 1885 * cache hit and there is no vnode to retrieve, but other errors can occur 1886 * too. 1887 * 1888 * The vget() can race a reclaim. If this occurs we re-resolve the 1889 * namecache entry. 1890 * 1891 * There are numerous places in the kernel where vget() is called on a 1892 * vnode while one or more of its namecache entries is locked. Releasing 1893 * a vnode never deadlocks against locked namecache entries (the vnode 1894 * will not get recycled while referenced ncp's exist). This means we 1895 * can safely acquire the vnode. In fact, we MUST NOT release the ncp 1896 * lock when acquiring the vp lock or we might cause a deadlock. 1897 * 1898 * NOTE: The passed-in ncp must be locked exclusively if it is initially 1899 * unresolved. If a reclaim race occurs the passed-in ncp will be 1900 * relocked exclusively before being re-resolved. 1901 */ 1902 int 1903 cache_vget(struct nchandle *nch, struct ucred *cred, 1904 int lk_type, struct vnode **vpp) 1905 { 1906 struct namecache *ncp; 1907 struct vnode *vp; 1908 int error; 1909 1910 ncp = nch->ncp; 1911 again: 1912 vp = NULL; 1913 if (ncp->nc_flag & NCF_UNRESOLVED) 1914 error = cache_resolve(nch, cred); 1915 else 1916 error = 0; 1917 1918 if (error == 0 && (vp = ncp->nc_vp) != NULL) { 1919 error = vget(vp, lk_type); 1920 if (error) { 1921 /* 1922 * VRECLAIM race 1923 * 1924 * The ncp may have been locked shared, we must relock 1925 * it exclusively before we can set it to unresolved. 1926 */ 1927 if (error == ENOENT) { 1928 kprintf("Warning: vnode reclaim race detected " 1929 "in cache_vget on %p (%s)\n", 1930 vp, ncp->nc_name); 1931 _cache_unlock(ncp); 1932 _cache_lock(ncp); 1933 _cache_setunresolved(ncp); 1934 goto again; 1935 } 1936 1937 /* 1938 * Not a reclaim race, some other error. 1939 */ 1940 KKASSERT(ncp->nc_vp == vp); 1941 vp = NULL; 1942 } else { 1943 KKASSERT(ncp->nc_vp == vp); 1944 KKASSERT((vp->v_flag & VRECLAIMED) == 0); 1945 } 1946 } 1947 if (error == 0 && vp == NULL) 1948 error = ENOENT; 1949 *vpp = vp; 1950 return(error); 1951 } 1952 1953 /* 1954 * Similar to cache_vget() but only acquires a ref on the vnode. 
1955 * 1956 * NOTE: The passed-in ncp must be locked exclusively if it is initially 1957 * unresolved. If a reclaim race occurs the passed-in ncp will be 1958 * relocked exclusively before being re-resolved. 1959 * 1960 * NOTE: At the moment we have to issue a vget() on the vnode, even though 1961 * we are going to immediately release the lock, in order to resolve 1962 * potential reclamation races. Once we have a solid vnode ref that 1963 * was (at some point) interlocked via a vget(), the vnode will not 1964 * be reclaimed. 1965 * 1966 * NOTE: vhold counts (v_auxrefs) do not prevent reclamation. 1967 */ 1968 int 1969 cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp) 1970 { 1971 struct namecache *ncp; 1972 struct vnode *vp; 1973 int error; 1974 1975 ncp = nch->ncp; 1976 again: 1977 vp = NULL; 1978 if (ncp->nc_flag & NCF_UNRESOLVED) 1979 error = cache_resolve(nch, cred); 1980 else 1981 error = 0; 1982 1983 if (error == 0 && (vp = ncp->nc_vp) != NULL) { 1984 error = vget(vp, LK_SHARED); 1985 if (error) { 1986 /* 1987 * VRECLAIM race 1988 */ 1989 if (error == ENOENT) { 1990 kprintf("Warning: vnode reclaim race detected " 1991 "in cache_vget on %p (%s)\n", 1992 vp, ncp->nc_name); 1993 _cache_unlock(ncp); 1994 _cache_lock(ncp); 1995 _cache_setunresolved(ncp); 1996 goto again; 1997 } 1998 1999 /* 2000 * Not a reclaim race, some other error. 2001 */ 2002 KKASSERT(ncp->nc_vp == vp); 2003 vp = NULL; 2004 } else { 2005 KKASSERT(ncp->nc_vp == vp); 2006 KKASSERT((vp->v_flag & VRECLAIMED) == 0); 2007 /* caller does not want a lock */ 2008 vn_unlock(vp); 2009 } 2010 } 2011 if (error == 0 && vp == NULL) 2012 error = ENOENT; 2013 *vpp = vp; 2014 return(error); 2015 } 2016 2017 /* 2018 * Return a referenced vnode representing the parent directory of 2019 * ncp. 2020 * 2021 * Because the caller has locked the ncp it should not be possible for 2022 * the parent ncp to go away. However, the parent can unresolve its 2023 * dvp at any time so we must be able to acquire a lock on the parent 2024 * to safely access nc_vp. 2025 * 2026 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock, 2027 * so use vhold()/vdrop() while holding the lock to prevent dvp from 2028 * getting destroyed. 2029 * 2030 * NOTE: vhold() is allowed when dvp has 0 refs if we hold a 2031 * lock on the ncp in question.. 2032 */ 2033 struct vnode * 2034 cache_dvpref(struct namecache *ncp) 2035 { 2036 struct namecache *par; 2037 struct vnode *dvp; 2038 2039 dvp = NULL; 2040 if ((par = ncp->nc_parent) != NULL) { 2041 _cache_hold(par); 2042 _cache_lock(par); 2043 if ((par->nc_flag & NCF_UNRESOLVED) == 0) { 2044 if ((dvp = par->nc_vp) != NULL) 2045 vhold(dvp); 2046 } 2047 _cache_unlock(par); 2048 if (dvp) { 2049 if (vget(dvp, LK_SHARED) == 0) { 2050 vn_unlock(dvp); 2051 vdrop(dvp); 2052 /* return refd, unlocked dvp */ 2053 } else { 2054 vdrop(dvp); 2055 dvp = NULL; 2056 } 2057 } 2058 _cache_drop(par); 2059 } 2060 return(dvp); 2061 } 2062 2063 /* 2064 * Convert a directory vnode to a namecache record without any other 2065 * knowledge of the topology. This ONLY works with directory vnodes and 2066 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the 2067 * returned ncp (if not NULL) will be held and unlocked. 2068 * 2069 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned. 2070 * If 'makeit' is 1 we attempt to track-down and create the namecache topology 2071 * for dvp. This will fail only if the directory has been deleted out from 2072 * under the caller. 
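 *
 * Illustrative sketch (not part of the original sources; 'dvp' and
 * 'cred' are assumed to come from the caller's file handle translation):
 * an NFS-server style consumer would typically ask for the topology to
 * be reconstructed and check the result regardless:
 *
 *	struct nchandle nch;
 *
 *	if (cache_fromdvp(dvp, cred, 1, &nch) == 0 && nch.ncp) {
 *		...nch.ncp is held but not locked, use it...
 *		cache_drop(&nch);
 *	} else {
 *		...topology could not be recovered (e.g. dir deleted)...
 *	}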
2073 * 2074 * Callers must always check for a NULL return no matter the value of 'makeit'. 2075 * 2076 * To avoid underflowing the kernel stack each recursive call increments 2077 * the makeit variable. 2078 */ 2079 2080 static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred, 2081 struct vnode *dvp, char *fakename); 2082 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred, 2083 struct vnode **saved_dvp); 2084 2085 int 2086 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit, 2087 struct nchandle *nch) 2088 { 2089 struct vnode *saved_dvp; 2090 struct vnode *pvp; 2091 char *fakename; 2092 int error; 2093 2094 nch->ncp = NULL; 2095 nch->mount = dvp->v_mount; 2096 saved_dvp = NULL; 2097 fakename = NULL; 2098 2099 /* 2100 * Handle the makeit == 0 degenerate case 2101 */ 2102 if (makeit == 0) { 2103 spin_lock_shared(&dvp->v_spin); 2104 nch->ncp = TAILQ_FIRST(&dvp->v_namecache); 2105 if (nch->ncp) 2106 cache_hold(nch); 2107 spin_unlock_shared(&dvp->v_spin); 2108 } 2109 2110 /* 2111 * Loop until resolution, inside code will break out on error. 2112 */ 2113 while (makeit) { 2114 /* 2115 * Break out if we successfully acquire a working ncp. 2116 */ 2117 spin_lock_shared(&dvp->v_spin); 2118 nch->ncp = TAILQ_FIRST(&dvp->v_namecache); 2119 if (nch->ncp) { 2120 cache_hold(nch); 2121 spin_unlock_shared(&dvp->v_spin); 2122 break; 2123 } 2124 spin_unlock_shared(&dvp->v_spin); 2125 2126 /* 2127 * If dvp is the root of its filesystem it should already 2128 * have a namecache pointer associated with it as a side 2129 * effect of the mount, but it may have been disassociated. 2130 */ 2131 if (dvp->v_flag & VROOT) { 2132 nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp); 2133 error = cache_resolve_mp(nch->mount); 2134 _cache_put(nch->ncp); 2135 if (ncvp_debug) { 2136 kprintf("cache_fromdvp: resolve root of mount %p error %d", 2137 dvp->v_mount, error); 2138 } 2139 if (error) { 2140 if (ncvp_debug) 2141 kprintf(" failed\n"); 2142 nch->ncp = NULL; 2143 break; 2144 } 2145 if (ncvp_debug) 2146 kprintf(" succeeded\n"); 2147 continue; 2148 } 2149 2150 /* 2151 * If we are recursed too deeply resort to an O(n^2) 2152 * algorithm to resolve the namecache topology. The 2153 * resolved pvp is left referenced in saved_dvp to 2154 * prevent the tree from being destroyed while we loop. 2155 */ 2156 if (makeit > 20) { 2157 error = cache_fromdvp_try(dvp, cred, &saved_dvp); 2158 if (error) { 2159 kprintf("lookupdotdot(longpath) failed %d " 2160 "dvp %p\n", error, dvp); 2161 nch->ncp = NULL; 2162 break; 2163 } 2164 continue; 2165 } 2166 2167 /* 2168 * Get the parent directory and resolve its ncp. 2169 */ 2170 if (fakename) { 2171 kfree(fakename, M_TEMP); 2172 fakename = NULL; 2173 } 2174 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred, 2175 &fakename); 2176 if (error) { 2177 kprintf("lookupdotdot failed %d dvp %p\n", error, dvp); 2178 break; 2179 } 2180 vn_unlock(pvp); 2181 2182 /* 2183 * Reuse makeit as a recursion depth counter. On success 2184 * nch will be fully referenced. 2185 */ 2186 cache_fromdvp(pvp, cred, makeit + 1, nch); 2187 vrele(pvp); 2188 if (nch->ncp == NULL) 2189 break; 2190 2191 /* 2192 * Do an inefficient scan of pvp (embodied by ncp) to look 2193 * for dvp. This will create a namecache record for dvp on 2194 * success. We loop up to recheck on success. 2195 * 2196 * ncp and dvp are both held but not locked. 
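 *
 * In other words, cache_fromdvp() works in two phases: it first walks
 * up through ".." via vop_nlookupdotdot(), recursing with makeit+1 as a
 * depth counter (falling back to cache_fromdvp_try() when too deep),
 * until it reaches a vnode that already has a namecache entry or the
 * mount root; it then scans back down with cache_inefficient_scan() so
 * each intermediate directory acquires a connected namecache record.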
2197 */ 2198 error = cache_inefficient_scan(nch, cred, dvp, fakename); 2199 if (error) { 2200 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n", 2201 pvp, nch->ncp->nc_name, dvp); 2202 cache_drop(nch); 2203 /* nch was NULLed out, reload mount */ 2204 nch->mount = dvp->v_mount; 2205 break; 2206 } 2207 if (ncvp_debug) { 2208 kprintf("cache_fromdvp: scan %p (%s) succeeded\n", 2209 pvp, nch->ncp->nc_name); 2210 } 2211 cache_drop(nch); 2212 /* nch was NULLed out, reload mount */ 2213 nch->mount = dvp->v_mount; 2214 } 2215 2216 /* 2217 * If nch->ncp is non-NULL it will have been held already. 2218 */ 2219 if (fakename) 2220 kfree(fakename, M_TEMP); 2221 if (saved_dvp) 2222 vrele(saved_dvp); 2223 if (nch->ncp) 2224 return (0); 2225 return (EINVAL); 2226 } 2227 2228 /* 2229 * Go up the chain of parent directories until we find something 2230 * we can resolve into the namecache. This is very inefficient. 2231 */ 2232 static 2233 int 2234 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred, 2235 struct vnode **saved_dvp) 2236 { 2237 struct nchandle nch; 2238 struct vnode *pvp; 2239 int error; 2240 static time_t last_fromdvp_report; 2241 char *fakename; 2242 2243 /* 2244 * Loop getting the parent directory vnode until we get something we 2245 * can resolve in the namecache. 2246 */ 2247 vref(dvp); 2248 nch.mount = dvp->v_mount; 2249 nch.ncp = NULL; 2250 fakename = NULL; 2251 2252 for (;;) { 2253 if (fakename) { 2254 kfree(fakename, M_TEMP); 2255 fakename = NULL; 2256 } 2257 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred, 2258 &fakename); 2259 if (error) { 2260 vrele(dvp); 2261 break; 2262 } 2263 vn_unlock(pvp); 2264 spin_lock_shared(&pvp->v_spin); 2265 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) { 2266 _cache_hold(nch.ncp); 2267 spin_unlock_shared(&pvp->v_spin); 2268 vrele(pvp); 2269 break; 2270 } 2271 spin_unlock_shared(&pvp->v_spin); 2272 if (pvp->v_flag & VROOT) { 2273 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp); 2274 error = cache_resolve_mp(nch.mount); 2275 _cache_unlock(nch.ncp); 2276 vrele(pvp); 2277 if (error) { 2278 _cache_drop(nch.ncp); 2279 nch.ncp = NULL; 2280 vrele(dvp); 2281 } 2282 break; 2283 } 2284 vrele(dvp); 2285 dvp = pvp; 2286 } 2287 if (error == 0) { 2288 if (last_fromdvp_report != time_uptime) { 2289 last_fromdvp_report = time_uptime; 2290 kprintf("Warning: extremely inefficient path " 2291 "resolution on %s\n", 2292 nch.ncp->nc_name); 2293 } 2294 error = cache_inefficient_scan(&nch, cred, dvp, fakename); 2295 2296 /* 2297 * Hopefully dvp now has a namecache record associated with 2298 * it. Leave it referenced to prevent the kernel from 2299 * recycling the vnode. Otherwise extremely long directory 2300 * paths could result in endless recycling. 2301 */ 2302 if (*saved_dvp) 2303 vrele(*saved_dvp); 2304 *saved_dvp = dvp; 2305 _cache_drop(nch.ncp); 2306 } 2307 if (fakename) 2308 kfree(fakename, M_TEMP); 2309 return (error); 2310 } 2311 2312 /* 2313 * Do an inefficient scan of the directory represented by ncp looking for 2314 * the directory vnode dvp. ncp must be held but not locked on entry and 2315 * will be held on return. dvp must be refd but not locked on entry and 2316 * will remain refd on return. 2317 * 2318 * Why do this at all? Well, due to its stateless nature the NFS server 2319 * converts file handles directly to vnodes without necessarily going through 2320 * the namecache ops that would otherwise create the namecache topology 2321 * leading to the vnode. 
We could either (1) Change the namecache algorithms
2322 * to allow disconnected namecache records that are re-merged opportunistically,
2323 * or (2) Make the NFS server backtrack and scan to recover a connected
2324 * namecache topology in order to then be able to issue new API lookups.
2325 *
2326 * It turns out that (1) is a huge mess. It takes a nice clean set of
2327 * namecache algorithms and introduces a lot of complication in every subsystem
2328 * that calls into the namecache to deal with the re-merge case, especially
2329 * since we are using the namecache to placehold negative lookups and the
2330 * vnode might not be immediately assigned. (2) is certainly far less
2331 * efficient than (1), but since we are only talking about directories here
2332 * (which are likely to remain cached), the case does not actually run all
2333 * that often and has the supreme advantage of not polluting the namecache
2334 * algorithms.
2335 *
2336 * If a fakename is supplied just construct a namecache entry using the
2337 * fake name.
2338 */
2339 static int
2340 cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
2341 struct vnode *dvp, char *fakename)
2342 {
2343 struct nlcomponent nlc;
2344 struct nchandle rncp;
2345 struct dirent *den;
2346 struct vnode *pvp;
2347 struct vattr vat;
2348 struct iovec iov;
2349 struct uio uio;
2350 int blksize;
2351 int eofflag;
2352 int bytes;
2353 char *rbuf;
2354 int error;
2355
2356 vat.va_blocksize = 0;
2357 if ((error = VOP_GETATTR(dvp, &vat)) != 0)
2358 return (error);
2359 cache_lock(nch);
2360 error = cache_vref(nch, cred, &pvp);
2361 cache_unlock(nch);
2362 if (error)
2363 return (error);
2364 if (ncvp_debug) {
2365 kprintf("inefficient_scan of (%p,%s): directory iosize %ld "
2366 "vattr fileid = %lld\n",
2367 nch->ncp, nch->ncp->nc_name,
2368 vat.va_blocksize,
2369 (long long)vat.va_fileid);
2370 }
2371
2372 /*
2373 * Use the supplied fakename if not NULL. Fake names are typically
2374 * not in the actual filesystem hierarchy. This is used by HAMMER
2375 * to glue @@timestamp recursions together.
2376 */ 2377 if (fakename) { 2378 nlc.nlc_nameptr = fakename; 2379 nlc.nlc_namelen = strlen(fakename); 2380 rncp = cache_nlookup(nch, &nlc); 2381 goto done; 2382 } 2383 2384 if ((blksize = vat.va_blocksize) == 0) 2385 blksize = DEV_BSIZE; 2386 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK); 2387 rncp.ncp = NULL; 2388 2389 eofflag = 0; 2390 uio.uio_offset = 0; 2391 again: 2392 iov.iov_base = rbuf; 2393 iov.iov_len = blksize; 2394 uio.uio_iov = &iov; 2395 uio.uio_iovcnt = 1; 2396 uio.uio_resid = blksize; 2397 uio.uio_segflg = UIO_SYSSPACE; 2398 uio.uio_rw = UIO_READ; 2399 uio.uio_td = curthread; 2400 2401 if (ncvp_debug >= 2) 2402 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset); 2403 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL); 2404 if (error == 0) { 2405 den = (struct dirent *)rbuf; 2406 bytes = blksize - uio.uio_resid; 2407 2408 while (bytes > 0) { 2409 if (ncvp_debug >= 2) { 2410 kprintf("cache_inefficient_scan: %*.*s\n", 2411 den->d_namlen, den->d_namlen, 2412 den->d_name); 2413 } 2414 if (den->d_type != DT_WHT && 2415 den->d_ino == vat.va_fileid) { 2416 if (ncvp_debug) { 2417 kprintf("cache_inefficient_scan: " 2418 "MATCHED inode %lld path %s/%*.*s\n", 2419 (long long)vat.va_fileid, 2420 nch->ncp->nc_name, 2421 den->d_namlen, den->d_namlen, 2422 den->d_name); 2423 } 2424 nlc.nlc_nameptr = den->d_name; 2425 nlc.nlc_namelen = den->d_namlen; 2426 rncp = cache_nlookup(nch, &nlc); 2427 KKASSERT(rncp.ncp != NULL); 2428 break; 2429 } 2430 bytes -= _DIRENT_DIRSIZ(den); 2431 den = _DIRENT_NEXT(den); 2432 } 2433 if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize) 2434 goto again; 2435 } 2436 kfree(rbuf, M_TEMP); 2437 done: 2438 vrele(pvp); 2439 if (rncp.ncp) { 2440 if (rncp.ncp->nc_flag & NCF_UNRESOLVED) { 2441 _cache_setvp(rncp.mount, rncp.ncp, dvp); 2442 if (ncvp_debug >= 2) { 2443 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n", 2444 nch->ncp->nc_name, rncp.ncp->nc_name, dvp); 2445 } 2446 } else { 2447 if (ncvp_debug >= 2) { 2448 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n", 2449 nch->ncp->nc_name, rncp.ncp->nc_name, dvp, 2450 rncp.ncp->nc_vp); 2451 } 2452 } 2453 if (rncp.ncp->nc_vp == NULL) 2454 error = rncp.ncp->nc_error; 2455 /* 2456 * Release rncp after a successful nlookup. rncp was fully 2457 * referenced. 2458 */ 2459 cache_put(&rncp); 2460 } else { 2461 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n", 2462 dvp, nch->ncp->nc_name); 2463 error = ENOENT; 2464 } 2465 return (error); 2466 } 2467 2468 /* 2469 * This function must be called with the ncp held and locked and will unlock 2470 * and drop it during zapping. 2471 * 2472 * Zap a namecache entry. The ncp is unconditionally set to an unresolved 2473 * state, which disassociates it from its vnode or pcpu_ncache[n].neg_list 2474 * and removes the related reference. If the ncp can be removed, and the 2475 * parent can be zapped non-blocking, this function loops up. 2476 * 2477 * There will be one ref from the caller (which we now own). The only 2478 * remaining autonomous refs to the ncp will then be due to nc_parent->nc_list, 2479 * so possibly 2 refs left. Taking this into account, if there are no 2480 * additional refs and no children, the ncp will be removed from the topology 2481 * and destroyed. 2482 * 2483 * References and/or children may exist if the ncp is in the middle of the 2484 * topology, preventing the ncp from being destroyed. 2485 * 2486 * If nonblock is non-zero and the parent ncp cannot be locked we give up. 
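 *
 * Worked example of the reference accounting (added for clarity, see
 * the code below): counting the caller's ref, an ncp still linked under
 * a parent is expected to show exactly two refs here (caller + the
 * nc_list linkage), while a parentless ncp is expected to show one.
 * Anything higher, or a non-empty nc_list of children, means the entry
 * is still live and the zap backs out:
 *
 *	refcmp = (ncp->nc_parent != NULL) ? 2 : 1;
 *	if (ncp->nc_refs != refcmp || TAILQ_FIRST(&ncp->nc_list))
 *		...undo, unlock and drop the caller's ref...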
2487 * 2488 * This function may return a held (but NOT locked) parent node which the 2489 * caller must drop in a loop. Looping is one way to avoid unbounded recursion 2490 * due to deep namecache trees. 2491 * 2492 * WARNING! For MPSAFE operation this routine must acquire up to three 2493 * spin locks to be able to safely test nc_refs. Lock order is 2494 * very important. 2495 * 2496 * hash spinlock if on hash list 2497 * parent spinlock if child of parent 2498 * (the ncp is unresolved so there is no vnode association) 2499 */ 2500 static void 2501 cache_zap(struct namecache *ncp) 2502 { 2503 struct namecache *par; 2504 struct vnode *dropvp; 2505 struct nchash_head *nchpp; 2506 int refcmp; 2507 int nonblock = 1; /* XXX cleanup */ 2508 2509 again: 2510 /* 2511 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED. 2512 * This gets rid of any vp->v_namecache list or negative list and 2513 * the related ref. 2514 */ 2515 _cache_setunresolved(ncp); 2516 2517 /* 2518 * Try to scrap the entry and possibly tail-recurse on its parent. 2519 * We only scrap unref'd (other then our ref) unresolved entries, 2520 * we do not scrap 'live' entries. 2521 * 2522 * If nc_parent is non NULL we expect 2 references, else just 1. 2523 * If there are more, someone else also holds the ncp and we cannot 2524 * destroy it. 2525 */ 2526 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED); 2527 KKASSERT(ncp->nc_refs > 0); 2528 2529 /* 2530 * If the ncp is linked to its parent it will also be in the hash 2531 * table. We have to be able to lock the parent and the hash table. 2532 * 2533 * Acquire locks. Note that the parent can't go away while we hold 2534 * a child locked. If nc_parent is present, expect 2 refs instead 2535 * of 1. 2536 */ 2537 nchpp = NULL; 2538 if ((par = ncp->nc_parent) != NULL) { 2539 if (nonblock) { 2540 if (_cache_lock_nonblock(par)) { 2541 /* lock failed */ 2542 ncp->nc_flag |= NCF_DEFEREDZAP; 2543 atomic_add_long( 2544 &pcpu_ncache[mycpu->gd_cpuid].numdefered, 2545 1); 2546 _cache_unlock(ncp); 2547 _cache_drop(ncp); /* caller's ref */ 2548 return; 2549 } 2550 _cache_hold(par); 2551 } else { 2552 _cache_hold(par); 2553 _cache_lock(par); 2554 } 2555 nchpp = ncp->nc_head; 2556 spin_lock(&nchpp->spin); 2557 } 2558 2559 /* 2560 * With the parent and nchpp locked, and the vnode removed 2561 * (no vp->v_namecache), we expect 1 or 2 refs. If there are 2562 * more someone else has a ref and we cannot zap the entry. 2563 * 2564 * one for our hold 2565 * one for our parent link (parent also has one from the linkage) 2566 */ 2567 if (par) 2568 refcmp = 2; 2569 else 2570 refcmp = 1; 2571 2572 /* 2573 * On failure undo the work we've done so far and drop the 2574 * caller's ref and ncp. 2575 */ 2576 if (ncp->nc_refs != refcmp || TAILQ_FIRST(&ncp->nc_list)) { 2577 if (par) { 2578 spin_unlock(&nchpp->spin); 2579 _cache_put(par); 2580 } 2581 _cache_unlock(ncp); 2582 _cache_drop(ncp); 2583 return; 2584 } 2585 2586 /* 2587 * We own all the refs and with the spinlocks held no further 2588 * refs can be acquired by others. 2589 * 2590 * Remove us from the hash list and parent list. We have to 2591 * drop a ref on the parent's vp if the parent's list becomes 2592 * empty. 
2593 */ 2594 dropvp = NULL; 2595 if (par) { 2596 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid]; 2597 2598 KKASSERT(nchpp == ncp->nc_head); 2599 TAILQ_REMOVE(&ncp->nc_head->list, ncp, nc_hash); 2600 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry); 2601 atomic_add_long(&pn->vfscache_count, -1); 2602 if (TAILQ_EMPTY(&ncp->nc_list)) 2603 atomic_add_long(&pn->vfscache_leafs, -1); 2604 2605 if (TAILQ_EMPTY(&par->nc_list)) { 2606 atomic_add_long(&pn->vfscache_leafs, 1); 2607 if (par->nc_vp) 2608 dropvp = par->nc_vp; 2609 } 2610 ncp->nc_parent = NULL; 2611 ncp->nc_head = NULL; 2612 spin_unlock(&nchpp->spin); 2613 _cache_drop(par); /* removal of ncp from par->nc_list */ 2614 /*_cache_unlock(par);*/ 2615 } else { 2616 KKASSERT(ncp->nc_head == NULL); 2617 } 2618 2619 /* 2620 * ncp should not have picked up any refs. Physically 2621 * destroy the ncp. 2622 */ 2623 if (ncp->nc_refs != refcmp) { 2624 panic("cache_zap: %p bad refs %d (expected %d)\n", 2625 ncp, ncp->nc_refs, refcmp); 2626 } 2627 /* _cache_unlock(ncp) not required */ 2628 ncp->nc_refs = -1; /* safety */ 2629 if (ncp->nc_name) 2630 kfree(ncp->nc_name, M_VFSCACHE); 2631 kfree(ncp, M_VFSCACHE); 2632 2633 /* 2634 * Delayed drop (we had to release our spinlocks) 2635 */ 2636 if (dropvp) 2637 vdrop(dropvp); 2638 2639 /* 2640 * Loop up if we can recursively clean out the parent. 2641 */ 2642 if (par) { 2643 refcmp = 1; /* ref on parent */ 2644 if (par->nc_parent) /* par->par */ 2645 ++refcmp; 2646 par->nc_flag &= ~NCF_DEFEREDZAP; 2647 if ((par->nc_flag & NCF_UNRESOLVED) && 2648 par->nc_refs == refcmp && 2649 TAILQ_EMPTY(&par->nc_list)) { 2650 ncp = par; 2651 goto again; 2652 } 2653 _cache_unlock(par); 2654 _cache_drop(par); 2655 } 2656 } 2657 2658 /* 2659 * Clean up dangling negative cache and defered-drop entries in the 2660 * namecache. 2661 * 2662 * This routine is called in the critical path and also called from 2663 * vnlru(). When called from vnlru we use a lower limit to try to 2664 * deal with the negative cache before the critical path has to start 2665 * dealing with it. 2666 */ 2667 typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t; 2668 2669 static cache_hs_t neg_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW }; 2670 static cache_hs_t pos_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW }; 2671 2672 void 2673 cache_hysteresis(int critpath) 2674 { 2675 long poslimit; 2676 long neglimit = maxvnodes / ncnegfactor; 2677 long xnumcache = vfscache_leafs; 2678 2679 if (critpath == 0) 2680 neglimit = neglimit * 8 / 10; 2681 2682 /* 2683 * Don't cache too many negative hits. We use hysteresis to reduce 2684 * the impact on the critical path. 2685 */ 2686 switch(neg_cache_hysteresis_state[critpath]) { 2687 case CHI_LOW: 2688 if (vfscache_negs > MINNEG && vfscache_negs > neglimit) { 2689 if (critpath) 2690 _cache_cleanneg(ncnegflush); 2691 else 2692 _cache_cleanneg(ncnegflush + 2693 vfscache_negs - neglimit); 2694 neg_cache_hysteresis_state[critpath] = CHI_HIGH; 2695 } 2696 break; 2697 case CHI_HIGH: 2698 if (vfscache_negs > MINNEG * 9 / 10 && 2699 vfscache_negs * 9 / 10 > neglimit 2700 ) { 2701 if (critpath) 2702 _cache_cleanneg(ncnegflush); 2703 else 2704 _cache_cleanneg(ncnegflush + 2705 vfscache_negs * 9 / 10 - 2706 neglimit); 2707 } else { 2708 neg_cache_hysteresis_state[critpath] = CHI_LOW; 2709 } 2710 break; 2711 } 2712 2713 /* 2714 * Don't cache too many positive hits. We use hysteresis to reduce 2715 * the impact on the critical path. 
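 *
 * Worked example (illustrative numbers, not defaults): with ncposlimit
 * left at 0 and maxvnodes at 100000, poslimit starts at 200000 and a
 * non-critical (vnlru) call scales it to 160000.  In CHI_LOW nothing is
 * flushed until the leaf count exceeds that limit, at which point the
 * state flips to CHI_HIGH and subsequent calls keep cleaning until the
 * count drops back below 5/6 of the limit.  The gap between the two
 * thresholds is what keeps the critical path from flushing on every
 * single call.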
2716 *
2717 * Excessive positive hits can accumulate due to large numbers of
2718 * hardlinks (the vnode cache will not prevent hl ncps from growing
2719 * into infinity).
2720 */
2721 if ((poslimit = ncposlimit) == 0)
2722 poslimit = maxvnodes * 2;
2723 if (critpath == 0)
2724 poslimit = poslimit * 8 / 10;
2725
2726 switch(pos_cache_hysteresis_state[critpath]) {
2727 case CHI_LOW:
2728 if (xnumcache > poslimit && xnumcache > MINPOS) {
2729 if (critpath)
2730 _cache_cleanpos(ncposflush);
2731 else
2732 _cache_cleanpos(ncposflush +
2733 xnumcache - poslimit);
2734 pos_cache_hysteresis_state[critpath] = CHI_HIGH;
2735 }
2736 break;
2737 case CHI_HIGH:
2738 if (xnumcache > poslimit * 5 / 6 && xnumcache > MINPOS) {
2739 if (critpath)
2740 _cache_cleanpos(ncposflush);
2741 else
2742 _cache_cleanpos(ncposflush +
2743 xnumcache - poslimit * 5 / 6);
2744 } else {
2745 pos_cache_hysteresis_state[critpath] = CHI_LOW;
2746 }
2747 break;
2748 }
2749
2750 /*
2751 * Clean out dangling defered-zap ncps which could not be cleanly
2752 * dropped if too many build up. Note that numdefered is
2753 * heuristic. Make sure we are real-time for the current cpu,
2754 * plus the global rollup.
2755 */
2756 if (pcpu_ncache[mycpu->gd_cpuid].numdefered + numdefered > neglimit) {
2757 _cache_cleandefered();
2758 }
2759 }
2760
2761 /*
2762 * NEW NAMECACHE LOOKUP API
2763 *
2764 * Lookup an entry in the namecache. The passed par_nch must be referenced
2765 * and unlocked. A referenced and locked nchandle with a non-NULL nch.ncp
2766 * is ALWAYS returned, even if the supplied component is illegal.
2767 *
2768 * The resulting namecache entry should be returned to the system with
2769 * cache_put() or cache_unlock() + cache_drop().
2770 *
2771 * namecache locks are recursive but care must be taken to avoid lock order
2772 * reversals (hence why the passed par_nch must be unlocked). Locking
2773 * rules are to order for parent traversals, not for child traversals.
2774 *
2775 * Nobody else will be able to manipulate the associated namespace (e.g.
2776 * create, delete, rename, rename-target) until the caller unlocks the
2777 * entry.
2778 *
2779 * The returned entry will be in one of three states: positive hit (non-null
2780 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
2781 * Unresolved entries must be resolved through the filesystem to associate the
2782 * vnode and/or determine whether a positive or negative hit has occurred.
2783 *
2784 * It is not necessary to lock a directory in order to lock namespace under
2785 * that directory. In fact, it is explicitly not allowed to do that. A
2786 * directory is typically only locked when being created, renamed, or
2787 * destroyed.
2788 *
2789 * The directory (par) may be unresolved, in which case any returned child
2790 * will likely also be marked unresolved. Likely but not guaranteed. Since
2791 * the filesystem lookup requires a resolved directory vnode the caller is
2792 * responsible for resolving the namecache chain top-down. This API
2793 * specifically allows whole chains to be created in an unresolved state.
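 *
 * Illustrative sketch (not from the original sources; 'par_nch', 'nlc'
 * and 'cred' are assumed to be set up by the caller -- nlookup()
 * normally drives all of this internally): look up one component,
 * resolve it if necessary, inspect the result, then release it:
 *
 *	struct nchandle nch;
 *
 *	nch = cache_nlookup(&par_nch, &nlc);	  returned locked + ref'd
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		cache_resolve(&nch, cred);
 *	if (nch.ncp->nc_error == 0) {
 *		...positive hit, nch.ncp->nc_vp is valid...
 *	} else if (nch.ncp->nc_error == ENOENT) {
 *		...negative hit...
 *	}
 *	cache_put(&nch);			  unlock + drop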
2794 */ 2795 struct nchandle 2796 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc) 2797 { 2798 struct nchandle nch; 2799 struct namecache *ncp; 2800 struct namecache *new_ncp; 2801 struct namecache *rep_ncp; /* reuse a destroyed ncp */ 2802 struct nchash_head *nchpp; 2803 struct mount *mp; 2804 u_int32_t hash; 2805 globaldata_t gd; 2806 int par_locked; 2807 2808 gd = mycpu; 2809 mp = par_nch->mount; 2810 par_locked = 0; 2811 2812 /* 2813 * This is a good time to call it, no ncp's are locked by 2814 * the caller or us. 2815 */ 2816 cache_hysteresis(1); 2817 2818 /* 2819 * Try to locate an existing entry 2820 */ 2821 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 2822 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 2823 new_ncp = NULL; 2824 nchpp = NCHHASH(hash); 2825 restart: 2826 rep_ncp = NULL; 2827 if (new_ncp) 2828 spin_lock(&nchpp->spin); 2829 else 2830 spin_lock_shared(&nchpp->spin); 2831 2832 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) { 2833 /* 2834 * Break out if we find a matching entry. Note that 2835 * UNRESOLVED entries may match, but DESTROYED entries 2836 * do not. 2837 * 2838 * We may be able to reuse DESTROYED entries that we come 2839 * across, even if the name does not match, as long as 2840 * nc_nlen is correct and the only hold ref is from the nchpp 2841 * list itself. 2842 */ 2843 if (ncp->nc_parent == par_nch->ncp && 2844 ncp->nc_nlen == nlc->nlc_namelen) { 2845 if (ncp->nc_flag & NCF_DESTROYED) { 2846 if (ncp->nc_refs == 1 && rep_ncp == NULL) 2847 rep_ncp = ncp; 2848 continue; 2849 } 2850 if (bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen)) 2851 continue; 2852 _cache_hold(ncp); 2853 if (new_ncp) 2854 spin_unlock(&nchpp->spin); 2855 else 2856 spin_unlock_shared(&nchpp->spin); 2857 if (par_locked) { 2858 _cache_unlock(par_nch->ncp); 2859 par_locked = 0; 2860 } 2861 if (_cache_lock_special(ncp) == 0) { 2862 /* 2863 * Successfully locked but we must re-test 2864 * conditions that might have changed since 2865 * we did not have the lock before. 2866 */ 2867 if (ncp->nc_parent != par_nch->ncp || 2868 ncp->nc_nlen != nlc->nlc_namelen || 2869 bcmp(ncp->nc_name, nlc->nlc_nameptr, 2870 ncp->nc_nlen) || 2871 (ncp->nc_flag & NCF_DESTROYED)) { 2872 _cache_put(ncp); 2873 goto restart; 2874 } 2875 _cache_auto_unresolve(mp, ncp); 2876 if (new_ncp) 2877 _cache_free(new_ncp); 2878 goto found; 2879 } 2880 _cache_get(ncp); /* cycle the lock to block */ 2881 _cache_put(ncp); 2882 _cache_drop(ncp); 2883 goto restart; 2884 } 2885 } 2886 2887 /* 2888 * We failed to locate the entry, try to resurrect a destroyed 2889 * entry that we did find that is already correctly linked into 2890 * nchpp and the parent. We must re-test conditions after 2891 * successfully locking rep_ncp. 2892 * 2893 * This case can occur under heavy loads due to not being able 2894 * to safely lock the parent in cache_zap(). Nominally a repeated 2895 * create/unlink load, but only the namelen needs to match. 2896 */ 2897 if (rep_ncp && new_ncp == NULL) { 2898 if (_cache_lock_nonblock(rep_ncp) == 0) { 2899 _cache_hold(rep_ncp); 2900 if (rep_ncp->nc_parent == par_nch->ncp && 2901 rep_ncp->nc_nlen == nlc->nlc_namelen && 2902 (rep_ncp->nc_flag & NCF_DESTROYED) && 2903 rep_ncp->nc_refs == 2) { 2904 /* 2905 * Update nc_name as reuse as new. 
2906 */ 2907 ncp = rep_ncp; 2908 bcopy(nlc->nlc_nameptr, ncp->nc_name, 2909 nlc->nlc_namelen); 2910 spin_unlock_shared(&nchpp->spin); 2911 _cache_setunresolved(ncp); 2912 ncp->nc_flag = NCF_UNRESOLVED; 2913 ncp->nc_error = ENOTCONN; 2914 goto found; 2915 } 2916 _cache_put(rep_ncp); 2917 } 2918 } 2919 2920 /* 2921 * Otherwise create a new entry and add it to the cache. The parent 2922 * ncp must also be locked so we can link into it. 2923 * 2924 * We have to relookup after possibly blocking in kmalloc or 2925 * when locking par_nch. 2926 * 2927 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special 2928 * mount case, in which case nc_name will be NULL. 2929 */ 2930 if (new_ncp == NULL) { 2931 spin_unlock_shared(&nchpp->spin); 2932 new_ncp = cache_alloc(nlc->nlc_namelen); 2933 if (nlc->nlc_namelen) { 2934 bcopy(nlc->nlc_nameptr, new_ncp->nc_name, 2935 nlc->nlc_namelen); 2936 new_ncp->nc_name[nlc->nlc_namelen] = 0; 2937 } 2938 goto restart; 2939 } 2940 2941 /* 2942 * NOTE! The spinlock is held exclusively here because new_ncp 2943 * is non-NULL. 2944 */ 2945 if (par_locked == 0) { 2946 spin_unlock(&nchpp->spin); 2947 _cache_lock(par_nch->ncp); 2948 par_locked = 1; 2949 goto restart; 2950 } 2951 2952 /* 2953 * Link to parent (requires another ref, the one already in new_ncp 2954 * is what we wil lreturn). 2955 * 2956 * WARNING! We still hold the spinlock. We have to set the hash 2957 * table entry atomically. 2958 */ 2959 ncp = new_ncp; 2960 ++ncp->nc_refs; 2961 _cache_link_parent(ncp, par_nch->ncp, nchpp); 2962 spin_unlock(&nchpp->spin); 2963 _cache_unlock(par_nch->ncp); 2964 /* par_locked = 0 - not used */ 2965 found: 2966 /* 2967 * stats and namecache size management 2968 */ 2969 if (ncp->nc_flag & NCF_UNRESOLVED) 2970 ++gd->gd_nchstats->ncs_miss; 2971 else if (ncp->nc_vp) 2972 ++gd->gd_nchstats->ncs_goodhits; 2973 else 2974 ++gd->gd_nchstats->ncs_neghits; 2975 nch.mount = mp; 2976 nch.ncp = ncp; 2977 _cache_mntref(nch.mount); 2978 2979 return(nch); 2980 } 2981 2982 /* 2983 * Attempt to lookup a namecache entry and return with a shared namecache 2984 * lock. This operates non-blocking. EWOULDBLOCK is returned if excl is 2985 * set or we are unable to lock. 2986 */ 2987 int 2988 cache_nlookup_maybe_shared(struct nchandle *par_nch, 2989 struct nlcomponent *nlc, 2990 int excl, struct nchandle *res_nch) 2991 { 2992 struct namecache *ncp; 2993 struct nchash_head *nchpp; 2994 struct mount *mp; 2995 u_int32_t hash; 2996 globaldata_t gd; 2997 2998 /* 2999 * If exclusive requested or shared namecache locks are disabled, 3000 * return failure. 3001 */ 3002 if (ncp_shared_lock_disable || excl) 3003 return(EWOULDBLOCK); 3004 3005 gd = mycpu; 3006 mp = par_nch->mount; 3007 3008 /* 3009 * This is a good time to call it, no ncp's are locked by 3010 * the caller or us. 3011 */ 3012 cache_hysteresis(1); 3013 3014 /* 3015 * Try to locate an existing entry 3016 */ 3017 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 3018 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 3019 nchpp = NCHHASH(hash); 3020 3021 spin_lock_shared(&nchpp->spin); 3022 3023 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) { 3024 /* 3025 * Break out if we find a matching entry. Note that 3026 * UNRESOLVED entries may match, but DESTROYED entries 3027 * do not. 
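 *
 * As in cache_nlookup(), a match found under the shared hash spinlock is
 * only a candidate: once the shared ncp lock is obtained the parent,
 * name length, name bytes and the DESTROYED/UNRESOLVED state are all
 * re-tested, since any of them may have changed in the window where no
 * lock was held.  A failed re-test simply returns EWOULDBLOCK so the
 * caller can fall back to the normal blocking lookup.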
3028 */ 3029 if (ncp->nc_parent == par_nch->ncp && 3030 ncp->nc_nlen == nlc->nlc_namelen && 3031 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 && 3032 (ncp->nc_flag & NCF_DESTROYED) == 0 3033 ) { 3034 _cache_hold(ncp); 3035 spin_unlock_shared(&nchpp->spin); 3036 3037 if (_cache_lock_shared_special(ncp) == 0) { 3038 if (ncp->nc_parent == par_nch->ncp && 3039 ncp->nc_nlen == nlc->nlc_namelen && 3040 bcmp(ncp->nc_name, nlc->nlc_nameptr, 3041 ncp->nc_nlen) == 0 && 3042 (ncp->nc_flag & NCF_DESTROYED) == 0 && 3043 (ncp->nc_flag & NCF_UNRESOLVED) == 0 && 3044 _cache_auto_unresolve_test(mp, ncp) == 0) { 3045 goto found; 3046 } 3047 _cache_unlock(ncp); 3048 } 3049 _cache_drop(ncp); 3050 return(EWOULDBLOCK); 3051 } 3052 } 3053 3054 /* 3055 * Failure 3056 */ 3057 spin_unlock_shared(&nchpp->spin); 3058 return(EWOULDBLOCK); 3059 3060 /* 3061 * Success 3062 * 3063 * Note that nc_error might be non-zero (e.g ENOENT). 3064 */ 3065 found: 3066 res_nch->mount = mp; 3067 res_nch->ncp = ncp; 3068 ++gd->gd_nchstats->ncs_goodhits; 3069 _cache_mntref(res_nch->mount); 3070 3071 KKASSERT(ncp->nc_error != EWOULDBLOCK); 3072 return(ncp->nc_error); 3073 } 3074 3075 /* 3076 * This is a non-blocking verison of cache_nlookup() used by 3077 * nfs_readdirplusrpc_uio(). It can fail for any reason and 3078 * will return nch.ncp == NULL in that case. 3079 */ 3080 struct nchandle 3081 cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc) 3082 { 3083 struct nchandle nch; 3084 struct namecache *ncp; 3085 struct namecache *new_ncp; 3086 struct nchash_head *nchpp; 3087 struct mount *mp; 3088 u_int32_t hash; 3089 globaldata_t gd; 3090 int par_locked; 3091 3092 gd = mycpu; 3093 mp = par_nch->mount; 3094 par_locked = 0; 3095 3096 /* 3097 * Try to locate an existing entry 3098 */ 3099 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 3100 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 3101 new_ncp = NULL; 3102 nchpp = NCHHASH(hash); 3103 restart: 3104 spin_lock(&nchpp->spin); 3105 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) { 3106 /* 3107 * Break out if we find a matching entry. Note that 3108 * UNRESOLVED entries may match, but DESTROYED entries 3109 * do not. 3110 */ 3111 if (ncp->nc_parent == par_nch->ncp && 3112 ncp->nc_nlen == nlc->nlc_namelen && 3113 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 && 3114 (ncp->nc_flag & NCF_DESTROYED) == 0 3115 ) { 3116 _cache_hold(ncp); 3117 spin_unlock(&nchpp->spin); 3118 if (par_locked) { 3119 _cache_unlock(par_nch->ncp); 3120 par_locked = 0; 3121 } 3122 if (_cache_lock_special(ncp) == 0) { 3123 if (ncp->nc_parent != par_nch->ncp || 3124 ncp->nc_nlen != nlc->nlc_namelen || 3125 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) || 3126 (ncp->nc_flag & NCF_DESTROYED)) { 3127 kprintf("cache_lookup_nonblock: " 3128 "ncp-race %p %*.*s\n", 3129 ncp, 3130 nlc->nlc_namelen, 3131 nlc->nlc_namelen, 3132 nlc->nlc_nameptr); 3133 _cache_unlock(ncp); 3134 _cache_drop(ncp); 3135 goto failed; 3136 } 3137 _cache_auto_unresolve(mp, ncp); 3138 if (new_ncp) { 3139 _cache_free(new_ncp); 3140 new_ncp = NULL; 3141 } 3142 goto found; 3143 } 3144 _cache_drop(ncp); 3145 goto failed; 3146 } 3147 } 3148 3149 /* 3150 * We failed to locate an entry, create a new entry and add it to 3151 * the cache. The parent ncp must also be locked so we 3152 * can link into it. 3153 * 3154 * We have to relookup after possibly blocking in kmalloc or 3155 * when locking par_nch. 
3156 * 3157 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special 3158 * mount case, in which case nc_name will be NULL. 3159 */ 3160 if (new_ncp == NULL) { 3161 spin_unlock(&nchpp->spin); 3162 new_ncp = cache_alloc(nlc->nlc_namelen); 3163 if (nlc->nlc_namelen) { 3164 bcopy(nlc->nlc_nameptr, new_ncp->nc_name, 3165 nlc->nlc_namelen); 3166 new_ncp->nc_name[nlc->nlc_namelen] = 0; 3167 } 3168 goto restart; 3169 } 3170 if (par_locked == 0) { 3171 spin_unlock(&nchpp->spin); 3172 if (_cache_lock_nonblock(par_nch->ncp) == 0) { 3173 par_locked = 1; 3174 goto restart; 3175 } 3176 goto failed; 3177 } 3178 3179 /* 3180 * Link to parent (requires another ref, the one already in new_ncp 3181 * is what we wil lreturn). 3182 * 3183 * WARNING! We still hold the spinlock. We have to set the hash 3184 * table entry atomically. 3185 */ 3186 ncp = new_ncp; 3187 ++ncp->nc_refs; 3188 _cache_link_parent(ncp, par_nch->ncp, nchpp); 3189 spin_unlock(&nchpp->spin); 3190 _cache_unlock(par_nch->ncp); 3191 /* par_locked = 0 - not used */ 3192 found: 3193 /* 3194 * stats and namecache size management 3195 */ 3196 if (ncp->nc_flag & NCF_UNRESOLVED) 3197 ++gd->gd_nchstats->ncs_miss; 3198 else if (ncp->nc_vp) 3199 ++gd->gd_nchstats->ncs_goodhits; 3200 else 3201 ++gd->gd_nchstats->ncs_neghits; 3202 nch.mount = mp; 3203 nch.ncp = ncp; 3204 _cache_mntref(nch.mount); 3205 3206 return(nch); 3207 failed: 3208 if (new_ncp) { 3209 _cache_free(new_ncp); 3210 new_ncp = NULL; 3211 } 3212 nch.mount = NULL; 3213 nch.ncp = NULL; 3214 return(nch); 3215 } 3216 3217 /* 3218 * This version is non-locking. The caller must validate the result 3219 * for parent-to-child continuity. 3220 * 3221 * It can fail for any reason and will return nch.ncp == NULL in that case. 3222 */ 3223 struct nchandle 3224 cache_nlookup_nonlocked(struct nchandle *par_nch, struct nlcomponent *nlc) 3225 { 3226 struct nchandle nch; 3227 struct namecache *ncp; 3228 struct nchash_head *nchpp; 3229 struct mount *mp; 3230 u_int32_t hash; 3231 globaldata_t gd; 3232 3233 gd = mycpu; 3234 mp = par_nch->mount; 3235 3236 /* 3237 * Try to locate an existing entry 3238 */ 3239 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 3240 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 3241 nchpp = NCHHASH(hash); 3242 3243 spin_lock_shared(&nchpp->spin); 3244 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) { 3245 /* 3246 * Break out if we find a matching entry. Note that 3247 * UNRESOLVED entries may match, but DESTROYED entries 3248 * do not. 3249 * 3250 * Resolved NFS entries which have timed out fail so the 3251 * caller can rerun with normal locking. 3252 */ 3253 if (ncp->nc_parent == par_nch->ncp && 3254 ncp->nc_nlen == nlc->nlc_namelen && 3255 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 && 3256 (ncp->nc_flag & NCF_DESTROYED) == 0 3257 ) { 3258 if (_cache_auto_unresolve_test(par_nch->mount, ncp)) 3259 break; 3260 _cache_hold(ncp); 3261 spin_unlock_shared(&nchpp->spin); 3262 goto found; 3263 } 3264 } 3265 spin_unlock_shared(&nchpp->spin); 3266 nch.mount = NULL; 3267 nch.ncp = NULL; 3268 return nch; 3269 found: 3270 /* 3271 * stats and namecache size management 3272 */ 3273 if (ncp->nc_flag & NCF_UNRESOLVED) 3274 ++gd->gd_nchstats->ncs_miss; 3275 else if (ncp->nc_vp) 3276 ++gd->gd_nchstats->ncs_goodhits; 3277 else 3278 ++gd->gd_nchstats->ncs_neghits; 3279 nch.mount = mp; 3280 nch.ncp = ncp; 3281 _cache_mntref(nch.mount); 3282 3283 return(nch); 3284 } 3285 3286 /* 3287 * The namecache entry is marked as being used as a mount point. 
3288 * Locate the mount if it is visible to the caller. The DragonFly 3289 * mount system allows arbitrary loops in the topology and disentangles 3290 * those loops by matching against (mp, ncp) rather than just (ncp). 3291 * This means any given ncp can dive any number of mounts, depending 3292 * on the relative mount (e.g. nullfs) the caller is at in the topology. 3293 * 3294 * We use a very simple frontend cache to reduce SMP conflicts, 3295 * which we have to do because the mountlist scan needs an exclusive 3296 * lock around its ripout info list. Not to mention that there might 3297 * be a lot of mounts. 3298 * 3299 * Because all mounts can potentially be accessed by all cpus, break the cpu's 3300 * down a bit to allow some contention rather than making the cache 3301 * excessively huge. 3302 * 3303 * The hash table is split into per-cpu areas, is 4-way set-associative. 3304 */ 3305 struct findmount_info { 3306 struct mount *result; 3307 struct mount *nch_mount; 3308 struct namecache *nch_ncp; 3309 }; 3310 3311 static __inline 3312 struct ncmount_cache * 3313 ncmount_cache_lookup4(struct mount *mp, struct namecache *ncp) 3314 { 3315 uint32_t hash; 3316 3317 hash = iscsi_crc32(&mp, sizeof(mp)); 3318 hash = iscsi_crc32_ext(&ncp, sizeof(ncp), hash); 3319 hash ^= hash >> 16; 3320 hash = hash & ((NCMOUNT_NUMCACHE - 1) & ~(NCMOUNT_SET - 1)); 3321 3322 return (&ncmount_cache[hash]); 3323 } 3324 3325 static 3326 struct ncmount_cache * 3327 ncmount_cache_lookup(struct mount *mp, struct namecache *ncp) 3328 { 3329 struct ncmount_cache *ncc; 3330 struct ncmount_cache *best; 3331 int delta; 3332 int best_delta; 3333 int i; 3334 3335 ncc = ncmount_cache_lookup4(mp, ncp); 3336 3337 /* 3338 * NOTE: When checking for a ticks overflow implement a slop of 3339 * 2 ticks just to be safe, because ticks is accessed 3340 * non-atomically one CPU can increment it while another 3341 * is still using the old value. 3342 */ 3343 if (ncc->ncp == ncp && ncc->mp == mp) /* 0 */ 3344 return ncc; 3345 delta = (int)(ticks - ncc->ticks); /* beware GCC opts */ 3346 if (delta < -2) /* overflow reset */ 3347 ncc->ticks = ticks; 3348 best = ncc; 3349 best_delta = delta; 3350 3351 for (i = 1; i < NCMOUNT_SET; ++i) { /* 1, 2, 3 */ 3352 ++ncc; 3353 if (ncc->ncp == ncp && ncc->mp == mp) 3354 return ncc; 3355 delta = (int)(ticks - ncc->ticks); 3356 if (delta < -2) 3357 ncc->ticks = ticks; 3358 if (delta > best_delta) { 3359 best_delta = delta; 3360 best = ncc; 3361 } 3362 } 3363 return best; 3364 } 3365 3366 /* 3367 * pcpu-optimized mount search. Locate the recursive mountpoint, avoid 3368 * doing an expensive mountlist_scan*() if possible. 3369 * 3370 * (mp, ncp) -> mountonpt.k 3371 * 3372 * Returns a referenced mount pointer or NULL 3373 * 3374 * General SMP operation uses a per-cpu umount_spin to interlock unmount 3375 * operations (that is, where the mp_target can be freed out from under us). 3376 * 3377 * Lookups use the ncc->updating counter to validate the contents in order 3378 * to avoid having to obtain the per cache-element spin-lock. In addition, 3379 * the ticks field is only updated when it changes. However, if our per-cpu 3380 * lock fails due to an unmount-in-progress, we fall-back to the 3381 * cache-element's spin-lock. 
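 *
 * Simplified sketch of the read side (illustrative; the real code below
 * also arbitrates with umounts via a per-cpu spinlock): the 'updating'
 * field behaves like a sequence counter -- odd means a writer is in the
 * middle of an update, an even and unchanged value brackets a
 * consistent snapshot:
 *
 *	update = ncc->updating;		  snapshot the counter
 *	if (update & 1)
 *		...writer active, take the slow path...
 *	ncc_copy = *ncc;		  copy out the element
 *	cpu_lfence();
 *	if (ncc->updating != update)
 *		...contents changed underneath us, retry...
 *	...ncc_copy may now be used safely...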
3382 */ 3383 struct mount * 3384 cache_findmount(struct nchandle *nch) 3385 { 3386 struct findmount_info info; 3387 struct ncmount_cache *ncc; 3388 struct ncmount_cache ncc_copy; 3389 struct mount *target; 3390 struct pcpu_ncache *pcpu; 3391 struct spinlock *spinlk; 3392 int update; 3393 3394 pcpu = pcpu_ncache; 3395 if (ncmount_cache_enable == 0 || pcpu == NULL) { 3396 ncc = NULL; 3397 goto skip; 3398 } 3399 pcpu += mycpu->gd_cpuid; 3400 3401 again: 3402 ncc = ncmount_cache_lookup(nch->mount, nch->ncp); 3403 if (ncc->ncp == nch->ncp && ncc->mp == nch->mount) { 3404 found: 3405 /* 3406 * This is a bit messy for now because we do not yet have 3407 * safe disposal of mount structures. We have to ref 3408 * ncc->mp_target but the 'update' counter only tell us 3409 * whether the cache has changed after the fact. 3410 * 3411 * For now get a per-cpu spinlock that will only contend 3412 * against umount's. This is the best path. If it fails, 3413 * instead of waiting on the umount we fall-back to a 3414 * shared ncc->spin lock, which will generally only cost a 3415 * cache ping-pong. 3416 */ 3417 update = ncc->updating; 3418 if (__predict_true(spin_trylock(&pcpu->umount_spin))) { 3419 spinlk = &pcpu->umount_spin; 3420 } else { 3421 spinlk = &ncc->spin; 3422 spin_lock_shared(spinlk); 3423 } 3424 if (update & 1) { /* update in progress */ 3425 spin_unlock_any(spinlk); 3426 goto skip; 3427 } 3428 ncc_copy = *ncc; 3429 cpu_lfence(); 3430 if (ncc->updating != update) { /* content changed */ 3431 spin_unlock_any(spinlk); 3432 goto again; 3433 } 3434 if (ncc_copy.ncp != nch->ncp || ncc_copy.mp != nch->mount) { 3435 spin_unlock_any(spinlk); 3436 goto again; 3437 } 3438 if (ncc_copy.isneg == 0) { 3439 target = ncc_copy.mp_target; 3440 if (target->mnt_ncmounton.mount == nch->mount && 3441 target->mnt_ncmounton.ncp == nch->ncp) { 3442 /* 3443 * Cache hit (positive) (avoid dirtying 3444 * the cache line if possible) 3445 */ 3446 if (ncc->ticks != (int)ticks) 3447 ncc->ticks = (int)ticks; 3448 _cache_mntref(target); 3449 } 3450 } else { 3451 /* 3452 * Cache hit (negative) (avoid dirtying 3453 * the cache line if possible) 3454 */ 3455 if (ncc->ticks != (int)ticks) 3456 ncc->ticks = (int)ticks; 3457 target = NULL; 3458 } 3459 spin_unlock_any(spinlk); 3460 3461 return target; 3462 } 3463 skip: 3464 3465 /* 3466 * Slow 3467 */ 3468 info.result = NULL; 3469 info.nch_mount = nch->mount; 3470 info.nch_ncp = nch->ncp; 3471 mountlist_scan(cache_findmount_callback, &info, 3472 MNTSCAN_FORWARD | MNTSCAN_NOBUSY | MNTSCAN_NOUNLOCK); 3473 3474 /* 3475 * To reduce multi-re-entry on the cache, relookup in the cache. 3476 * This can still race, obviously, but that's ok. 3477 */ 3478 ncc = ncmount_cache_lookup(nch->mount, nch->ncp); 3479 if (ncc->ncp == nch->ncp && ncc->mp == nch->mount) { 3480 if (info.result) 3481 atomic_add_int(&info.result->mnt_refs, -1); 3482 goto found; 3483 } 3484 3485 /* 3486 * Cache the result. 
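 *
 * Note that the update code below is the writer half of the same
 * sequence-counter protocol: the element spinlock keeps writers
 * exclusive, the first increment makes 'updating' odd so concurrent
 * lookups back off, cpu_sfence() orders the stores, and the final
 * increment returns the counter to an even value, publishing the new
 * contents to readers.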
3487 */ 3488 if ((info.result == NULL || 3489 (info.result->mnt_kern_flag & MNTK_UNMOUNT) == 0)) { 3490 spin_lock(&ncc->spin); 3491 atomic_add_int_nonlocked(&ncc->updating, 1); 3492 cpu_sfence(); 3493 KKASSERT(ncc->updating & 1); 3494 if (ncc->mp != nch->mount) { 3495 if (ncc->mp) 3496 atomic_add_int(&ncc->mp->mnt_refs, -1); 3497 atomic_add_int(&nch->mount->mnt_refs, 1); 3498 ncc->mp = nch->mount; 3499 } 3500 ncc->ncp = nch->ncp; /* ptr compares only, not refd*/ 3501 ncc->ticks = (int)ticks; 3502 3503 if (info.result) { 3504 ncc->isneg = 0; 3505 if (ncc->mp_target != info.result) { 3506 if (ncc->mp_target) 3507 atomic_add_int(&ncc->mp_target->mnt_refs, -1); 3508 ncc->mp_target = info.result; 3509 atomic_add_int(&info.result->mnt_refs, 1); 3510 } 3511 } else { 3512 ncc->isneg = 1; 3513 if (ncc->mp_target) { 3514 atomic_add_int(&ncc->mp_target->mnt_refs, -1); 3515 ncc->mp_target = NULL; 3516 } 3517 } 3518 cpu_sfence(); 3519 atomic_add_int_nonlocked(&ncc->updating, 1); 3520 spin_unlock(&ncc->spin); 3521 } 3522 return(info.result); 3523 } 3524 3525 static 3526 int 3527 cache_findmount_callback(struct mount *mp, void *data) 3528 { 3529 struct findmount_info *info = data; 3530 3531 /* 3532 * Check the mount's mounted-on point against the passed nch. 3533 */ 3534 if (mp->mnt_ncmounton.mount == info->nch_mount && 3535 mp->mnt_ncmounton.ncp == info->nch_ncp 3536 ) { 3537 info->result = mp; 3538 _cache_mntref(mp); 3539 return(-1); 3540 } 3541 return(0); 3542 } 3543 3544 void 3545 cache_dropmount(struct mount *mp) 3546 { 3547 _cache_mntrel(mp); 3548 } 3549 3550 /* 3551 * mp is being mounted, scrap entries matching mp->mnt_ncmounton (positive 3552 * or negative). 3553 * 3554 * A full scan is not required, but for now just do it anyway. 3555 */ 3556 void 3557 cache_ismounting(struct mount *mp) 3558 { 3559 struct ncmount_cache *ncc; 3560 struct mount *ncc_mp; 3561 int i; 3562 3563 if (pcpu_ncache == NULL) 3564 return; 3565 3566 for (i = 0; i < NCMOUNT_NUMCACHE; ++i) { 3567 ncc = &ncmount_cache[i]; 3568 if (ncc->mp != mp->mnt_ncmounton.mount || 3569 ncc->ncp != mp->mnt_ncmounton.ncp) { 3570 continue; 3571 } 3572 spin_lock(&ncc->spin); 3573 atomic_add_int_nonlocked(&ncc->updating, 1); 3574 cpu_sfence(); 3575 KKASSERT(ncc->updating & 1); 3576 if (ncc->mp != mp->mnt_ncmounton.mount || 3577 ncc->ncp != mp->mnt_ncmounton.ncp) { 3578 cpu_sfence(); 3579 ++ncc->updating; 3580 spin_unlock(&ncc->spin); 3581 continue; 3582 } 3583 ncc_mp = ncc->mp; 3584 ncc->ncp = NULL; 3585 ncc->mp = NULL; 3586 if (ncc_mp) 3587 atomic_add_int(&ncc_mp->mnt_refs, -1); 3588 ncc_mp = ncc->mp_target; 3589 ncc->mp_target = NULL; 3590 if (ncc_mp) 3591 atomic_add_int(&ncc_mp->mnt_refs, -1); 3592 ncc->ticks = (int)ticks - hz * 120; 3593 3594 cpu_sfence(); 3595 atomic_add_int_nonlocked(&ncc->updating, 1); 3596 spin_unlock(&ncc->spin); 3597 } 3598 3599 /* 3600 * Pre-cache the mount point 3601 */ 3602 ncc = ncmount_cache_lookup(mp->mnt_ncmounton.mount, 3603 mp->mnt_ncmounton.ncp); 3604 3605 spin_lock(&ncc->spin); 3606 atomic_add_int_nonlocked(&ncc->updating, 1); 3607 cpu_sfence(); 3608 KKASSERT(ncc->updating & 1); 3609 3610 if (ncc->mp) 3611 atomic_add_int(&ncc->mp->mnt_refs, -1); 3612 atomic_add_int(&mp->mnt_ncmounton.mount->mnt_refs, 1); 3613 ncc->mp = mp->mnt_ncmounton.mount; 3614 ncc->ncp = mp->mnt_ncmounton.ncp; /* ptr compares only */ 3615 ncc->ticks = (int)ticks; 3616 3617 ncc->isneg = 0; 3618 if (ncc->mp_target != mp) { 3619 if (ncc->mp_target) 3620 atomic_add_int(&ncc->mp_target->mnt_refs, -1); 3621 ncc->mp_target = mp; 3622 
atomic_add_int(&mp->mnt_refs, 1); 3623 } 3624 cpu_sfence(); 3625 atomic_add_int_nonlocked(&ncc->updating, 1); 3626 spin_unlock(&ncc->spin); 3627 } 3628 3629 /* 3630 * Scrap any ncmount_cache entries related to mp. Not only do we need to 3631 * scrap entries matching mp->mnt_ncmounton, but we also need to scrap any 3632 * negative hits involving (mp, <any>). 3633 * 3634 * A full scan is required. 3635 */ 3636 void 3637 cache_unmounting(struct mount *mp) 3638 { 3639 struct ncmount_cache *ncc; 3640 struct pcpu_ncache *pcpu; 3641 struct mount *ncc_mp; 3642 int i; 3643 3644 pcpu = pcpu_ncache; 3645 if (pcpu == NULL) 3646 return; 3647 3648 for (i = 0; i < ncpus; ++i) 3649 spin_lock(&pcpu[i].umount_spin); 3650 3651 for (i = 0; i < NCMOUNT_NUMCACHE; ++i) { 3652 ncc = &ncmount_cache[i]; 3653 if (ncc->mp != mp && ncc->mp_target != mp) 3654 continue; 3655 spin_lock(&ncc->spin); 3656 atomic_add_int_nonlocked(&ncc->updating, 1); 3657 cpu_sfence(); 3658 3659 if (ncc->mp != mp && ncc->mp_target != mp) { 3660 atomic_add_int_nonlocked(&ncc->updating, 1); 3661 cpu_sfence(); 3662 spin_unlock(&ncc->spin); 3663 continue; 3664 } 3665 ncc_mp = ncc->mp; 3666 ncc->ncp = NULL; 3667 ncc->mp = NULL; 3668 if (ncc_mp) 3669 atomic_add_int(&ncc_mp->mnt_refs, -1); 3670 ncc_mp = ncc->mp_target; 3671 ncc->mp_target = NULL; 3672 if (ncc_mp) 3673 atomic_add_int(&ncc_mp->mnt_refs, -1); 3674 ncc->ticks = (int)ticks - hz * 120; 3675 3676 cpu_sfence(); 3677 atomic_add_int_nonlocked(&ncc->updating, 1); 3678 spin_unlock(&ncc->spin); 3679 } 3680 3681 for (i = 0; i < ncpus; ++i) 3682 spin_unlock(&pcpu[i].umount_spin); 3683 } 3684 3685 /* 3686 * Resolve an unresolved namecache entry, generally by looking it up. 3687 * The passed ncp must be locked and refd. 3688 * 3689 * Theoretically since a vnode cannot be recycled while held, and since 3690 * the nc_parent chain holds its vnode as long as children exist, the 3691 * direct parent of the cache entry we are trying to resolve should 3692 * have a valid vnode. If not then generate an error that we can 3693 * determine is related to a resolver bug. 3694 * 3695 * However, if a vnode was in the middle of a recyclement when the NCP 3696 * got locked, ncp->nc_vp might point to a vnode that is about to become 3697 * invalid. cache_resolve() handles this case by unresolving the entry 3698 * and then re-resolving it. 3699 * 3700 * Note that successful resolution does not necessarily return an error 3701 * code of 0. If the ncp resolves to a negative cache hit then ENOENT 3702 * will be returned. 3703 */ 3704 int 3705 cache_resolve(struct nchandle *nch, struct ucred *cred) 3706 { 3707 struct namecache *par_tmp; 3708 struct namecache *par; 3709 struct namecache *ncp; 3710 struct nchandle nctmp; 3711 struct mount *mp; 3712 struct vnode *dvp; 3713 int error; 3714 3715 ncp = nch->ncp; 3716 mp = nch->mount; 3717 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE); 3718 restart: 3719 /* 3720 * If the ncp is already resolved we have nothing to do. However, 3721 * we do want to guarentee that a usable vnode is returned when 3722 * a vnode is present, so make sure it hasn't been reclaimed. 3723 */ 3724 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 3725 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 3726 _cache_setunresolved(ncp); 3727 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) 3728 return (ncp->nc_error); 3729 } 3730 3731 /* 3732 * If the ncp was destroyed it will never resolve again. This 3733 * can basically only happen when someone is chdir'd into an 3734 * empty directory which is then rmdir'd. 
We want to catch this 3735 * here and not dive the VFS because the VFS might actually 3736 * have a way to re-resolve the disconnected ncp, which will 3737 * result in inconsistencies in the cdir/nch for proc->p_fd. 3738 */ 3739 if (ncp->nc_flag & NCF_DESTROYED) 3740 return(EINVAL); 3741 3742 /* 3743 * Mount points need special handling because the parent does not 3744 * belong to the same filesystem as the ncp. 3745 */ 3746 if (ncp == mp->mnt_ncmountpt.ncp) 3747 return (cache_resolve_mp(mp)); 3748 3749 /* 3750 * We expect an unbroken chain of ncps to at least the mount point, 3751 * and even all the way to root (but this code doesn't have to go 3752 * past the mount point). 3753 */ 3754 if (ncp->nc_parent == NULL) { 3755 kprintf("EXDEV case 1 %p %*.*s\n", ncp, 3756 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name); 3757 ncp->nc_error = EXDEV; 3758 return(ncp->nc_error); 3759 } 3760 3761 /* 3762 * The vp's of the parent directories in the chain are held via vhold() 3763 * due to the existance of the child, and should not disappear. 3764 * However, there are cases where they can disappear: 3765 * 3766 * - due to filesystem I/O errors. 3767 * - due to NFS being stupid about tracking the namespace and 3768 * destroys the namespace for entire directories quite often. 3769 * - due to forced unmounts. 3770 * - due to an rmdir (parent will be marked DESTROYED) 3771 * 3772 * When this occurs we have to track the chain backwards and resolve 3773 * it, looping until the resolver catches up to the current node. We 3774 * could recurse here but we might run ourselves out of kernel stack 3775 * so we do it in a more painful manner. This situation really should 3776 * not occur all that often, or if it does not have to go back too 3777 * many nodes to resolve the ncp. 3778 */ 3779 while ((dvp = cache_dvpref(ncp)) == NULL) { 3780 /* 3781 * This case can occur if a process is CD'd into a 3782 * directory which is then rmdir'd. If the parent is marked 3783 * destroyed there is no point trying to resolve it. 3784 */ 3785 if (ncp->nc_parent->nc_flag & NCF_DESTROYED) 3786 return(ENOENT); 3787 par = ncp->nc_parent; 3788 _cache_hold(par); 3789 _cache_lock(par); 3790 while ((par_tmp = par->nc_parent) != NULL && 3791 par_tmp->nc_vp == NULL) { 3792 _cache_hold(par_tmp); 3793 _cache_lock(par_tmp); 3794 _cache_put(par); 3795 par = par_tmp; 3796 } 3797 if (par->nc_parent == NULL) { 3798 kprintf("EXDEV case 2 %*.*s\n", 3799 par->nc_nlen, par->nc_nlen, par->nc_name); 3800 _cache_put(par); 3801 return (EXDEV); 3802 } 3803 /* 3804 * The parent is not set in stone, ref and lock it to prevent 3805 * it from disappearing. Also note that due to renames it 3806 * is possible for our ncp to move and for par to no longer 3807 * be one of its parents. We resolve it anyway, the loop 3808 * will handle any moves. 
3809 */ 3810 _cache_get(par); /* additional hold/lock */ 3811 _cache_put(par); /* from earlier hold/lock */ 3812 if (par == nch->mount->mnt_ncmountpt.ncp) { 3813 cache_resolve_mp(nch->mount); 3814 } else if ((dvp = cache_dvpref(par)) == NULL) { 3815 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", 3816 par->nc_nlen, par->nc_nlen, par->nc_name); 3817 _cache_put(par); 3818 continue; 3819 } else { 3820 if (par->nc_flag & NCF_UNRESOLVED) { 3821 nctmp.mount = mp; 3822 nctmp.ncp = par; 3823 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred); 3824 } 3825 vrele(dvp); 3826 } 3827 if ((error = par->nc_error) != 0) { 3828 if (par->nc_error != EAGAIN) { 3829 kprintf("EXDEV case 3 %*.*s error %d\n", 3830 par->nc_nlen, par->nc_nlen, par->nc_name, 3831 par->nc_error); 3832 _cache_put(par); 3833 return(error); 3834 } 3835 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n", 3836 par, par->nc_nlen, par->nc_nlen, par->nc_name); 3837 } 3838 _cache_put(par); 3839 /* loop */ 3840 } 3841 3842 /* 3843 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected 3844 * ncp's and reattach them. If this occurs the original ncp is marked 3845 * EAGAIN to force a relookup. 3846 * 3847 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed 3848 * ncp must already be resolved. 3849 */ 3850 if (dvp) { 3851 nctmp.mount = mp; 3852 nctmp.ncp = ncp; 3853 ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred); 3854 vrele(dvp); 3855 } else { 3856 ncp->nc_error = EPERM; 3857 } 3858 if (ncp->nc_error == EAGAIN) { 3859 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n", 3860 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name); 3861 goto restart; 3862 } 3863 return(ncp->nc_error); 3864 } 3865 3866 /* 3867 * Resolve the ncp associated with a mount point. Such ncp's almost always 3868 * remain resolved and this routine is rarely called. NFS MPs tends to force 3869 * re-resolution more often due to its mac-truck-smash-the-namecache 3870 * method of tracking namespace changes. 3871 * 3872 * The semantics for this call is that the passed ncp must be locked on 3873 * entry and will be locked on return. However, if we actually have to 3874 * resolve the mount point we temporarily unlock the entry in order to 3875 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of 3876 * the unlock we have to recheck the flags after we relock. 3877 */ 3878 static int 3879 cache_resolve_mp(struct mount *mp) 3880 { 3881 struct namecache *ncp = mp->mnt_ncmountpt.ncp; 3882 struct vnode *vp; 3883 int error; 3884 3885 KKASSERT(mp != NULL); 3886 3887 /* 3888 * If the ncp is already resolved we have nothing to do. However, 3889 * we do want to guarentee that a usable vnode is returned when 3890 * a vnode is present, so make sure it hasn't been reclaimed. 3891 */ 3892 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 3893 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 3894 _cache_setunresolved(ncp); 3895 } 3896 3897 if (ncp->nc_flag & NCF_UNRESOLVED) { 3898 _cache_unlock(ncp); 3899 while (vfs_busy(mp, 0)) 3900 ; 3901 error = VFS_ROOT(mp, &vp); 3902 _cache_lock(ncp); 3903 3904 /* 3905 * recheck the ncp state after relocking. 
3906 */ 3907 if (ncp->nc_flag & NCF_UNRESOLVED) { 3908 ncp->nc_error = error; 3909 if (error == 0) { 3910 _cache_setvp(mp, ncp, vp); 3911 vput(vp); 3912 } else { 3913 kprintf("[diagnostic] cache_resolve_mp: failed" 3914 " to resolve mount %p err=%d ncp=%p\n", 3915 mp, error, ncp); 3916 _cache_setvp(mp, ncp, NULL); 3917 } 3918 } else if (error == 0) { 3919 vput(vp); 3920 } 3921 vfs_unbusy(mp); 3922 } 3923 return(ncp->nc_error); 3924 } 3925 3926 /* 3927 * Clean out negative cache entries when too many have accumulated. 3928 */ 3929 static void 3930 _cache_cleanneg(long count) 3931 { 3932 struct pcpu_ncache *pn; 3933 struct namecache *ncp; 3934 static uint32_t neg_rover; 3935 uint32_t n; 3936 long vnegs; 3937 3938 n = neg_rover++; /* SMP heuristical, race ok */ 3939 cpu_ccfence(); 3940 n = n % (uint32_t)ncpus; 3941 3942 /* 3943 * Normalize vfscache_negs and count. count is sometimes based 3944 * on vfscache_negs. vfscache_negs is heuristical and can sometimes 3945 * have crazy values. 3946 */ 3947 vnegs = vfscache_negs; 3948 cpu_ccfence(); 3949 if (vnegs <= MINNEG) 3950 vnegs = MINNEG; 3951 if (count < 1) 3952 count = 1; 3953 3954 pn = &pcpu_ncache[n]; 3955 spin_lock(&pn->neg_spin); 3956 count = pn->neg_count * count / vnegs + 1; 3957 spin_unlock(&pn->neg_spin); 3958 3959 /* 3960 * Attempt to clean out the specified number of negative cache 3961 * entries. 3962 */ 3963 while (count > 0) { 3964 spin_lock(&pn->neg_spin); 3965 ncp = TAILQ_FIRST(&pn->neg_list); 3966 if (ncp == NULL) { 3967 spin_unlock(&pn->neg_spin); 3968 break; 3969 } 3970 TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode); 3971 TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode); 3972 _cache_hold(ncp); 3973 spin_unlock(&pn->neg_spin); 3974 3975 /* 3976 * This can race, so we must re-check that the ncp 3977 * is on the ncneg.list after successfully locking it. 3978 */ 3979 if (_cache_lock_special(ncp) == 0) { 3980 if (ncp->nc_vp == NULL && 3981 (ncp->nc_flag & NCF_UNRESOLVED) == 0) { 3982 cache_zap(ncp); 3983 } else { 3984 _cache_unlock(ncp); 3985 _cache_drop(ncp); 3986 } 3987 } else { 3988 _cache_drop(ncp); 3989 } 3990 --count; 3991 } 3992 } 3993 3994 /* 3995 * Clean out positive cache entries when too many have accumulated. 3996 */ 3997 static void 3998 _cache_cleanpos(long count) 3999 { 4000 static volatile int rover; 4001 struct nchash_head *nchpp; 4002 struct namecache *ncp; 4003 int rover_copy; 4004 4005 /* 4006 * Attempt to clean out the specified number of negative cache 4007 * entries. 4008 */ 4009 while (count > 0) { 4010 rover_copy = ++rover; /* MPSAFEENOUGH */ 4011 cpu_ccfence(); 4012 nchpp = NCHHASH(rover_copy); 4013 4014 if (TAILQ_FIRST(&nchpp->list) == NULL) { 4015 --count; 4016 continue; 4017 } 4018 4019 /* 4020 * Cycle ncp on list, ignore and do not move DUMMY 4021 * ncps. These are temporary list iterators. 4022 * 4023 * We must cycle the ncp to the end of the list to 4024 * ensure that all ncp's have an equal chance of 4025 * being removed. 
4026 */ 4027 spin_lock(&nchpp->spin); 4028 ncp = TAILQ_FIRST(&nchpp->list); 4029 while (ncp && (ncp->nc_flag & NCF_DUMMY)) 4030 ncp = TAILQ_NEXT(ncp, nc_hash); 4031 if (ncp) { 4032 TAILQ_REMOVE(&nchpp->list, ncp, nc_hash); 4033 TAILQ_INSERT_TAIL(&nchpp->list, ncp, nc_hash); 4034 _cache_hold(ncp); 4035 } 4036 spin_unlock(&nchpp->spin); 4037 4038 if (ncp) { 4039 if (_cache_lock_special(ncp) == 0) { 4040 cache_zap(ncp); 4041 } else { 4042 _cache_drop(ncp); 4043 } 4044 } 4045 --count; 4046 } 4047 } 4048 4049 /* 4050 * This is a kitchen sink function to clean out ncps which we 4051 * tried to zap from cache_drop() but failed because we were 4052 * unable to acquire the parent lock. 4053 * 4054 * Such entries can also be removed via cache_inval_vp(), such 4055 * as when unmounting. 4056 */ 4057 static void 4058 _cache_cleandefered(void) 4059 { 4060 struct nchash_head *nchpp; 4061 struct namecache *ncp; 4062 struct namecache dummy; 4063 int i; 4064 4065 /* 4066 * Create a list iterator. DUMMY indicates that this is a list 4067 * iterator, DESTROYED prevents matches by lookup functions. 4068 */ 4069 numdefered = 0; 4070 pcpu_ncache[mycpu->gd_cpuid].numdefered = 0; 4071 bzero(&dummy, sizeof(dummy)); 4072 dummy.nc_flag = NCF_DESTROYED | NCF_DUMMY; 4073 dummy.nc_refs = 1; 4074 4075 for (i = 0; i <= nchash; ++i) { 4076 nchpp = &nchashtbl[i]; 4077 4078 spin_lock(&nchpp->spin); 4079 TAILQ_INSERT_HEAD(&nchpp->list, &dummy, nc_hash); 4080 ncp = &dummy; 4081 while ((ncp = TAILQ_NEXT(ncp, nc_hash)) != NULL) { 4082 if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0) 4083 continue; 4084 TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash); 4085 TAILQ_INSERT_AFTER(&nchpp->list, ncp, &dummy, nc_hash); 4086 _cache_hold(ncp); 4087 spin_unlock(&nchpp->spin); 4088 if (_cache_lock_nonblock(ncp) == 0) { 4089 ncp->nc_flag &= ~NCF_DEFEREDZAP; 4090 _cache_unlock(ncp); 4091 } 4092 _cache_drop(ncp); 4093 spin_lock(&nchpp->spin); 4094 ncp = &dummy; 4095 } 4096 TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash); 4097 spin_unlock(&nchpp->spin); 4098 } 4099 } 4100 4101 /* 4102 * Name cache initialization, from vfsinit() when we are booting 4103 */ 4104 void 4105 nchinit(void) 4106 { 4107 struct pcpu_ncache *pn; 4108 globaldata_t gd; 4109 int i; 4110 4111 /* 4112 * Per-cpu accounting and negative hit list 4113 */ 4114 pcpu_ncache = kmalloc(sizeof(*pcpu_ncache) * ncpus, 4115 M_VFSCACHE, M_WAITOK|M_ZERO); 4116 for (i = 0; i < ncpus; ++i) { 4117 pn = &pcpu_ncache[i]; 4118 TAILQ_INIT(&pn->neg_list); 4119 spin_init(&pn->neg_spin, "ncneg"); 4120 spin_init(&pn->umount_spin, "ncumm"); 4121 } 4122 4123 /* 4124 * Initialise per-cpu namecache effectiveness statistics. 4125 */ 4126 for (i = 0; i < ncpus; ++i) { 4127 gd = globaldata_find(i); 4128 gd->gd_nchstats = &nchstats[i]; 4129 } 4130 4131 /* 4132 * Create a generous namecache hash table 4133 */ 4134 nchashtbl = hashinit_ext(vfs_inodehashsize(), 4135 sizeof(struct nchash_head), 4136 M_VFSCACHE, &nchash); 4137 for (i = 0; i <= (int)nchash; ++i) { 4138 TAILQ_INIT(&nchashtbl[i].list); 4139 spin_init(&nchashtbl[i].spin, "nchinit_hash"); 4140 } 4141 for (i = 0; i < NCMOUNT_NUMCACHE; ++i) 4142 spin_init(&ncmount_cache[i].spin, "nchinit_cache"); 4143 nclockwarn = 5 * hz; 4144 } 4145 4146 /* 4147 * Called from start_init() to bootstrap the root filesystem. Returns 4148 * a referenced, unlocked namecache record. 
4149 */ 4150 void 4151 cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp) 4152 { 4153 nch->ncp = cache_alloc(0); 4154 nch->mount = mp; 4155 _cache_mntref(mp); 4156 if (vp) 4157 _cache_setvp(nch->mount, nch->ncp, vp); 4158 } 4159 4160 /* 4161 * vfs_cache_setroot() 4162 * 4163 * Create an association between the root of our namecache and 4164 * the root vnode. This routine may be called several times during 4165 * booting. 4166 * 4167 * If the caller intends to save the returned namecache pointer somewhere 4168 * it must cache_hold() it. 4169 */ 4170 void 4171 vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch) 4172 { 4173 struct vnode *ovp; 4174 struct nchandle onch; 4175 4176 ovp = rootvnode; 4177 onch = rootnch; 4178 rootvnode = nvp; 4179 if (nch) 4180 rootnch = *nch; 4181 else 4182 cache_zero(&rootnch); 4183 if (ovp) 4184 vrele(ovp); 4185 if (onch.ncp) 4186 cache_drop(&onch); 4187 } 4188 4189 /* 4190 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache 4191 * topology and is being removed as quickly as possible. The new VOP_N*() 4192 * API calls are required to make specific adjustments using the supplied 4193 * ncp pointers rather then just bogusly purging random vnodes. 4194 * 4195 * Invalidate all namecache entries to a particular vnode as well as 4196 * any direct children of that vnode in the namecache. This is a 4197 * 'catch all' purge used by filesystems that do not know any better. 4198 * 4199 * Note that the linkage between the vnode and its namecache entries will 4200 * be removed, but the namecache entries themselves might stay put due to 4201 * active references from elsewhere in the system or due to the existance of 4202 * the children. The namecache topology is left intact even if we do not 4203 * know what the vnode association is. Such entries will be marked 4204 * NCF_UNRESOLVED. 
4205 */ 4206 void 4207 cache_purge(struct vnode *vp) 4208 { 4209 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN); 4210 } 4211 4212 static int disablecwd; 4213 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, 4214 "Disable getcwd"); 4215 4216 static u_long numcwdcalls; 4217 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdcalls, CTLFLAG_RD, &numcwdcalls, 0, 4218 "Number of current directory resolution calls"); 4219 static u_long numcwdfailnf; 4220 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailnf, CTLFLAG_RD, &numcwdfailnf, 0, 4221 "Number of current directory failures due to lack of file"); 4222 static u_long numcwdfailsz; 4223 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailsz, CTLFLAG_RD, &numcwdfailsz, 0, 4224 "Number of current directory failures due to large result"); 4225 static u_long numcwdfound; 4226 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfound, CTLFLAG_RD, &numcwdfound, 0, 4227 "Number of current directory resolution successes"); 4228 4229 /* 4230 * MPALMOSTSAFE 4231 */ 4232 int 4233 sys___getcwd(struct __getcwd_args *uap) 4234 { 4235 u_int buflen; 4236 int error; 4237 char *buf; 4238 char *bp; 4239 4240 if (disablecwd) 4241 return (ENODEV); 4242 4243 buflen = uap->buflen; 4244 if (buflen == 0) 4245 return (EINVAL); 4246 if (buflen > MAXPATHLEN) 4247 buflen = MAXPATHLEN; 4248 4249 buf = kmalloc(buflen, M_TEMP, M_WAITOK); 4250 bp = kern_getcwd(buf, buflen, &error); 4251 if (error == 0) 4252 error = copyout(bp, uap->buf, strlen(bp) + 1); 4253 kfree(buf, M_TEMP); 4254 return (error); 4255 } 4256 4257 char * 4258 kern_getcwd(char *buf, size_t buflen, int *error) 4259 { 4260 struct proc *p = curproc; 4261 char *bp; 4262 int i, slash_prefixed; 4263 struct filedesc *fdp; 4264 struct nchandle nch; 4265 struct namecache *ncp; 4266 4267 numcwdcalls++; 4268 bp = buf; 4269 bp += buflen - 1; 4270 *bp = '\0'; 4271 fdp = p->p_fd; 4272 slash_prefixed = 0; 4273 4274 nch = fdp->fd_ncdir; 4275 ncp = nch.ncp; 4276 if (ncp) 4277 _cache_hold(ncp); 4278 4279 while (ncp && (ncp != fdp->fd_nrdir.ncp || 4280 nch.mount != fdp->fd_nrdir.mount) 4281 ) { 4282 /* 4283 * While traversing upwards if we encounter the root 4284 * of the current mount we have to skip to the mount point 4285 * in the underlying filesystem. 4286 */ 4287 if (ncp == nch.mount->mnt_ncmountpt.ncp) { 4288 nch = nch.mount->mnt_ncmounton; 4289 _cache_drop(ncp); 4290 ncp = nch.ncp; 4291 if (ncp) 4292 _cache_hold(ncp); 4293 continue; 4294 } 4295 4296 /* 4297 * Prepend the path segment 4298 */ 4299 for (i = ncp->nc_nlen - 1; i >= 0; i--) { 4300 if (bp == buf) { 4301 numcwdfailsz++; 4302 *error = ERANGE; 4303 bp = NULL; 4304 goto done; 4305 } 4306 *--bp = ncp->nc_name[i]; 4307 } 4308 if (bp == buf) { 4309 numcwdfailsz++; 4310 *error = ERANGE; 4311 bp = NULL; 4312 goto done; 4313 } 4314 *--bp = '/'; 4315 slash_prefixed = 1; 4316 4317 /* 4318 * Go up a directory. This isn't a mount point so we don't 4319 * have to check again. 
4320 */ 4321 while ((nch.ncp = ncp->nc_parent) != NULL) { 4322 if (ncp_shared_lock_disable) 4323 _cache_lock(ncp); 4324 else 4325 _cache_lock_shared(ncp); 4326 if (nch.ncp != ncp->nc_parent) { 4327 _cache_unlock(ncp); 4328 continue; 4329 } 4330 _cache_hold(nch.ncp); 4331 _cache_unlock(ncp); 4332 break; 4333 } 4334 _cache_drop(ncp); 4335 ncp = nch.ncp; 4336 } 4337 if (ncp == NULL) { 4338 numcwdfailnf++; 4339 *error = ENOENT; 4340 bp = NULL; 4341 goto done; 4342 } 4343 if (!slash_prefixed) { 4344 if (bp == buf) { 4345 numcwdfailsz++; 4346 *error = ERANGE; 4347 bp = NULL; 4348 goto done; 4349 } 4350 *--bp = '/'; 4351 } 4352 numcwdfound++; 4353 *error = 0; 4354 done: 4355 if (ncp) 4356 _cache_drop(ncp); 4357 return (bp); 4358 } 4359 4360 /* 4361 * Thus begins the fullpath magic. 4362 * 4363 * The passed nchp is referenced but not locked. 4364 */ 4365 static int disablefullpath; 4366 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, 4367 &disablefullpath, 0, 4368 "Disable fullpath lookups"); 4369 4370 int 4371 cache_fullpath(struct proc *p, struct nchandle *nchp, struct nchandle *nchbase, 4372 char **retbuf, char **freebuf, int guess) 4373 { 4374 struct nchandle fd_nrdir; 4375 struct nchandle nch; 4376 struct namecache *ncp; 4377 struct mount *mp, *new_mp; 4378 char *bp, *buf; 4379 int slash_prefixed; 4380 int error = 0; 4381 int i; 4382 4383 *retbuf = NULL; 4384 *freebuf = NULL; 4385 4386 buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK); 4387 bp = buf + MAXPATHLEN - 1; 4388 *bp = '\0'; 4389 if (nchbase) 4390 fd_nrdir = *nchbase; 4391 else if (p != NULL) 4392 fd_nrdir = p->p_fd->fd_nrdir; 4393 else 4394 fd_nrdir = rootnch; 4395 slash_prefixed = 0; 4396 nch = *nchp; 4397 ncp = nch.ncp; 4398 if (ncp) 4399 _cache_hold(ncp); 4400 mp = nch.mount; 4401 4402 while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) { 4403 new_mp = NULL; 4404 4405 /* 4406 * If we are asked to guess the upwards path, we do so whenever 4407 * we encounter an ncp marked as a mountpoint. We try to find 4408 * the actual mountpoint by finding the mountpoint with this 4409 * ncp. 4410 */ 4411 if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) { 4412 new_mp = mount_get_by_nc(ncp); 4413 } 4414 /* 4415 * While traversing upwards if we encounter the root 4416 * of the current mount we have to skip to the mount point. 4417 */ 4418 if (ncp == mp->mnt_ncmountpt.ncp) { 4419 new_mp = mp; 4420 } 4421 if (new_mp) { 4422 nch = new_mp->mnt_ncmounton; 4423 _cache_drop(ncp); 4424 ncp = nch.ncp; 4425 if (ncp) 4426 _cache_hold(ncp); 4427 mp = nch.mount; 4428 continue; 4429 } 4430 4431 /* 4432 * Prepend the path segment 4433 */ 4434 for (i = ncp->nc_nlen - 1; i >= 0; i--) { 4435 if (bp == buf) { 4436 kfree(buf, M_TEMP); 4437 error = ENOMEM; 4438 goto done; 4439 } 4440 *--bp = ncp->nc_name[i]; 4441 } 4442 if (bp == buf) { 4443 kfree(buf, M_TEMP); 4444 error = ENOMEM; 4445 goto done; 4446 } 4447 *--bp = '/'; 4448 slash_prefixed = 1; 4449 4450 /* 4451 * Go up a directory. This isn't a mount point so we don't 4452 * have to check again. 4453 * 4454 * We can only safely access nc_parent with ncp held locked. 
4455 */ 4456 while ((nch.ncp = ncp->nc_parent) != NULL) { 4457 _cache_lock_shared(ncp); 4458 if (nch.ncp != ncp->nc_parent) { 4459 _cache_unlock(ncp); 4460 continue; 4461 } 4462 _cache_hold(nch.ncp); 4463 _cache_unlock(ncp); 4464 break; 4465 } 4466 _cache_drop(ncp); 4467 ncp = nch.ncp; 4468 } 4469 if (ncp == NULL) { 4470 kfree(buf, M_TEMP); 4471 error = ENOENT; 4472 goto done; 4473 } 4474 4475 if (!slash_prefixed) { 4476 if (bp == buf) { 4477 kfree(buf, M_TEMP); 4478 error = ENOMEM; 4479 goto done; 4480 } 4481 *--bp = '/'; 4482 } 4483 *retbuf = bp; 4484 *freebuf = buf; 4485 error = 0; 4486 done: 4487 if (ncp) 4488 _cache_drop(ncp); 4489 return(error); 4490 } 4491 4492 int 4493 vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, 4494 char **freebuf, int guess) 4495 { 4496 struct namecache *ncp; 4497 struct nchandle nch; 4498 int error; 4499 4500 *freebuf = NULL; 4501 if (disablefullpath) 4502 return (ENODEV); 4503 4504 if (p == NULL) 4505 return (EINVAL); 4506 4507 /* vn is NULL, client wants us to use p->p_textvp */ 4508 if (vn == NULL) { 4509 if ((vn = p->p_textvp) == NULL) 4510 return (EINVAL); 4511 } 4512 spin_lock_shared(&vn->v_spin); 4513 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) { 4514 if (ncp->nc_nlen) 4515 break; 4516 } 4517 if (ncp == NULL) { 4518 spin_unlock_shared(&vn->v_spin); 4519 return (EINVAL); 4520 } 4521 _cache_hold(ncp); 4522 spin_unlock_shared(&vn->v_spin); 4523 4524 nch.ncp = ncp; 4525 nch.mount = vn->v_mount; 4526 error = cache_fullpath(p, &nch, NULL, retbuf, freebuf, guess); 4527 _cache_drop(ncp); 4528 return (error); 4529 } 4530 4531 void 4532 vfscache_rollup_cpu(struct globaldata *gd) 4533 { 4534 struct pcpu_ncache *pn; 4535 long count; 4536 4537 if (pcpu_ncache == NULL) 4538 return; 4539 pn = &pcpu_ncache[gd->gd_cpuid]; 4540 4541 if (pn->vfscache_count) { 4542 count = atomic_swap_long(&pn->vfscache_count, 0); 4543 atomic_add_long(&vfscache_count, count); 4544 } 4545 if (pn->vfscache_leafs) { 4546 count = atomic_swap_long(&pn->vfscache_leafs, 0); 4547 atomic_add_long(&vfscache_leafs, count); 4548 } 4549 if (pn->vfscache_negs) { 4550 count = atomic_swap_long(&pn->vfscache_negs, 0); 4551 atomic_add_long(&vfscache_negs, count); 4552 } 4553 if (pn->numdefered) { 4554 count = atomic_swap_long(&pn->numdefered, 0); 4555 atomic_add_long(&numdefered, count); 4556 } 4557 } 4558