/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.52 2005/03/09 05:16:23 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
#define MINNEG		1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */

/*
 * ncvp_debug - debug cache_fromdvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did
 *	not have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long	numneg;		/* number of negative cache entries */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;	/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;	/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct namecache *ncp);
static void cache_rehash(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	++ncp->nc_refs;
	return(ncp);
}

/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs > 0);
	if (ncp->nc_refs == 1 &&
	    (ncp->nc_flag & NCF_UNRESOLVED) &&
	    TAILQ_EMPTY(&ncp->nc_list)
	) {
		KKASSERT(ncp->nc_exlocks == 0);
		cache_lock(ncp);
		cache_zap(ncp);
	} else {
		--ncp->nc_refs;
	}
}

/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 *
 * If we are creating a child under an oldapi parent we must mark the
 * child as being an oldapi entry as well.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent the cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;

	if ((par = ncp->nc_parent) != NULL) {
		ncp->nc_parent = NULL;
		par = cache_hold(par);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			vdrop(par->nc_vp);
		cache_drop(par);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = malloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = malloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;
	TAILQ_INIT(&ncp->nc_list);
	cache_lock(ncp);
	return(ncp);
}

static void
cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		free(ncp->nc_name, M_VFSCACHE);
	free(ncp, M_VFSCACHE);
}

/*
 * Ref and deref a namecache structure.
 */
struct namecache *
cache_hold(struct namecache *ncp)
{
	return(_cache_hold(ncp));
}

void
cache_drop(struct namecache *ncp)
{
	_cache_drop(ncp);
}
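/*
 * Illustrative sketch, not part of the original file: the minimal
 * hold/drop bracket a caller uses to pin an ncp's memory across a
 * blocking point.  The ref does not prevent the entry from being
 * zapped or unresolved by others; it only prevents premature free.
 * The function name is hypothetical.
 */
static __inline void
example_hold_across_block(struct namecache *ncp)
{
	ncp = cache_hold(ncp);		/* pin the structure */
	tsleep(ncp, 0, "exhold", hz);	/* any blocking operation */
	cache_drop(ncp);		/* may zap an unresolved leaf */
}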
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * In particular, if a vnode is associated with a locked cache entry
 * that vnode will *NOT* be recycled.  We accomplish this by vhold()ing the
 * vnode.  XXX we should find a more efficient way to prevent the vnode
 * from being recycled, but remember that any given vnode may have multiple
 * namecache associations (think hardlinks).
 */
void
cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		if (ncp->nc_exlocks == 0) {
			ncp->nc_exlocks = 1;
			ncp->nc_locktd = td;
			/*
			 * The vp associated with a locked ncp must be held
			 * to prevent it from being recycled (which would
			 * cause the ncp to become unresolved).
			 *
			 * XXX loop on race for later MPSAFE work.
			 */
			if (ncp->nc_vp)
				vhold(ncp->nc_vp);
			break;
		}
		if (ncp->nc_locktd == td) {
			++ncp->nc_exlocks;
			break;
		}
		ncp->nc_flag |= NCF_LOCKREQ;
		if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
			if (didwarn)
				continue;
			didwarn = 1;
			printf("[diagnostic] cache_lock: blocked on %p", ncp);
			if ((ncp->nc_flag & NCF_MOUNTPT) && ncp->nc_mount) {
				printf(" [MOUNTFROM %s]\n",
				    ncp->nc_mount->mnt_stat.f_mntfromname);
			} else {
				printf(" \"%*.*s\"\n",
				    ncp->nc_nlen, ncp->nc_nlen,
				    ncp->nc_name);
			}
		}
	}

	if (didwarn == 1) {
		printf("[diagnostic] cache_lock: unblocked %*.*s\n",
		    ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
	}
}

int
cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;

	KKASSERT(ncp->nc_refs != 0);
	td = curthread;
	if (ncp->nc_exlocks == 0) {
		ncp->nc_exlocks = 1;
		ncp->nc_locktd = td;
		/*
		 * The vp associated with a locked ncp must be held
		 * to prevent it from being recycled (which would
		 * cause the ncp to become unresolved).
		 *
		 * XXX loop on race for later MPSAFE work.
		 */
		if (ncp->nc_vp)
			vhold(ncp->nc_vp);
		return(0);
	} else {
		return(EWOULDBLOCK);
	}
}

void
cache_unlock(struct namecache *ncp)
{
	thread_t td = curthread;

	KKASSERT(ncp->nc_refs > 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);
	if (--ncp->nc_exlocks == 0) {
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
		ncp->nc_locktd = NULL;
		if (ncp->nc_flag & NCF_LOCKREQ) {
			ncp->nc_flag &= ~NCF_LOCKREQ;
			wakeup(ncp);
		}
	}
}

/*
 * ref-and-lock, unlock-and-deref functions.
 */
struct namecache *
cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	cache_lock(ncp);
	return(ncp);
}

int
cache_get_nonblock(struct namecache *ncp)
{
	/* XXX MP */
	if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
		_cache_hold(ncp);
		cache_lock(ncp);
		return(0);
	}
	return(EWOULDBLOCK);
}

void
cache_put(struct namecache *ncp)
{
	cache_unlock(ncp);
	_cache_drop(ncp);
}
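/*
 * Illustrative sketch, not part of the original file: the canonical
 * ref+lock bracket.  cache_get() gives the caller exclusive authority
 * over the entry's resolve state and vnode association until the
 * matching cache_put().  The helper name is hypothetical.
 */
static __inline int
example_inspect_locked(struct namecache *ncp)
{
	int resolved;

	ncp = cache_get(ncp);	/* ref + lock in one call */
	resolved = (ncp->nc_flag & NCF_UNRESOLVED) == 0;
	cache_put(ncp);		/* unlock + drop in one call */
	return (resolved);
}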
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
void
cache_setvp(struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	ncp->nc_vp = vp;
	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		++numcache;
		ncp->nc_error = 0;
	} else {
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		ncp->nc_error = ENOENT;
	}
	ncp->nc_flag &= ~NCF_UNRESOLVED;
}

void
cache_settimeout(struct namecache *ncp, int nticks)
{
	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}

/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 */
void
cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		++numunres;
		if ((vp = ncp->nc_vp) != NULL) {
			--numcache;
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
		}
	}
}
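/*
 * Illustrative sketch, not part of the original file: the state
 * transitions a resolver drives on a locked, unresolved ncp.  A non-NULL
 * vnode produces a positive entry (nc_error 0), NULL produces a negative
 * entry (nc_error ENOENT), and cache_setunresolved() returns the entry
 * to the unresolved state so a later lookup re-resolves it.  The helper
 * name is hypothetical.
 */
static __inline void
example_resolver_transitions(struct namecache *ncp, struct vnode *vp)
{
	cache_setvp(ncp, vp);		/* positive hit (negative if NULL) */
	cache_settimeout(ncp, hz);	/* optionally expire in ~1 second */
	cache_setunresolved(ncp);	/* force a future re-resolve */
	cache_setvp(ncp, NULL);		/* negative hit: nc_error = ENOENT */
}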
/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT NOT* have been re-resolved.
 */
int
cache_inval(struct namecache *ncp, int flags)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;

	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		cache_hold(kid);
		cache_unlock(ncp);
		while (kid) {
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				cache_lock(kid);
				rcnt += cache_inval(kid, flags & ~CINV_DESTROY);
				cache_unlock(kid);
			}
			cache_drop(kid);
			kid = nextkid;
		}
		cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}

/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: unlike the namecache topology which guarantees that ncp's will not
 * be ripped out of the topology while held, the vnode's v_namecache list
 * has no such restriction.  NCP's can be ripped out of the list at virtually
 * any time if not locked, even if held.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			cache_hold(next);
		cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			printf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			cache_put(ncp);
			if (next)		/* next may be NULL */
				cache_drop(next);
			goto restart;
		}
		cache_inval(ncp, flags);
		cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			printf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			cache_drop(ncp);
			goto restart;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
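/*
 * Illustrative sketch, not part of the original file: how a directory
 * removal might use cache_inval(), mirroring the CINV_* description
 * above.  The passed ncp is assumed locked by the caller; the helper
 * name is hypothetical.
 */
static __inline void
example_inval_on_rmdir(struct namecache *ncp)
{
	/*
	 * Flag the node itself destroyed and push all children to the
	 * unresolved state; children are not flagged DESTROYED.
	 */
	cache_inval(ncp, CINV_DESTROY | CINV_CHILDREN);
}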
/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  Both will be set to unresolved, any children of tncp
 * will be disconnected (the prior contents of the target are assumed to be
 * destroyed by the rename operation, e.g. renaming over an empty directory),
 * and all children of fncp will be moved to tncp.
 *
 * XXX the disconnection could pose a problem, check code paths to make
 * sure any code that blocks can handle the parent being changed out from
 * under it.  Maybe we should lock the children (watch out for deadlocks) ?
 *
 * After we return the caller has the option of calling cache_setvp() if
 * the vnode of the new target ncp is known.
 *
 * Any process CD'd into any of the children will no longer be able to ".."
 * back out.  An rm -rf can cause this situation to occur.
 */
void
cache_rename(struct namecache *fncp, struct namecache *tncp)
{
	struct namecache *scan;
	int didwarn = 0;

	cache_setunresolved(fncp);
	cache_setunresolved(tncp);
	while (cache_inval(tncp, CINV_CHILDREN) != 0) {
		if (didwarn++ % 10 == 0) {
			printf("Warning: cache_rename: race during "
				"rename %s->%s\n",
				fncp->nc_name, tncp->nc_name);
		}
		tsleep(tncp, 0, "mvrace", hz / 10);
		cache_setunresolved(tncp);
	}
	while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
		cache_hold(scan);
		cache_unlink_parent(scan);
		cache_link_parent(scan, tncp);
		if (scan->nc_flag & NCF_HASHED)
			cache_rehash(scan);
		cache_drop(scan);
	}
}
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED or LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The main race we have to deal with are namecache zaps.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct namecache *ncp, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_lock(ncp);
		error = cache_resolve(ncp, cred);
		cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, lk_type, curthread);
		if (error) {
			if (vp != ncp->nc_vp)	/* handle cache_zap race */
				goto again;
			vp = NULL;
		} else if (vp != ncp->nc_vp) {	/* handle cache_zap race */
			vput(vp);
			goto again;
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

int
cache_vref(struct namecache *ncp, struct ucred *cred, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_lock(ncp);
		error = cache_resolve(ncp, cred);
		cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		vref(vp);
		if (vp != ncp->nc_vp) {		/* handle cache_zap race */
			vrele(vp);
			goto again;
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
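/*
 * Illustrative sketch, not part of the original file: the typical
 * consumer pattern for cache_vget().  The ncp must be referenced; on
 * success the vnode comes back locked (per lk_type) and referenced and
 * must be vput() when the caller is done with it.  The helper name is
 * hypothetical.
 */
static __inline int
example_operate_on_vnode(struct namecache *ncp, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	error = cache_vget(ncp, cred, LK_EXCLUSIVE, &vp);
	if (error == 0) {
		/* ... operate on the locked, referenced vnode ... */
		vput(vp);
	}
	return (error);
}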
/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 */

static int cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
				  struct vnode *dvp);

struct namecache *
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit)
{
	struct namecache *ncp;
	struct vnode *pvp;
	int error;

	/*
	 * Temporary debugging code to force the directory scanning code
	 * to be exercised.
	 */
	ncp = NULL;
	if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
		ncp = TAILQ_FIRST(&dvp->v_namecache);
		printf("cache_fromdvp: forcing %s\n", ncp->nc_name);
		goto force;
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while ((ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			ncp = cache_get(dvp->v_mount->mnt_ncp);
			error = cache_resolve_mp(ncp);
			cache_put(ncp);
			if (ncvp_debug) {
				printf("cache_fromdvp: resolve root of "
					"mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					printf(" failed\n");
				ncp = NULL;
				break;
			}
			if (ncvp_debug)
				printf(" succeeded\n");
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
		if (error) {
			printf("lookupdotdot failed %d %p\n", error, pvp);
			break;
		}
		VOP_UNLOCK(pvp, 0, curthread);

		/*
		 * XXX this recursion could run the kernel out of stack,
		 * change to a less efficient algorithm if we get too deep
		 * (use 'makeit' for a depth counter?)
		 */
		ncp = cache_fromdvp(pvp, cred, makeit);
		vrele(pvp);
		if (ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.  Drop the ref
		 * only after we are done printing nc_name.
		 */
		error = cache_inefficient_scan(ncp, cred, dvp);
		if (error) {
			printf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, ncp->nc_name, dvp);
			cache_drop(ncp);
			ncp = NULL;
			break;
		}
		if (ncvp_debug) {
			printf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, ncp->nc_name);
		}
		cache_drop(ncp);
	}
	if (ncp)
		cache_hold(ncp);
	return (ncp);
}

/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to
 * recover a connected namecache topology in order to then be able to issue
 * new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every
 * subsystem that calls into the namecache to deal with the re-merge case,
 * especially since we are using the namecache to placehold negative lookups
 * and the vnode might not be immediately assigned.  (2) is certainly far
 * less efficient than (1), but since we are only talking about directories
 * here (which are likely to remain cached), the case does not actually run
 * all that often and has the supreme advantage of not polluting the
 * namecache algorithms.
 */
static int
cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
		       struct vnode *dvp)
{
	struct nlcomponent nlc;
	struct namecache *rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	u_long *cookies;
	off_t baseoff;
	int ncookies;
	int blksize;
	int eofflag;
	char *rbuf;
	int error;
	int xoff;
	int i;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat, curthread)) != 0)
		return (error);
	if ((error = cache_vget(ncp, cred, LK_SHARED, &pvp)) != 0)
		return (error);
	if (ncvp_debug) {
		printf("inefficient_scan: directory iosize %ld "
			"vattr fileid = %ld\n",
			vat.va_blocksize, (long)vat.va_fileid);
	}
	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = malloc(blksize, M_TEMP, M_WAITOK);
	rncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
	cookies = NULL;
again:
	baseoff = uio.uio_offset;
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (cookies) {
		free(cookies, M_TEMP);
		cookies = NULL;
	}
	if (ncvp_debug >= 2)
		printf("cache_inefficient_scan: readdir @ %08x\n", (int)baseoff);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, &ncookies, &cookies);
	if (error == 0 && cookies == NULL)
		error = EPERM;
	if (error == 0) {
		for (i = 0; i < ncookies; ++i) {
			xoff = (int)(cookies[i] - (u_long)baseoff);
			/*
			 * UFS plays a little trick to skip the first entry
			 * in a directory ("."), by assigning the cookie to
			 * dpoff + dp->d_reclen in the loop.  This causes
			 * the last cookie to be assigned to the data-end of
			 * the directory.  XXX
			 */
			if (xoff == blksize)
				break;
			KKASSERT(xoff >= 0 && xoff <= blksize);
			den = (struct dirent *)(rbuf + xoff);
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_fileno == vat.va_fileid) {
				if (ncvp_debug) {
					printf("cache_inefficient_scan: "
						"MATCHED inode %ld path %s/%*.*s\n",
						vat.va_fileid, ncp->nc_name,
						den->d_namlen, den->d_namlen,
						den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				VOP_UNLOCK(pvp, 0, curthread);
				rncp = cache_nlookup(ncp, &nlc);
				KKASSERT(rncp != NULL);
				break;
			}
		}
		if (rncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	if (cookies) {
		free(cookies, M_TEMP);
		cookies = NULL;
	}
	if (rncp) {
		vrele(pvp);
		if (rncp->nc_flag & NCF_UNRESOLVED) {
			cache_setvp(rncp, dvp);
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: setvp %s/%s = %p\n",
					ncp->nc_name, rncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: setvp %s/%s "
					"already set %p/%p\n",
					ncp->nc_name, rncp->nc_name, dvp,
					rncp->nc_vp);
			}
		}
		if (rncp->nc_vp == NULL)
			error = rncp->nc_error;
		cache_put(rncp);
	} else {
		printf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, ncp->nc_name);
		vput(pvp);
		error = ENOENT;
	}
	free(rbuf, M_TEMP);
	return (error);
}
/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
	struct namecache *par;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 */
	while (ncp->nc_flag & NCF_UNRESOLVED) {
		/*
		 * Someone other than us has a ref, stop.
		 */
		if (ncp->nc_refs > 1)
			goto done;

		/*
		 * We have children, stop.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			goto done;

		/*
		 * Remove ncp from the topology: hash table and parent linkage.
		 */
		if (ncp->nc_flag & NCF_HASHED) {
			ncp->nc_flag &= ~NCF_HASHED;
			LIST_REMOVE(ncp, nc_hash);
		}
		if ((par = ncp->nc_parent) != NULL) {
			par = cache_hold(par);
			TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
			ncp->nc_parent = NULL;
			if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
				vdrop(par->nc_vp);
		}

		/*
		 * ncp should not have picked up any refs.  Physically
		 * destroy the ncp.
		 */
		KKASSERT(ncp->nc_refs == 1);
		--numunres;
		/* cache_unlock(ncp) not required */
		ncp->nc_refs = -1;	/* safety */
		if (ncp->nc_name)
			free(ncp->nc_name, M_VFSCACHE);
		free(ncp, M_VFSCACHE);

		/*
		 * Loop on the parent (it may be NULL).  Only bother looping
		 * if the parent has a single ref (ours), which also means
		 * we can lock it trivially.
		 */
		ncp = par;
		if (ncp == NULL)
			return;
		if (ncp->nc_refs != 1) {
			cache_drop(ncp);
			return;
		}
		KKASSERT(par->nc_exlocks == 0);
		cache_lock(ncp);
	}
done:
	cache_unlock(ncp);
	--ncp->nc_refs;
}

static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static __inline
void
cache_hysteresis(void)
{
	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			cache_cleanneg(10);
			cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			cache_cleanneg(10);
		} else {
			cache_hysteresis_state = CHI_LOW;
		}
		break;
	}
}
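/*
 * Worked example (added for clarity, not in the original): with the
 * default ncnegfactor of 16 and, say, numcache = 32000, the CHI_LOW
 * test trips once numneg exceeds max(MINNEG, 32000/16) = 2000, at
 * which point cache_cleanneg(10) recycles the ten oldest negative
 * entries.  The CHI_HIGH state applies the same tests with a 9/10
 * fudge factor so the state machine does not ping-pong right at the
 * threshold.
 */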
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the cache.  A locked, referenced, non-NULL
 * entry is *always* returned, even if the supplied component is illegal.
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states:  positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct namecache *
cache_nlookup(struct namecache *par, struct nlcomponent *nlc)
{
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchashhead *nchpp;
	u_int32_t hash;
	globaldata_t gd;

	numcalls++;
	gd = mycpu;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);
	new_ncp = NULL;
restart:
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;

		/*
		 * Zap entries that have timed out.
		 */
		if (ncp->nc_timeout &&
		    (int)(ncp->nc_timeout - ticks) < 0 &&
		    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
		    ncp->nc_exlocks == 0
		) {
			cache_zap(cache_get(ncp));
			goto restart;
		}

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			if (cache_get_nonblock(ncp) == 0) {
				if (new_ncp)
					cache_free(new_ncp);
				goto found;
			}
			cache_get(ncp);
			cache_put(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  We have to relookup after possibly blocking in
	 * malloc.
	 */
	if (new_ncp == NULL) {
		new_ncp = cache_alloc(nlc->nlc_namelen);
		goto restart;
	}

	ncp = new_ncp;

	/*
	 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
	 * and link to the parent.  The mount point is usually inherited
	 * from the parent unless this is a special case such as a mount
	 * point where nlc_namelen is 0.  The caller is responsible for
	 * setting nc_mount in that case.  If nlc_namelen is 0 nc_name will
	 * be NULL.
	 */
	if (nlc->nlc_namelen) {
		bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
		ncp->nc_name[nlc->nlc_namelen] = 0;
		ncp->nc_mount = par->nc_mount;
	}
	nchpp = NCHHASH(hash);
	LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
	ncp->nc_flag |= NCF_HASHED;
	cache_link_parent(ncp, par);
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	cache_hysteresis();
	return(ncp);
}
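/*
 * Illustrative sketch, not part of the original file: a single-component
 * lookup against a held directory ncp using the API described above.
 * The returned entry is never NULL; it is locked and referenced and may
 * require a resolver call before its vnode can be trusted.  The helper
 * name is hypothetical.
 */
static int
example_lookup_component(struct namecache *par, struct ucred *cred,
			 char *name, int len)
{
	struct nlcomponent nlc;
	struct namecache *ncp;
	int error;

	nlc.nlc_nameptr = name;
	nlc.nlc_namelen = len;
	ncp = cache_nlookup(par, &nlc);
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(ncp, cred);
	else
		error = ncp->nc_error;	/* 0, or ENOENT for a negative hit */
	cache_put(ncp);
	return (error);
}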
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct namecache *ncp, struct ucred *cred)
{
	struct namecache *par;
	int error;

restart:
	/*
	 * If the ncp is already resolved we have nothing to do.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		return (ncp->nc_error);

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp->nc_flag & NCF_MOUNTPT)
		return (cache_resolve_mp(ncp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		printf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via
	 * vhold() due to the existence of the child, and should not
	 * disappear.  However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, or if it does not have to go back too
	 * many nodes to resolve the ncp.
	 */
	while (ncp->nc_parent->nc_vp == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);
		par = ncp->nc_parent;
		while (par->nc_parent && par->nc_parent->nc_vp == NULL)
			par = par->nc_parent;
		if (par->nc_parent == NULL) {
			printf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			return (EXDEV);
		}
		printf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		cache_get(par);
		if (par->nc_flag & NCF_MOUNTPT) {
			cache_resolve_mp(par);
		} else if (par->nc_parent->nc_vp == NULL) {
			printf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			cache_put(par);
			continue;
		} else if (par->nc_flag & NCF_UNRESOLVED) {
			par->nc_error = VOP_NRESOLVE(par, cred);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				printf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen,
					par->nc_name, par->nc_error);
				cache_put(par);
				return(error);
			}
			printf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 * ncp must already be resolved.
	 */
	KKASSERT((ncp->nc_flag & NCF_MOUNTPT) == 0);
	ncp->nc_error = VOP_NRESOLVE(ncp, cred);
	/*vop_nresolve(*ncp->nc_parent->nc_vp->v_ops, ncp, cred);*/
	if (ncp->nc_error == EAGAIN) {
		printf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}

/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS mount points tend
 * to force re-resolution more often due to NFS's mack-truck-smash-the-
 * namecache method of tracking namespace changes.
 *
 * The semantics for this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct namecache *ncp)
{
	struct vnode *vp;
	struct mount *mp = ncp->nc_mount;
	int error;

	KKASSERT(mp != NULL);
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_unlock(ncp);
		while (vfs_busy(mp, 0, NULL, curthread))
			;
		error = VFS_ROOT(mp, &vp);
		cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				cache_setvp(ncp, vp);
				vput(vp);
			} else {
				printf("[diagnostic] cache_resolve_mp: failed"
					" to resolve mount %p\n", mp);
				cache_setvp(ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp, curthread);
	}
	return(ncp->nc_error);
}

void
cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Automode from the vnlru proc - clean out 10% of the negative cache
	 * entries.
	 */
	if (count == 0)
		count = numneg / 10 + 1;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			KKASSERT(numneg == 0);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		if (cache_get_nonblock(ncp) == 0)
			cache_zap(ncp);
		--count;
	}
}
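/*
 * Illustrative sketch, not part of the original file: a periodic
 * reclaim pass (e.g. from the vnlru process) can run cache_cleanneg()
 * in automode by passing 0, which the function converts to roughly
 * 10% of the current negative-entry population.  The helper name is
 * hypothetical.
 */
static __inline void
example_periodic_neg_reclaim(void)
{
	cache_cleanneg(0);	/* automode: numneg / 10 + 1 entries */
}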
/*
 * Rehash a ncp.  Rehashing is typically required if the name changes (should
 * not generally occur) or the parent link changes.  This function will
 * unhash the ncp if the ncp is no longer hashable.
 */
static void
cache_rehash(struct namecache *ncp)
{
	struct nchashhead *nchpp;
	u_int32_t hash;

	if (ncp->nc_flag & NCF_HASHED) {
		ncp->nc_flag &= ~NCF_HASHED;
		LIST_REMOVE(ncp, nc_hash);
	}
	if (ncp->nc_nlen && ncp->nc_parent) {
		hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
		hash = fnv_32_buf(&ncp->nc_parent,
				  sizeof(ncp->nc_parent), hash);
		nchpp = NCHHASH(hash);
		LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
		ncp->nc_flag |= NCF_HASHED;
	}
}

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialise per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
	nclockwarn = 1 * hz;
}

/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
struct namecache *
cache_allocroot(struct mount *mp, struct vnode *vp)
{
	struct namecache *ncp = cache_alloc(0);

	ncp->nc_flag |= NCF_MOUNTPT | NCF_ROOT;
	ncp->nc_mount = mp;
	cache_setvp(ncp, vp);
	return(ncp);
}

/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct namecache *ncp)
{
	struct vnode *ovp;
	struct namecache *oncp;

	ovp = rootvnode;
	oncp = rootncp;
	rootvnode = nvp;
	rootncp = ncp;

	if (ovp)
		vrele(ovp);
	if (oncp)
		cache_drop(oncp);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * A new vnode v_id is generated.  Note that no vnode will ever have a
 * v_id of 0.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 *
 * XXX: Only time and the size of v_id prevents this from failing:
 * XXX: In theory we should hunt down all (struct vnode*, v_id)
 * XXX: soft references and nuke them, at least on the global
 * XXX: v_id wraparound.  The period of resistance can be extended
 * XXX: by incrementing each vnodes v_id individually instead of
 * XXX: using the global v_id.
 */
void
cache_purge(struct vnode *vp)
{
	static u_long nextid;

	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);

	/*
	 * Calculate a new unique id for ".." handling
	 */
	do {
		nextid++;
	} while (nextid == vp->v_id || nextid == 0);
	vp->v_id = nextid;
}
/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct nchashhead *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		ncp = LIST_FIRST(nchpp);
		if (ncp)
			cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				cache_lock(ncp);
				cache_zap(ncp);
			} else {
				cache_drop(ncp);
			}
			ncp = nnp;
		}
	}
}

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

int
__getcwd(struct __getcwd_args *uap)
{
	int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = malloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	free(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	ncp = fdp->fd_ncdir;
	while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
		if (ncp->nc_flag & NCF_MOUNTPT) {
			if (ncp->nc_mount == NULL) {
				*error = EBADF;		/* forced unmount? */
				return(NULL);
			}
			ncp = ncp->nc_parent;
			continue;
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ENOMEM;
				return(NULL);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
		slash_prefixed = 1;
		ncp = ncp->nc_parent;
	}
	if (ncp == NULL) {
		numcwdfail2++;
		*error = ENOENT;
		return(NULL);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
	return (bp);
}
/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
	&disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
cache_fullpath(struct proc *p, struct namecache *ncp,
	       char **retbuf, char **freebuf)
{
	char *bp, *buf;
	int i, slash_prefixed;
	struct namecache *fd_nrdir;

	numfullpathcalls--;

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = NULL;
	slash_prefixed = 0;
	while (ncp && ncp != fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
		if (ncp->nc_flag & NCF_MOUNTPT) {
			if (ncp->nc_mount == NULL) {
				free(buf, M_TEMP);
				return(EBADF);
			}
			ncp = ncp->nc_parent;
			continue;
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				free(buf, M_TEMP);
				return(ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		ncp = ncp->nc_parent;
	}
	if (ncp == NULL) {
		numfullpathfail2++;
		free(buf, M_TEMP);
		return(ENOENT);
	}
	if (p != NULL && (ncp->nc_flag & NCF_ROOT) && ncp != fd_nrdir) {
		bp = buf + MAXPATHLEN - 1;
		*bp = '\0';
		slash_prefixed = 0;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;

	return(0);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
	struct namecache *ncp;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL)
		return (EINVAL);

	numfullpathcalls--;
	return(cache_fullpath(p, ncp, retbuf, freebuf));
}
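/*
 * Illustrative sketch, not part of the original file: the retbuf/freebuf
 * contract of vn_fullpath().  retbuf points into the middle of freebuf,
 * so only freebuf is passed to free().  The helper name is hypothetical.
 */
static int
example_print_fullpath(struct proc *p, struct vnode *vp)
{
	char *retbuf, *freebuf;
	int error;

	error = vn_fullpath(p, vp, &retbuf, &freebuf);
	if (error == 0) {
		printf("path: %s\n", retbuf);
		free(freebuf, M_TEMP);
	}
	return (error);
}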