/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.77 2006/09/19 16:06:11 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_parent, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
#define MINNEG		1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */

/*
 * ncvp_debug - debug cache_fromdvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
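 *
 * Illustrative note (not in the original comment): the knob is exported as
 * a read/write sysctl, so full reporting can be enabled at run time with,
 * for example:
 *
 *	sysctl debug.ncvp_debug=2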
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;		/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct namecache *ncp);
static void cache_rehash(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
					sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock, so
 * we cannot use one ourselves.
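 *
 * Illustrative sketch (not in the original source): code that must keep an
 * ncp alive across a potentially blocking operation brackets it with a
 * hold/drop pair:
 *
 *	ncp = cache_hold(ncp);
 *	... operation that may block ...
 *	cache_drop(ncp);	(may zap a last-ref, unresolved, childless entry)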
211 */ 212 static __inline 213 struct namecache * 214 _cache_hold(struct namecache *ncp) 215 { 216 atomic_add_int(&ncp->nc_refs, 1); 217 return(ncp); 218 } 219 220 /* 221 * When dropping an entry, if only one ref remains and the entry has not 222 * been resolved, zap it. Since the one reference is being dropped the 223 * entry had better not be locked. 224 */ 225 static __inline 226 void 227 _cache_drop(struct namecache *ncp) 228 { 229 KKASSERT(ncp->nc_refs > 0); 230 if (ncp->nc_refs == 1 && 231 (ncp->nc_flag & NCF_UNRESOLVED) && 232 TAILQ_EMPTY(&ncp->nc_list) 233 ) { 234 KKASSERT(ncp->nc_exlocks == 0); 235 cache_lock(ncp); 236 cache_zap(ncp); 237 } else { 238 atomic_subtract_int(&ncp->nc_refs, 1); 239 } 240 } 241 242 /* 243 * Link a new namecache entry to its parent. Be careful to avoid races 244 * if vhold() blocks in the future. 245 */ 246 static void 247 cache_link_parent(struct namecache *ncp, struct namecache *par) 248 { 249 KKASSERT(ncp->nc_parent == NULL); 250 ncp->nc_parent = par; 251 if (TAILQ_EMPTY(&par->nc_list)) { 252 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry); 253 /* 254 * Any vp associated with an ncp which has children must 255 * be held to prevent it from being recycled. 256 */ 257 if (par->nc_vp) 258 vhold(par->nc_vp); 259 } else { 260 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry); 261 } 262 } 263 264 /* 265 * Remove the parent association from a namecache structure. If this is 266 * the last child of the parent the cache_drop(par) will attempt to 267 * recursively zap the parent. 268 */ 269 static void 270 cache_unlink_parent(struct namecache *ncp) 271 { 272 struct namecache *par; 273 274 if ((par = ncp->nc_parent) != NULL) { 275 ncp->nc_parent = NULL; 276 par = cache_hold(par); 277 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry); 278 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list)) 279 vdrop(par->nc_vp); 280 cache_drop(par); 281 } 282 } 283 284 /* 285 * Allocate a new namecache structure. Most of the code does not require 286 * zero-termination of the string but it makes vop_compat_ncreate() easier. 287 */ 288 static struct namecache * 289 cache_alloc(int nlen) 290 { 291 struct namecache *ncp; 292 293 ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO); 294 if (nlen) 295 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK); 296 ncp->nc_nlen = nlen; 297 ncp->nc_flag = NCF_UNRESOLVED; 298 ncp->nc_error = ENOTCONN; /* needs to be resolved */ 299 ncp->nc_refs = 1; 300 301 /* 302 * Construct a fake FSMID based on the time of day and a 32 bit 303 * roller for uniqueness. This is used to generate a useful 304 * FSMID for filesystems which do not support it. 305 */ 306 ncp->nc_fsmid = cache_getnewfsmid(); 307 TAILQ_INIT(&ncp->nc_list); 308 cache_lock(ncp); 309 return(ncp); 310 } 311 312 static void 313 cache_free(struct namecache *ncp) 314 { 315 KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1); 316 if (ncp->nc_name) 317 kfree(ncp->nc_name, M_VFSCACHE); 318 kfree(ncp, M_VFSCACHE); 319 } 320 321 /* 322 * Ref and deref a namecache structure. 323 * 324 * Warning: caller may hold an unrelated read spinlock, which means we can't 325 * use read spinlocks here. 326 */ 327 struct namecache * 328 cache_hold(struct namecache *ncp) 329 { 330 return(_cache_hold(ncp)); 331 } 332 333 void 334 cache_drop(struct namecache *ncp) 335 { 336 _cache_drop(ncp); 337 } 338 339 /* 340 * Namespace locking. The caller must already hold a reference to the 341 * namecache structure in order to lock/unlock it. 
This function prevents 342 * the namespace from being created or destroyed by accessors other then 343 * the lock holder. 344 * 345 * Note that holding a locked namecache structure prevents other threads 346 * from making namespace changes (e.g. deleting or creating), prevents 347 * vnode association state changes by other threads, and prevents the 348 * namecache entry from being resolved or unresolved by other threads. 349 * 350 * The lock owner has full authority to associate/disassociate vnodes 351 * and resolve/unresolve the locked ncp. 352 * 353 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed 354 * or recycled, but it does NOT help you if the vnode had already initiated 355 * a recyclement. If this is important, use cache_get() rather then 356 * cache_lock() (and deal with the differences in the way the refs counter 357 * is handled). Or, alternatively, make an unconditional call to 358 * cache_validate() or cache_resolve() after cache_lock() returns. 359 */ 360 void 361 cache_lock(struct namecache *ncp) 362 { 363 thread_t td; 364 int didwarn; 365 366 KKASSERT(ncp->nc_refs != 0); 367 didwarn = 0; 368 td = curthread; 369 370 for (;;) { 371 if (ncp->nc_exlocks == 0) { 372 ncp->nc_exlocks = 1; 373 ncp->nc_locktd = td; 374 /* 375 * The vp associated with a locked ncp must be held 376 * to prevent it from being recycled (which would 377 * cause the ncp to become unresolved). 378 * 379 * WARNING! If VRECLAIMED is set the vnode could 380 * already be in the middle of a recycle. Callers 381 * should not assume that nc_vp is usable when 382 * not NULL. cache_vref() or cache_vget() must be 383 * called. 384 * 385 * XXX loop on race for later MPSAFE work. 386 */ 387 if (ncp->nc_vp) 388 vhold(ncp->nc_vp); 389 break; 390 } 391 if (ncp->nc_locktd == td) { 392 ++ncp->nc_exlocks; 393 break; 394 } 395 ncp->nc_flag |= NCF_LOCKREQ; 396 if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) { 397 if (didwarn) 398 continue; 399 didwarn = 1; 400 printf("[diagnostic] cache_lock: blocked on %p", ncp); 401 if ((ncp->nc_flag & NCF_MOUNTPT) && ncp->nc_mount) 402 printf(" [MOUNTFROM %s]\n", ncp->nc_mount->mnt_stat.f_mntfromname); 403 else 404 printf(" \"%*.*s\"\n", 405 ncp->nc_nlen, ncp->nc_nlen, 406 ncp->nc_name); 407 } 408 } 409 410 if (didwarn == 1) { 411 printf("[diagnostic] cache_lock: unblocked %*.*s\n", 412 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name); 413 } 414 } 415 416 int 417 cache_lock_nonblock(struct namecache *ncp) 418 { 419 thread_t td; 420 421 KKASSERT(ncp->nc_refs != 0); 422 td = curthread; 423 if (ncp->nc_exlocks == 0) { 424 ncp->nc_exlocks = 1; 425 ncp->nc_locktd = td; 426 /* 427 * The vp associated with a locked ncp must be held 428 * to prevent it from being recycled (which would 429 * cause the ncp to become unresolved). 430 * 431 * WARNING! If VRECLAIMED is set the vnode could 432 * already be in the middle of a recycle. Callers 433 * should not assume that nc_vp is usable when 434 * not NULL. cache_vref() or cache_vget() must be 435 * called. 436 * 437 * XXX loop on race for later MPSAFE work. 
438 */ 439 if (ncp->nc_vp) 440 vhold(ncp->nc_vp); 441 return(0); 442 } else { 443 return(EWOULDBLOCK); 444 } 445 } 446 447 void 448 cache_unlock(struct namecache *ncp) 449 { 450 thread_t td = curthread; 451 452 KKASSERT(ncp->nc_refs > 0); 453 KKASSERT(ncp->nc_exlocks > 0); 454 KKASSERT(ncp->nc_locktd == td); 455 if (--ncp->nc_exlocks == 0) { 456 if (ncp->nc_vp) 457 vdrop(ncp->nc_vp); 458 ncp->nc_locktd = NULL; 459 if (ncp->nc_flag & NCF_LOCKREQ) { 460 ncp->nc_flag &= ~NCF_LOCKREQ; 461 wakeup(ncp); 462 } 463 } 464 } 465 466 /* 467 * ref-and-lock, unlock-and-deref functions. 468 * 469 * This function is primarily used by nlookup. Even though cache_lock 470 * holds the vnode, it is possible that the vnode may have already 471 * initiated a recyclement. We want cache_get() to return a definitively 472 * usable vnode or a definitively unresolved ncp. 473 */ 474 struct namecache * 475 cache_get(struct namecache *ncp) 476 { 477 _cache_hold(ncp); 478 cache_lock(ncp); 479 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 480 cache_setunresolved(ncp); 481 return(ncp); 482 } 483 484 int 485 cache_get_nonblock(struct namecache *ncp) 486 { 487 /* XXX MP */ 488 if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) { 489 _cache_hold(ncp); 490 cache_lock(ncp); 491 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 492 cache_setunresolved(ncp); 493 return(0); 494 } 495 return(EWOULDBLOCK); 496 } 497 498 void 499 cache_put(struct namecache *ncp) 500 { 501 cache_unlock(ncp); 502 _cache_drop(ncp); 503 } 504 505 /* 506 * Resolve an unresolved ncp by associating a vnode with it. If the 507 * vnode is NULL, a negative cache entry is created. 508 * 509 * The ncp should be locked on entry and will remain locked on return. 510 */ 511 void 512 cache_setvp(struct namecache *ncp, struct vnode *vp) 513 { 514 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED); 515 ncp->nc_vp = vp; 516 if (vp != NULL) { 517 /* 518 * Any vp associated with an ncp which has children must 519 * be held. Any vp associated with a locked ncp must be held. 520 */ 521 if (!TAILQ_EMPTY(&ncp->nc_list)) 522 vhold(vp); 523 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode); 524 if (ncp->nc_exlocks) 525 vhold(vp); 526 527 /* 528 * Set auxillary flags 529 */ 530 switch(vp->v_type) { 531 case VDIR: 532 ncp->nc_flag |= NCF_ISDIR; 533 break; 534 case VLNK: 535 ncp->nc_flag |= NCF_ISSYMLINK; 536 /* XXX cache the contents of the symlink */ 537 break; 538 default: 539 break; 540 } 541 ++numcache; 542 ncp->nc_error = 0; 543 } else { 544 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode); 545 ++numneg; 546 ncp->nc_error = ENOENT; 547 } 548 ncp->nc_flag &= ~NCF_UNRESOLVED; 549 } 550 551 void 552 cache_settimeout(struct namecache *ncp, int nticks) 553 { 554 if ((ncp->nc_timeout = ticks + nticks) == 0) 555 ncp->nc_timeout = 1; 556 } 557 558 /* 559 * Disassociate the vnode or negative-cache association and mark a 560 * namecache entry as unresolved again. Note that the ncp is still 561 * left in the hash table and still linked to its parent. 562 * 563 * The ncp should be locked and refd on entry and will remain locked and refd 564 * on return. 565 * 566 * This routine is normally never called on a directory containing children. 567 * However, NFS often does just that in its rename() code as a cop-out to 568 * avoid complex namespace operations. This disconnects a directory vnode 569 * from its namecache and can cause the OLDAPI and NEWAPI to get out of 570 * sync. 
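 *
 * Illustrative sketch (not in the original source): with the entry locked
 * and referenced, a stale association is typically replaced by unresolving
 * and then re-resolving it:
 *
 *	cache_setunresolved(ncp);	(drop the vnode or negative linkage)
 *	cache_setvp(ncp, vp);		(or NULL to create a negative entry)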
 *
 * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
 *	 in a create, properly propagates the flag up the chain.
 */
void
cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		++numunres;
		if ((vp = ncp->nc_vp) != NULL) {
			--numcache;
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (ncp->nc_flag & NCF_FSMID)
				vupdatefsmid(vp);
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
				  NCF_FSMID);
	}
}

/*
 * Mark the namecache node as containing a mount point.
 *
 * XXX called with a ref'd but unlocked ncp.
 */
void
cache_setmountpt(struct namecache *ncp, struct mount *mp)
{
	ncp->nc_mount = mp;
	ncp->nc_flag |= NCF_MOUNTPT;
	ncp->nc_parent->nc_flag |= NCF_MOUNTEDHERE;
}

/*
 * Clean up a mount point in the namecache topology after an unmount.
 *
 * XXX we probably need to traverse the entire topology and clear
 * the nc_mount pointer.
 */
void
cache_clrmountpt(struct namecache *ncp)
{
	if (ncp->nc_parent)
		ncp->nc_parent->nc_flag &= ~NCF_MOUNTEDHERE;
	ncp->nc_mount = NULL;
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp; even if
 *			  CINV_CHILDREN is specified the children are not
 *			  flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
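 *
 * Illustrative sketch (not in the original source): removing a directory
 * would typically invalidate the held and locked entry together with
 * everything cached underneath it:
 *
 *	cache_inval(ncp, CINV_DESTROY | CINV_CHILDREN);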
677 */ 678 int 679 cache_inval(struct namecache *ncp, int flags) 680 { 681 struct namecache *kid; 682 struct namecache *nextkid; 683 int rcnt = 0; 684 685 KKASSERT(ncp->nc_exlocks); 686 687 cache_setunresolved(ncp); 688 if (flags & CINV_DESTROY) 689 ncp->nc_flag |= NCF_DESTROYED; 690 691 if ((flags & CINV_CHILDREN) && 692 (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL 693 ) { 694 cache_hold(kid); 695 cache_unlock(ncp); 696 while (kid) { 697 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL) 698 cache_hold(nextkid); 699 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 || 700 TAILQ_FIRST(&kid->nc_list) 701 ) { 702 cache_lock(kid); 703 rcnt += cache_inval(kid, flags & ~CINV_DESTROY); 704 cache_unlock(kid); 705 } 706 cache_drop(kid); 707 kid = nextkid; 708 } 709 cache_lock(ncp); 710 } 711 712 /* 713 * Someone could have gotten in there while ncp was unlocked, 714 * retry if so. 715 */ 716 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) 717 ++rcnt; 718 return (rcnt); 719 } 720 721 /* 722 * Invalidate a vnode's namecache associations. To avoid races against 723 * the resolver we do not invalidate a node which we previously invalidated 724 * but which was then re-resolved while we were in the invalidation loop. 725 * 726 * Returns non-zero if any namecache entries remain after the invalidation 727 * loop completed. 728 * 729 * NOTE: unlike the namecache topology which guarentees that ncp's will not 730 * be ripped out of the topology while held, the vnode's v_namecache list 731 * has no such restriction. NCP's can be ripped out of the list at virtually 732 * any time if not locked, even if held. 733 */ 734 int 735 cache_inval_vp(struct vnode *vp, int flags) 736 { 737 struct namecache *ncp; 738 struct namecache *next; 739 740 restart: 741 ncp = TAILQ_FIRST(&vp->v_namecache); 742 if (ncp) 743 cache_hold(ncp); 744 while (ncp) { 745 /* loop entered with ncp held */ 746 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL) 747 cache_hold(next); 748 cache_lock(ncp); 749 if (ncp->nc_vp != vp) { 750 printf("Warning: cache_inval_vp: race-A detected on " 751 "%s\n", ncp->nc_name); 752 cache_put(ncp); 753 if (next) 754 cache_drop(next); 755 goto restart; 756 } 757 cache_inval(ncp, flags); 758 cache_put(ncp); /* also releases reference */ 759 ncp = next; 760 if (ncp && ncp->nc_vp != vp) { 761 printf("Warning: cache_inval_vp: race-B detected on " 762 "%s\n", ncp->nc_name); 763 cache_drop(ncp); 764 goto restart; 765 } 766 } 767 return(TAILQ_FIRST(&vp->v_namecache) != NULL); 768 } 769 770 /* 771 * The source ncp has been renamed to the target ncp. Both fncp and tncp 772 * must be locked. Both will be set to unresolved, any children of tncp 773 * will be disconnected (the prior contents of the target is assumed to be 774 * destroyed by the rename operation, e.g. renaming over an empty directory), 775 * and all children of fncp will be moved to tncp. 776 * 777 * XXX the disconnection could pose a problem, check code paths to make 778 * sure any code that blocks can handle the parent being changed out from 779 * under it. Maybe we should lock the children (watch out for deadlocks) ? 780 * 781 * After we return the caller has the option of calling cache_setvp() if 782 * the vnode of the new target ncp is known. 783 * 784 * Any process CD'd into any of the children will no longer be able to ".." 785 * back out. An rm -rf can cause this situation to occur. 
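 *
 * Illustrative sketch (not in the original source): with both entries held
 * and locked, a rename path would typically do
 *
 *	cache_rename(fncp, tncp);
 *	cache_setvp(tncp, vp);		(once the target's new vnode is known)
 *
 * and then release both entries with cache_put().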
786 */ 787 void 788 cache_rename(struct namecache *fncp, struct namecache *tncp) 789 { 790 struct namecache *scan; 791 int didwarn = 0; 792 793 cache_setunresolved(fncp); 794 cache_setunresolved(tncp); 795 while (cache_inval(tncp, CINV_CHILDREN) != 0) { 796 if (didwarn++ % 10 == 0) { 797 printf("Warning: cache_rename: race during " 798 "rename %s->%s\n", 799 fncp->nc_name, tncp->nc_name); 800 } 801 tsleep(tncp, 0, "mvrace", hz / 10); 802 cache_setunresolved(tncp); 803 } 804 while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) { 805 cache_hold(scan); 806 cache_unlink_parent(scan); 807 cache_link_parent(scan, tncp); 808 if (scan->nc_flag & NCF_HASHED) 809 cache_rehash(scan); 810 cache_drop(scan); 811 } 812 } 813 814 /* 815 * vget the vnode associated with the namecache entry. Resolve the namecache 816 * entry if necessary and deal with namecache/vp races. The passed ncp must 817 * be referenced and may be locked. The ncp's ref/locking state is not 818 * effected by this call. 819 * 820 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked 821 * (depending on the passed lk_type) will be returned in *vpp with an error 822 * of 0, or NULL will be returned in *vpp with a non-0 error code. The 823 * most typical error is ENOENT, meaning that the ncp represents a negative 824 * cache hit and there is no vnode to retrieve, but other errors can occur 825 * too. 826 * 827 * The main race we have to deal with are namecache zaps. The ncp itself 828 * will not disappear since it is referenced, and it turns out that the 829 * validity of the vp pointer can be checked simply by rechecking the 830 * contents of ncp->nc_vp. 831 */ 832 int 833 cache_vget(struct namecache *ncp, struct ucred *cred, 834 int lk_type, struct vnode **vpp) 835 { 836 struct vnode *vp; 837 int error; 838 839 again: 840 vp = NULL; 841 if (ncp->nc_flag & NCF_UNRESOLVED) { 842 cache_lock(ncp); 843 error = cache_resolve(ncp, cred); 844 cache_unlock(ncp); 845 } else { 846 error = 0; 847 } 848 if (error == 0 && (vp = ncp->nc_vp) != NULL) { 849 /* 850 * Accessing the vnode from the namecache is a bit 851 * dangerous. Because there are no refs on the vnode, it 852 * could be in the middle of a reclaim. 853 */ 854 if (vp->v_flag & VRECLAIMED) { 855 printf("Warning: vnode reclaim race detected in cache_vget on %p (%s)\n", vp, ncp->nc_name); 856 cache_lock(ncp); 857 cache_setunresolved(ncp); 858 cache_unlock(ncp); 859 goto again; 860 } 861 error = vget(vp, lk_type); 862 if (error) { 863 if (vp != ncp->nc_vp) 864 goto again; 865 vp = NULL; 866 } else if (vp != ncp->nc_vp) { 867 vput(vp); 868 goto again; 869 } else if (vp->v_flag & VRECLAIMED) { 870 panic("vget succeeded on a VRECLAIMED node! vp %p", vp); 871 } 872 } 873 if (error == 0 && vp == NULL) 874 error = ENOENT; 875 *vpp = vp; 876 return(error); 877 } 878 879 int 880 cache_vref(struct namecache *ncp, struct ucred *cred, struct vnode **vpp) 881 { 882 struct vnode *vp; 883 int error; 884 885 again: 886 vp = NULL; 887 if (ncp->nc_flag & NCF_UNRESOLVED) { 888 cache_lock(ncp); 889 error = cache_resolve(ncp, cred); 890 cache_unlock(ncp); 891 } else { 892 error = 0; 893 } 894 if (error == 0 && (vp = ncp->nc_vp) != NULL) { 895 /* 896 * Since we did not obtain any locks, a cache zap 897 * race can occur here if the vnode is in the middle 898 * of being reclaimed and has not yet been able to 899 * clean out its cache node. If that case occurs, 900 * we must lock and unresolve the cache, then loop 901 * to retry. 
902 */ 903 if (vp->v_flag & VRECLAIMED) { 904 printf("Warning: vnode reclaim race detected on cache_vref %p (%s)\n", vp, ncp->nc_name); 905 cache_lock(ncp); 906 cache_setunresolved(ncp); 907 cache_unlock(ncp); 908 goto again; 909 } 910 vref_initial(vp, 1); 911 } 912 if (error == 0 && vp == NULL) 913 error = ENOENT; 914 *vpp = vp; 915 return(error); 916 } 917 918 /* 919 * Recursively set the FSMID update flag for namecache nodes leading 920 * to root. This will cause the next getattr or reclaim to increment the 921 * fsmid and mark the inode for lazy updating. 922 * 923 * Stop recursing when we hit a node whos NCF_FSMID flag is already set. 924 * This makes FSMIDs work in an Einsteinian fashion - where the observation 925 * effects the result. In this case a program monitoring a higher level 926 * node will have detected some prior change and started its scan (clearing 927 * NCF_FSMID in higher level nodes), but since it has not yet observed the 928 * node where we find NCF_FSMID still set, we can safely make the related 929 * modification without interfering with the theorized program. 930 * 931 * This also means that FSMIDs cannot represent time-domain quantities 932 * in a hierarchical sense. But the main reason for doing it this way 933 * is to reduce the amount of recursion that occurs in the critical path 934 * when e.g. a program is writing to a file that sits deep in a directory 935 * hierarchy. 936 */ 937 void 938 cache_update_fsmid(struct namecache *ncp) 939 { 940 struct vnode *vp; 941 struct namecache *scan; 942 943 /* 944 * Warning: even if we get a non-NULL vp it could still be in the 945 * middle of a recyclement. Don't do anything fancy, just set 946 * NCF_FSMID. 947 */ 948 if ((vp = ncp->nc_vp) != NULL) { 949 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) { 950 for (scan = ncp; scan; scan = scan->nc_parent) { 951 if (scan->nc_flag & NCF_FSMID) 952 break; 953 scan->nc_flag |= NCF_FSMID; 954 } 955 } 956 } else { 957 while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) { 958 ncp->nc_flag |= NCF_FSMID; 959 ncp = ncp->nc_parent; 960 } 961 } 962 } 963 964 void 965 cache_update_fsmid_vp(struct vnode *vp) 966 { 967 struct namecache *ncp; 968 struct namecache *scan; 969 970 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) { 971 for (scan = ncp; scan; scan = scan->nc_parent) { 972 if (scan->nc_flag & NCF_FSMID) 973 break; 974 scan->nc_flag |= NCF_FSMID; 975 } 976 } 977 } 978 979 /* 980 * If getattr is called on a vnode (e.g. a stat call), the filesystem 981 * may call this routine to determine if the namecache has the hierarchical 982 * change flag set, requiring the fsmid to be updated. 983 * 984 * Since 0 indicates no support, make sure the filesystem fsmid is at least 985 * 1. 986 */ 987 int 988 cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid) 989 { 990 struct namecache *ncp; 991 int changed = 0; 992 993 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) { 994 if (ncp->nc_flag & NCF_FSMID) { 995 ncp->nc_flag &= ~NCF_FSMID; 996 changed = 1; 997 } 998 } 999 if (*fsmid == 0) 1000 ++*fsmid; 1001 if (changed) 1002 ++*fsmid; 1003 return(changed); 1004 } 1005 1006 /* 1007 * Obtain the FSMID for a vnode for filesystems which do not support 1008 * a built-in FSMID. 
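 *
 * Illustrative sketch (not in the original source, and assuming the vattr
 * field used for this is va_fsmid): a filesystem without native FSMID
 * support can report the synthesized value from its getattr code:
 *
 *	vap->va_fsmid = cache_sync_fsmid_vp(vp);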
1009 */ 1010 int64_t 1011 cache_sync_fsmid_vp(struct vnode *vp) 1012 { 1013 struct namecache *ncp; 1014 1015 if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) { 1016 if (ncp->nc_flag & NCF_FSMID) { 1017 ncp->nc_flag &= ~NCF_FSMID; 1018 ++ncp->nc_fsmid; 1019 } 1020 return(ncp->nc_fsmid); 1021 } 1022 return(VNOVAL); 1023 } 1024 1025 /* 1026 * Convert a directory vnode to a namecache record without any other 1027 * knowledge of the topology. This ONLY works with directory vnodes and 1028 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the 1029 * returned ncp (if not NULL) will be held and unlocked. 1030 * 1031 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned. 1032 * If 'makeit' is 1 we attempt to track-down and create the namecache topology 1033 * for dvp. This will fail only if the directory has been deleted out from 1034 * under the caller. 1035 * 1036 * Callers must always check for a NULL return no matter the value of 'makeit'. 1037 * 1038 * To avoid underflowing the kernel stack each recursive call increments 1039 * the makeit variable. 1040 */ 1041 1042 static int cache_inefficient_scan(struct namecache *ncp, struct ucred *cred, 1043 struct vnode *dvp); 1044 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred, 1045 struct vnode **saved_dvp); 1046 1047 struct namecache * 1048 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit) 1049 { 1050 struct namecache *ncp; 1051 struct vnode *saved_dvp; 1052 struct vnode *pvp; 1053 int error; 1054 1055 ncp = NULL; 1056 saved_dvp = NULL; 1057 1058 /* 1059 * Temporary debugging code to force the directory scanning code 1060 * to be exercised. 1061 */ 1062 if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) { 1063 ncp = TAILQ_FIRST(&dvp->v_namecache); 1064 printf("cache_fromdvp: forcing %s\n", ncp->nc_name); 1065 goto force; 1066 } 1067 1068 /* 1069 * Loop until resolution, inside code will break out on error. 1070 */ 1071 while ((ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) { 1072 force: 1073 /* 1074 * If dvp is the root of its filesystem it should already 1075 * have a namecache pointer associated with it as a side 1076 * effect of the mount, but it may have been disassociated. 1077 */ 1078 if (dvp->v_flag & VROOT) { 1079 ncp = cache_get(dvp->v_mount->mnt_ncp); 1080 error = cache_resolve_mp(ncp); 1081 cache_put(ncp); 1082 if (ncvp_debug) { 1083 printf("cache_fromdvp: resolve root of mount %p error %d", 1084 dvp->v_mount, error); 1085 } 1086 if (error) { 1087 if (ncvp_debug) 1088 printf(" failed\n"); 1089 ncp = NULL; 1090 break; 1091 } 1092 if (ncvp_debug) 1093 printf(" succeeded\n"); 1094 continue; 1095 } 1096 1097 /* 1098 * If we are recursed too deeply resort to an O(n^2) 1099 * algorithm to resolve the namecache topology. The 1100 * resolved pvp is left referenced in saved_dvp to 1101 * prevent the tree from being destroyed while we loop. 1102 */ 1103 if (makeit > 20) { 1104 error = cache_fromdvp_try(dvp, cred, &saved_dvp); 1105 if (error) { 1106 printf("lookupdotdot(longpath) failed %d " 1107 "dvp %p\n", error, dvp); 1108 break; 1109 } 1110 continue; 1111 } 1112 1113 /* 1114 * Get the parent directory and resolve its ncp. 1115 */ 1116 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred); 1117 if (error) { 1118 printf("lookupdotdot failed %d dvp %p\n", error, dvp); 1119 break; 1120 } 1121 vn_unlock(pvp); 1122 1123 /* 1124 * Reuse makeit as a recursion depth counter. 
1125 */ 1126 ncp = cache_fromdvp(pvp, cred, makeit + 1); 1127 vrele(pvp); 1128 if (ncp == NULL) 1129 break; 1130 1131 /* 1132 * Do an inefficient scan of pvp (embodied by ncp) to look 1133 * for dvp. This will create a namecache record for dvp on 1134 * success. We loop up to recheck on success. 1135 * 1136 * ncp and dvp are both held but not locked. 1137 */ 1138 error = cache_inefficient_scan(ncp, cred, dvp); 1139 cache_drop(ncp); 1140 if (error) { 1141 printf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n", 1142 pvp, ncp->nc_name, dvp); 1143 ncp = NULL; 1144 break; 1145 } 1146 if (ncvp_debug) { 1147 printf("cache_fromdvp: scan %p (%s) succeeded\n", 1148 pvp, ncp->nc_name); 1149 } 1150 } 1151 if (ncp) 1152 cache_hold(ncp); 1153 if (saved_dvp) 1154 vrele(saved_dvp); 1155 return (ncp); 1156 } 1157 1158 /* 1159 * Go up the chain of parent directories until we find something 1160 * we can resolve into the namecache. This is very inefficient. 1161 */ 1162 static 1163 int 1164 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred, 1165 struct vnode **saved_dvp) 1166 { 1167 struct namecache *ncp; 1168 struct vnode *pvp; 1169 int error; 1170 static time_t last_fromdvp_report; 1171 1172 /* 1173 * Loop getting the parent directory vnode until we get something we 1174 * can resolve in the namecache. 1175 */ 1176 vref(dvp); 1177 for (;;) { 1178 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred); 1179 if (error) { 1180 vrele(dvp); 1181 return (error); 1182 } 1183 vn_unlock(pvp); 1184 if ((ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) { 1185 cache_hold(ncp); 1186 vrele(pvp); 1187 break; 1188 } 1189 if (pvp->v_flag & VROOT) { 1190 ncp = cache_get(pvp->v_mount->mnt_ncp); 1191 error = cache_resolve_mp(ncp); 1192 cache_unlock(ncp); 1193 vrele(pvp); 1194 if (error) { 1195 cache_drop(ncp); 1196 vrele(dvp); 1197 return (error); 1198 } 1199 break; 1200 } 1201 vrele(dvp); 1202 dvp = pvp; 1203 } 1204 if (last_fromdvp_report != time_second) { 1205 last_fromdvp_report = time_second; 1206 printf("Warning: extremely inefficient path resolution on %s\n", 1207 ncp->nc_name); 1208 } 1209 error = cache_inefficient_scan(ncp, cred, dvp); 1210 1211 /* 1212 * Hopefully dvp now has a namecache record associated with it. 1213 * Leave it referenced to prevent the kernel from recycling the 1214 * vnode. Otherwise extremely long directory paths could result 1215 * in endless recycling. 1216 */ 1217 if (*saved_dvp) 1218 vrele(*saved_dvp); 1219 *saved_dvp = dvp; 1220 return (error); 1221 } 1222 1223 1224 /* 1225 * Do an inefficient scan of the directory represented by ncp looking for 1226 * the directory vnode dvp. ncp must be held but not locked on entry and 1227 * will be held on return. dvp must be refd but not locked on entry and 1228 * will remain refd on return. 1229 * 1230 * Why do this at all? Well, due to its stateless nature the NFS server 1231 * converts file handles directly to vnodes without necessarily going through 1232 * the namecache ops that would otherwise create the namecache topology 1233 * leading to the vnode. We could either (1) Change the namecache algorithms 1234 * to allow disconnect namecache records that are re-merged opportunistically, 1235 * or (2) Make the NFS server backtrack and scan to recover a connected 1236 * namecache topology in order to then be able to issue new API lookups. 1237 * 1238 * It turns out that (1) is a huge mess. 
It takes a nice clean set of 1239 * namecache algorithms and introduces a lot of complication in every subsystem 1240 * that calls into the namecache to deal with the re-merge case, especially 1241 * since we are using the namecache to placehold negative lookups and the 1242 * vnode might not be immediately assigned. (2) is certainly far less 1243 * efficient then (1), but since we are only talking about directories here 1244 * (which are likely to remain cached), the case does not actually run all 1245 * that often and has the supreme advantage of not polluting the namecache 1246 * algorithms. 1247 */ 1248 static int 1249 cache_inefficient_scan(struct namecache *ncp, struct ucred *cred, 1250 struct vnode *dvp) 1251 { 1252 struct nlcomponent nlc; 1253 struct namecache *rncp; 1254 struct dirent *den; 1255 struct vnode *pvp; 1256 struct vattr vat; 1257 struct iovec iov; 1258 struct uio uio; 1259 int blksize; 1260 int eofflag; 1261 int bytes; 1262 char *rbuf; 1263 int error; 1264 1265 vat.va_blocksize = 0; 1266 if ((error = VOP_GETATTR(dvp, &vat)) != 0) 1267 return (error); 1268 if ((error = cache_vref(ncp, cred, &pvp)) != 0) 1269 return (error); 1270 if (ncvp_debug) 1271 printf("inefficient_scan: directory iosize %ld vattr fileid = %ld\n", vat.va_blocksize, (long)vat.va_fileid); 1272 if ((blksize = vat.va_blocksize) == 0) 1273 blksize = DEV_BSIZE; 1274 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK); 1275 rncp = NULL; 1276 1277 eofflag = 0; 1278 uio.uio_offset = 0; 1279 again: 1280 iov.iov_base = rbuf; 1281 iov.iov_len = blksize; 1282 uio.uio_iov = &iov; 1283 uio.uio_iovcnt = 1; 1284 uio.uio_resid = blksize; 1285 uio.uio_segflg = UIO_SYSSPACE; 1286 uio.uio_rw = UIO_READ; 1287 uio.uio_td = curthread; 1288 1289 if (ncvp_debug >= 2) 1290 printf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset); 1291 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL); 1292 if (error == 0) { 1293 den = (struct dirent *)rbuf; 1294 bytes = blksize - uio.uio_resid; 1295 1296 while (bytes > 0) { 1297 if (ncvp_debug >= 2) { 1298 printf("cache_inefficient_scan: %*.*s\n", 1299 den->d_namlen, den->d_namlen, 1300 den->d_name); 1301 } 1302 if (den->d_type != DT_WHT && 1303 den->d_ino == vat.va_fileid) { 1304 if (ncvp_debug) { 1305 printf("cache_inefficient_scan: " 1306 "MATCHED inode %ld path %s/%*.*s\n", 1307 vat.va_fileid, ncp->nc_name, 1308 den->d_namlen, den->d_namlen, 1309 den->d_name); 1310 } 1311 nlc.nlc_nameptr = den->d_name; 1312 nlc.nlc_namelen = den->d_namlen; 1313 rncp = cache_nlookup(ncp, &nlc); 1314 KKASSERT(rncp != NULL); 1315 break; 1316 } 1317 bytes -= _DIRENT_DIRSIZ(den); 1318 den = _DIRENT_NEXT(den); 1319 } 1320 if (rncp == NULL && eofflag == 0 && uio.uio_resid != blksize) 1321 goto again; 1322 } 1323 vrele(pvp); 1324 if (rncp) { 1325 if (rncp->nc_flag & NCF_UNRESOLVED) { 1326 cache_setvp(rncp, dvp); 1327 if (ncvp_debug >= 2) { 1328 printf("cache_inefficient_scan: setvp %s/%s = %p\n", 1329 ncp->nc_name, rncp->nc_name, dvp); 1330 } 1331 } else { 1332 if (ncvp_debug >= 2) { 1333 printf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n", 1334 ncp->nc_name, rncp->nc_name, dvp, 1335 rncp->nc_vp); 1336 } 1337 } 1338 if (rncp->nc_vp == NULL) 1339 error = rncp->nc_error; 1340 cache_put(rncp); 1341 } else { 1342 printf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n", 1343 dvp, ncp->nc_name); 1344 error = ENOENT; 1345 } 1346 kfree(rbuf, M_TEMP); 1347 return (error); 1348 } 1349 1350 /* 1351 * Zap a namecache entry. 
The ncp is unconditionally set to an unresolved 1352 * state, which disassociates it from its vnode or ncneglist. 1353 * 1354 * Then, if there are no additional references to the ncp and no children, 1355 * the ncp is removed from the topology and destroyed. This function will 1356 * also run through the nc_parent chain and destroy parent ncps if possible. 1357 * As a side benefit, it turns out the only conditions that allow running 1358 * up the chain are also the conditions to ensure no deadlock will occur. 1359 * 1360 * References and/or children may exist if the ncp is in the middle of the 1361 * topology, preventing the ncp from being destroyed. 1362 * 1363 * This function must be called with the ncp held and locked and will unlock 1364 * and drop it during zapping. 1365 */ 1366 static void 1367 cache_zap(struct namecache *ncp) 1368 { 1369 struct namecache *par; 1370 1371 /* 1372 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED. 1373 */ 1374 cache_setunresolved(ncp); 1375 1376 /* 1377 * Try to scrap the entry and possibly tail-recurse on its parent. 1378 * We only scrap unref'd (other then our ref) unresolved entries, 1379 * we do not scrap 'live' entries. 1380 */ 1381 while (ncp->nc_flag & NCF_UNRESOLVED) { 1382 /* 1383 * Someone other then us has a ref, stop. 1384 */ 1385 if (ncp->nc_refs > 1) 1386 goto done; 1387 1388 /* 1389 * We have children, stop. 1390 */ 1391 if (!TAILQ_EMPTY(&ncp->nc_list)) 1392 goto done; 1393 1394 /* 1395 * Remove ncp from the topology: hash table and parent linkage. 1396 */ 1397 if (ncp->nc_flag & NCF_HASHED) { 1398 ncp->nc_flag &= ~NCF_HASHED; 1399 LIST_REMOVE(ncp, nc_hash); 1400 } 1401 if ((par = ncp->nc_parent) != NULL) { 1402 par = cache_hold(par); 1403 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry); 1404 ncp->nc_parent = NULL; 1405 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list)) 1406 vdrop(par->nc_vp); 1407 } 1408 1409 /* 1410 * ncp should not have picked up any refs. Physically 1411 * destroy the ncp. 1412 */ 1413 KKASSERT(ncp->nc_refs == 1); 1414 --numunres; 1415 /* cache_unlock(ncp) not required */ 1416 ncp->nc_refs = -1; /* safety */ 1417 if (ncp->nc_name) 1418 kfree(ncp->nc_name, M_VFSCACHE); 1419 kfree(ncp, M_VFSCACHE); 1420 1421 /* 1422 * Loop on the parent (it may be NULL). Only bother looping 1423 * if the parent has a single ref (ours), which also means 1424 * we can lock it trivially. 1425 */ 1426 ncp = par; 1427 if (ncp == NULL) 1428 return; 1429 if (ncp->nc_refs != 1) { 1430 cache_drop(ncp); 1431 return; 1432 } 1433 KKASSERT(par->nc_exlocks == 0); 1434 cache_lock(ncp); 1435 } 1436 done: 1437 cache_unlock(ncp); 1438 atomic_subtract_int(&ncp->nc_refs, 1); 1439 } 1440 1441 static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW; 1442 1443 static __inline 1444 void 1445 cache_hysteresis(void) 1446 { 1447 /* 1448 * Don't cache too many negative hits. We use hysteresis to reduce 1449 * the impact on the critical path. 1450 */ 1451 switch(cache_hysteresis_state) { 1452 case CHI_LOW: 1453 if (numneg > MINNEG && numneg * ncnegfactor > numcache) { 1454 cache_cleanneg(10); 1455 cache_hysteresis_state = CHI_HIGH; 1456 } 1457 break; 1458 case CHI_HIGH: 1459 if (numneg > MINNEG * 9 / 10 && 1460 numneg * ncnegfactor * 9 / 10 > numcache 1461 ) { 1462 cache_cleanneg(10); 1463 } else { 1464 cache_hysteresis_state = CHI_LOW; 1465 } 1466 break; 1467 } 1468 } 1469 1470 /* 1471 * NEW NAMECACHE LOOKUP API 1472 * 1473 * Lookup an entry in the cache. 
A locked, referenced, non-NULL 1474 * entry is *always* returned, even if the supplied component is illegal. 1475 * The resulting namecache entry should be returned to the system with 1476 * cache_put() or cache_unlock() + cache_drop(). 1477 * 1478 * namecache locks are recursive but care must be taken to avoid lock order 1479 * reversals. 1480 * 1481 * Nobody else will be able to manipulate the associated namespace (e.g. 1482 * create, delete, rename, rename-target) until the caller unlocks the 1483 * entry. 1484 * 1485 * The returned entry will be in one of three states: positive hit (non-null 1486 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set). 1487 * Unresolved entries must be resolved through the filesystem to associate the 1488 * vnode and/or determine whether a positive or negative hit has occured. 1489 * 1490 * It is not necessary to lock a directory in order to lock namespace under 1491 * that directory. In fact, it is explicitly not allowed to do that. A 1492 * directory is typically only locked when being created, renamed, or 1493 * destroyed. 1494 * 1495 * The directory (par) may be unresolved, in which case any returned child 1496 * will likely also be marked unresolved. Likely but not guarenteed. Since 1497 * the filesystem lookup requires a resolved directory vnode the caller is 1498 * responsible for resolving the namecache chain top-down. This API 1499 * specifically allows whole chains to be created in an unresolved state. 1500 */ 1501 struct namecache * 1502 cache_nlookup(struct namecache *par, struct nlcomponent *nlc) 1503 { 1504 struct namecache *ncp; 1505 struct namecache *new_ncp; 1506 struct nchashhead *nchpp; 1507 u_int32_t hash; 1508 globaldata_t gd; 1509 1510 numcalls++; 1511 gd = mycpu; 1512 1513 /* 1514 * Try to locate an existing entry 1515 */ 1516 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 1517 hash = fnv_32_buf(&par, sizeof(par), hash); 1518 new_ncp = NULL; 1519 restart: 1520 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 1521 numchecks++; 1522 1523 /* 1524 * Zap entries that have timed out. 1525 */ 1526 if (ncp->nc_timeout && 1527 (int)(ncp->nc_timeout - ticks) < 0 && 1528 (ncp->nc_flag & NCF_UNRESOLVED) == 0 && 1529 ncp->nc_exlocks == 0 1530 ) { 1531 cache_zap(cache_get(ncp)); 1532 goto restart; 1533 } 1534 1535 /* 1536 * Break out if we find a matching entry. Note that 1537 * UNRESOLVED entries may match, but DESTROYED entries 1538 * do not. 1539 */ 1540 if (ncp->nc_parent == par && 1541 ncp->nc_nlen == nlc->nlc_namelen && 1542 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 && 1543 (ncp->nc_flag & NCF_DESTROYED) == 0 1544 ) { 1545 if (cache_get_nonblock(ncp) == 0) { 1546 if (new_ncp) 1547 cache_free(new_ncp); 1548 goto found; 1549 } 1550 cache_get(ncp); 1551 cache_put(ncp); 1552 goto restart; 1553 } 1554 } 1555 1556 /* 1557 * We failed to locate an entry, create a new entry and add it to 1558 * the cache. We have to relookup after possibly blocking in 1559 * malloc. 1560 */ 1561 if (new_ncp == NULL) { 1562 new_ncp = cache_alloc(nlc->nlc_namelen); 1563 goto restart; 1564 } 1565 1566 ncp = new_ncp; 1567 1568 /* 1569 * Initialize as a new UNRESOLVED entry, lock (non-blocking), 1570 * and link to the parent. The mount point is usually inherited 1571 * from the parent unless this is a special case such as a mount 1572 * point where nlc_namelen is 0. The caller is responsible for 1573 * setting nc_mount in that case. If nlc_namelen is 0 nc_name will 1574 * be NULL. 
1575 */ 1576 if (nlc->nlc_namelen) { 1577 bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen); 1578 ncp->nc_name[nlc->nlc_namelen] = 0; 1579 ncp->nc_mount = par->nc_mount; 1580 } 1581 nchpp = NCHHASH(hash); 1582 LIST_INSERT_HEAD(nchpp, ncp, nc_hash); 1583 ncp->nc_flag |= NCF_HASHED; 1584 cache_link_parent(ncp, par); 1585 found: 1586 /* 1587 * stats and namecache size management 1588 */ 1589 if (ncp->nc_flag & NCF_UNRESOLVED) 1590 ++gd->gd_nchstats->ncs_miss; 1591 else if (ncp->nc_vp) 1592 ++gd->gd_nchstats->ncs_goodhits; 1593 else 1594 ++gd->gd_nchstats->ncs_neghits; 1595 cache_hysteresis(); 1596 return(ncp); 1597 } 1598 1599 /* 1600 * Locate the mount point under a namecache entry. We locate a special 1601 * child ncp with a 0-length name and retrieve the mount point from it. 1602 */ 1603 struct mount * 1604 cache_findmount(struct namecache *par) 1605 { 1606 struct namecache *ncp; 1607 u_int32_t hash; 1608 1609 hash = FNV1_32_INIT; /* special 0-length name */ 1610 hash = fnv_32_buf(&par, sizeof(par), hash); 1611 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 1612 if (ncp->nc_nlen == 0 && (ncp->nc_flag & NCF_MOUNTPT)) 1613 return(ncp->nc_mount); 1614 } 1615 return(NULL); 1616 } 1617 1618 /* 1619 * Given a locked ncp, validate that the vnode, if present, is actually 1620 * usable. If it is not usable set the ncp to an unresolved state. 1621 */ 1622 void 1623 cache_validate(struct namecache *ncp) 1624 { 1625 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 1626 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 1627 cache_setunresolved(ncp); 1628 } 1629 } 1630 1631 /* 1632 * Resolve an unresolved namecache entry, generally by looking it up. 1633 * The passed ncp must be locked and refd. 1634 * 1635 * Theoretically since a vnode cannot be recycled while held, and since 1636 * the nc_parent chain holds its vnode as long as children exist, the 1637 * direct parent of the cache entry we are trying to resolve should 1638 * have a valid vnode. If not then generate an error that we can 1639 * determine is related to a resolver bug. 1640 * 1641 * However, if a vnode was in the middle of a recyclement when the NCP 1642 * got locked, ncp->nc_vp might point to a vnode that is about to become 1643 * invalid. cache_resolve() handles this case by unresolving the entry 1644 * and then re-resolving it. 1645 * 1646 * Note that successful resolution does not necessarily return an error 1647 * code of 0. If the ncp resolves to a negative cache hit then ENOENT 1648 * will be returned. 1649 */ 1650 int 1651 cache_resolve(struct namecache *ncp, struct ucred *cred) 1652 { 1653 struct namecache *par; 1654 int error; 1655 1656 restart: 1657 /* 1658 * If the ncp is already resolved we have nothing to do. However, 1659 * we do want to guarentee that a usable vnode is returned when 1660 * a vnode is present, so make sure it hasn't been reclaimed. 1661 */ 1662 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 1663 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 1664 cache_setunresolved(ncp); 1665 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) 1666 return (ncp->nc_error); 1667 } 1668 1669 /* 1670 * Mount points need special handling because the parent does not 1671 * belong to the same filesystem as the ncp. 1672 */ 1673 if (ncp->nc_flag & NCF_MOUNTPT) 1674 return (cache_resolve_mp(ncp)); 1675 1676 /* 1677 * We expect an unbroken chain of ncps to at least the mount point, 1678 * and even all the way to root (but this code doesn't have to go 1679 * past the mount point). 
1680 */ 1681 if (ncp->nc_parent == NULL) { 1682 printf("EXDEV case 1 %p %*.*s\n", ncp, 1683 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name); 1684 ncp->nc_error = EXDEV; 1685 return(ncp->nc_error); 1686 } 1687 1688 /* 1689 * The vp's of the parent directories in the chain are held via vhold() 1690 * due to the existance of the child, and should not disappear. 1691 * However, there are cases where they can disappear: 1692 * 1693 * - due to filesystem I/O errors. 1694 * - due to NFS being stupid about tracking the namespace and 1695 * destroys the namespace for entire directories quite often. 1696 * - due to forced unmounts. 1697 * - due to an rmdir (parent will be marked DESTROYED) 1698 * 1699 * When this occurs we have to track the chain backwards and resolve 1700 * it, looping until the resolver catches up to the current node. We 1701 * could recurse here but we might run ourselves out of kernel stack 1702 * so we do it in a more painful manner. This situation really should 1703 * not occur all that often, or if it does not have to go back too 1704 * many nodes to resolve the ncp. 1705 */ 1706 while (ncp->nc_parent->nc_vp == NULL) { 1707 /* 1708 * This case can occur if a process is CD'd into a 1709 * directory which is then rmdir'd. If the parent is marked 1710 * destroyed there is no point trying to resolve it. 1711 */ 1712 if (ncp->nc_parent->nc_flag & NCF_DESTROYED) 1713 return(ENOENT); 1714 1715 par = ncp->nc_parent; 1716 while (par->nc_parent && par->nc_parent->nc_vp == NULL) 1717 par = par->nc_parent; 1718 if (par->nc_parent == NULL) { 1719 printf("EXDEV case 2 %*.*s\n", 1720 par->nc_nlen, par->nc_nlen, par->nc_name); 1721 return (EXDEV); 1722 } 1723 printf("[diagnostic] cache_resolve: had to recurse on %*.*s\n", 1724 par->nc_nlen, par->nc_nlen, par->nc_name); 1725 /* 1726 * The parent is not set in stone, ref and lock it to prevent 1727 * it from disappearing. Also note that due to renames it 1728 * is possible for our ncp to move and for par to no longer 1729 * be one of its parents. We resolve it anyway, the loop 1730 * will handle any moves. 1731 */ 1732 cache_get(par); 1733 if (par->nc_flag & NCF_MOUNTPT) { 1734 cache_resolve_mp(par); 1735 } else if (par->nc_parent->nc_vp == NULL) { 1736 printf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name); 1737 cache_put(par); 1738 continue; 1739 } else if (par->nc_flag & NCF_UNRESOLVED) { 1740 par->nc_error = VOP_NRESOLVE(par, cred); 1741 } 1742 if ((error = par->nc_error) != 0) { 1743 if (par->nc_error != EAGAIN) { 1744 printf("EXDEV case 3 %*.*s error %d\n", 1745 par->nc_nlen, par->nc_nlen, par->nc_name, 1746 par->nc_error); 1747 cache_put(par); 1748 return(error); 1749 } 1750 printf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n", 1751 par, par->nc_nlen, par->nc_nlen, par->nc_name); 1752 } 1753 cache_put(par); 1754 /* loop */ 1755 } 1756 1757 /* 1758 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected 1759 * ncp's and reattach them. If this occurs the original ncp is marked 1760 * EAGAIN to force a relookup. 1761 * 1762 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed 1763 * ncp must already be resolved. 
/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to NFS's mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics of this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct namecache *ncp)
{
        struct vnode *vp;
        struct mount *mp = ncp->nc_mount;
        int error;

        KKASSERT(mp != NULL);

        /*
         * If the ncp is already resolved we have nothing to do.  However,
         * we do want to guarantee that a usable vnode is returned when
         * a vnode is present, so make sure it hasn't been reclaimed.
         */
        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
                if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                        cache_setunresolved(ncp);
        }

        if (ncp->nc_flag & NCF_UNRESOLVED) {
                cache_unlock(ncp);
                while (vfs_busy(mp, 0))
                        ;
                error = VFS_ROOT(mp, &vp);
                cache_lock(ncp);

                /*
                 * recheck the ncp state after relocking.
                 */
                if (ncp->nc_flag & NCF_UNRESOLVED) {
                        ncp->nc_error = error;
                        if (error == 0) {
                                cache_setvp(ncp, vp);
                                vput(vp);
                        } else {
                                printf("[diagnostic] cache_resolve_mp: failed to resolve mount %p\n", mp);
                                cache_setvp(ncp, NULL);
                        }
                } else if (error == 0) {
                        vput(vp);
                }
                vfs_unbusy(mp);
        }
        return(ncp->nc_error);
}

void
cache_cleanneg(int count)
{
        struct namecache *ncp;

        /*
         * Automode from the vnlru proc - clean out 10% of the negative cache
         * entries.
         */
        if (count == 0)
                count = numneg / 10 + 1;

        /*
         * Attempt to clean out the specified number of negative cache
         * entries.
         */
        while (count) {
                ncp = TAILQ_FIRST(&ncneglist);
                if (ncp == NULL) {
                        KKASSERT(numneg == 0);
                        break;
                }
                TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
                TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
                if (cache_get_nonblock(ncp) == 0)
                        cache_zap(ncp);
                --count;
        }
}

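/*
 * Illustrative sketch (compiled out): how a periodic scan, similar in
 * spirit to the vnlru process or cache_hysteresis(), might drive
 * cache_cleanneg().  The thresholds here are made up; a count of 0 asks
 * for the automatic "numneg / 10 + 1" batch implemented above.
 */
#if 0
static void
example_trim_negative_cache(void)
{
        if (numneg > MINNEG * 4)
                cache_cleanneg(0);      /* automode: clean numneg/10 + 1 */
        else if (numneg > MINNEG)
                cache_cleanneg(16);     /* arbitrary small fixed batch */
}
#endif
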
/*
 * Rehash a ncp.  Rehashing is typically required if the name changes (should
 * not generally occur) or the parent link changes.  This function will
 * unhash the ncp if the ncp is no longer hashable.
 */
static void
cache_rehash(struct namecache *ncp)
{
        struct nchashhead *nchpp;
        u_int32_t hash;

        if (ncp->nc_flag & NCF_HASHED) {
                ncp->nc_flag &= ~NCF_HASHED;
                LIST_REMOVE(ncp, nc_hash);
        }
        if (ncp->nc_nlen && ncp->nc_parent) {
                hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
                hash = fnv_32_buf(&ncp->nc_parent,
                                  sizeof(ncp->nc_parent), hash);
                nchpp = NCHHASH(hash);
                LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
                ncp->nc_flag |= NCF_HASHED;
        }
}

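/*
 * Illustrative sketch (compiled out): the hash invariant cache_rehash()
 * maintains.  The bucket is derived from the name bytes followed by the
 * parent pointer, exactly as cache_nlookup() computes it, which is why a
 * rename or a reparent must rehash the entry.  The helper name is
 * hypothetical.
 */
#if 0
static struct nchashhead *
example_bucket_for(struct namecache *par, const char *name, int nlen)
{
        u_int32_t hash;

        hash = fnv_32_buf(name, nlen, FNV1_32_INIT);
        hash = fnv_32_buf(&par, sizeof(par), hash);
        return (NCHHASH(hash));
}
#endif
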
/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
        int i;
        globaldata_t gd;

        /* initialize per-cpu namecache effectiveness statistics. */
        for (i = 0; i < ncpus; ++i) {
                gd = globaldata_find(i);
                gd->gd_nchstats = &nchstats[i];
        }
        TAILQ_INIT(&ncneglist);
        nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
        nclockwarn = 1 * hz;
}

/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
struct namecache *
cache_allocroot(struct mount *mp, struct vnode *vp)
{
        struct namecache *ncp = cache_alloc(0);

        ncp->nc_flag |= NCF_MOUNTPT | NCF_ROOT;
        ncp->nc_mount = mp;
        cache_setvp(ncp, vp);
        return(ncp);
}

/*
 * vfs_cache_setroot()
 *
 *      Create an association between the root of our namecache and
 *      the root vnode.  This routine may be called several times during
 *      booting.
 *
 *      If the caller intends to save the returned namecache pointer somewhere
 *      it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct namecache *ncp)
{
        struct vnode *ovp;
        struct namecache *oncp;

        ovp = rootvnode;
        oncp = rootncp;
        rootvnode = nvp;
        rootncp = ncp;

        if (ovp)
                vrele(ovp);
        if (oncp)
                cache_drop(oncp);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
        cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
        struct nchashhead *nchpp;
        struct namecache *ncp, *nnp;

        /*
         * Scan hash tables for applicable entries.
         */
        for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
                ncp = LIST_FIRST(nchpp);
                if (ncp)
                        cache_hold(ncp);
                while (ncp) {
                        nnp = LIST_NEXT(ncp, nc_hash);
                        if (nnp)
                                cache_hold(nnp);
                        if (ncp->nc_mount == mp) {
                                cache_lock(ncp);
                                cache_zap(ncp);
                        } else {
                                cache_drop(ncp);
                        }
                        ncp = nnp;
                }
        }
}

/*
 * Create a new (theoretically) unique fsmid
 */
int64_t
cache_getnewfsmid(void)
{
        static int fsmid_roller;
        int64_t fsmid;

        ++fsmid_roller;
        fsmid = ((int64_t)time_second << 32) |
                (fsmid_roller & 0x7FFFFFFF);
        return (fsmid);
}


static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

int
sys___getcwd(struct __getcwd_args *uap)
{
        int buflen;
        int error;
        char *buf;
        char *bp;

        if (disablecwd)
                return (ENODEV);

        buflen = uap->buflen;
        if (buflen < 2)
                return (EINVAL);
        if (buflen > MAXPATHLEN)
                buflen = MAXPATHLEN;

        buf = kmalloc(buflen, M_TEMP, M_WAITOK);
        bp = kern_getcwd(buf, buflen, &error);
        if (error == 0)
                error = copyout(bp, uap->buf, strlen(bp) + 1);
        kfree(buf, M_TEMP);
        return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
        struct proc *p = curproc;
        char *bp;
        int i, slash_prefixed;
        struct filedesc *fdp;
        struct namecache *ncp;

        numcwdcalls++;
        bp = buf;
        bp += buflen - 1;
        *bp = '\0';
        fdp = p->p_fd;
        slash_prefixed = 0;

        ncp = fdp->fd_ncdir;
        while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
                if (ncp->nc_flag & NCF_MOUNTPT) {
                        if (ncp->nc_mount == NULL) {
                                *error = EBADF;         /* forced unmount? */
                                return(NULL);
                        }
                        ncp = ncp->nc_parent;
                        continue;
                }
                for (i = ncp->nc_nlen - 1; i >= 0; i--) {
                        if (bp == buf) {
                                numcwdfail4++;
                                *error = ENOMEM;
                                return(NULL);
                        }
                        *--bp = ncp->nc_name[i];
                }
                if (bp == buf) {
                        numcwdfail4++;
                        *error = ENOMEM;
                        return(NULL);
                }
                *--bp = '/';
                slash_prefixed = 1;
                ncp = ncp->nc_parent;
        }
        if (ncp == NULL) {
                numcwdfail2++;
                *error = ENOENT;
                return(NULL);
        }
        if (!slash_prefixed) {
                if (bp == buf) {
                        numcwdfail4++;
                        *error = ENOMEM;
                        return(NULL);
                }
                *--bp = '/';
        }
        numcwdfound++;
        *error = 0;
        return (bp);
}

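/*
 * Illustrative sketch (compiled out): an in-kernel consumer of
 * kern_getcwd().  The helper name is hypothetical.  Note the contract:
 * the path is built from the end of the supplied buffer and the returned
 * pointer aliases that buffer, so the caller frees 'buf', never 'bp'.
 */
#if 0
static void
example_print_cwd(void)
{
        char *buf;
        char *bp;
        int error;

        buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        bp = kern_getcwd(buf, MAXPATHLEN, &error);
        if (error == 0)
                printf("cwd: %s\n", bp);
        kfree(buf, M_TEMP);
}
#endif
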
/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)                                                  \
        static u_int name;                                              \
        SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
cache_fullpath(struct proc *p, struct namecache *ncp, char **retbuf, char **freebuf)
{
        char *bp, *buf;
        int i, slash_prefixed;
        struct namecache *fd_nrdir;

        numfullpathcalls--;

        buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        bp = buf + MAXPATHLEN - 1;
        *bp = '\0';
        if (p != NULL)
                fd_nrdir = p->p_fd->fd_nrdir;
        else
                fd_nrdir = NULL;
        slash_prefixed = 0;
        while (ncp && ncp != fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
                if (ncp->nc_flag & NCF_MOUNTPT) {
                        if (ncp->nc_mount == NULL) {
                                kfree(buf, M_TEMP);
                                return(EBADF);
                        }
                        ncp = ncp->nc_parent;
                        continue;
                }
                for (i = ncp->nc_nlen - 1; i >= 0; i--) {
                        if (bp == buf) {
                                numfullpathfail4++;
                                kfree(buf, M_TEMP);
                                return(ENOMEM);
                        }
                        *--bp = ncp->nc_name[i];
                }
                if (bp == buf) {
                        numfullpathfail4++;
                        kfree(buf, M_TEMP);
                        return(ENOMEM);
                }
                *--bp = '/';
                slash_prefixed = 1;
                ncp = ncp->nc_parent;
        }
        if (ncp == NULL) {
                numfullpathfail2++;
                kfree(buf, M_TEMP);
                return(ENOENT);
        }
        if (p != NULL && (ncp->nc_flag & NCF_ROOT) && ncp != fd_nrdir) {
                bp = buf + MAXPATHLEN - 1;
                *bp = '\0';
                slash_prefixed = 0;
        }
        if (!slash_prefixed) {
                if (bp == buf) {
                        numfullpathfail4++;
                        kfree(buf, M_TEMP);
                        return(ENOMEM);
                }
                *--bp = '/';
        }
        numfullpathfound++;
        *retbuf = bp;
        *freebuf = buf;

        return(0);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
        struct namecache *ncp;

        numfullpathcalls++;
        if (disablefullpath)
                return (ENODEV);

        if (p == NULL)
                return (EINVAL);

        /* vn is NULL, client wants us to use p->p_textvp */
        if (vn == NULL) {
                if ((vn = p->p_textvp) == NULL)
                        return (EINVAL);
        }
        TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
                if (ncp->nc_nlen)
                        break;
        }
        if (ncp == NULL)
                return (EINVAL);

        numfullpathcalls--;
        return(cache_fullpath(p, ncp, retbuf, freebuf));
}

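/*
 * Illustrative sketch (compiled out): a caller of vn_fullpath().  The
 * helper name is hypothetical.  On success 'retbuf' points into the
 * MAXPATHLEN allocation returned via 'freebuf', so the caller uses
 * retbuf and then frees freebuf (and only freebuf).
 */
#if 0
static void
example_log_vnode_path(struct proc *p, struct vnode *vp)
{
        char *retbuf;
        char *freebuf;

        if (vn_fullpath(p, vp, &retbuf, &freebuf) == 0) {
                printf("vnode %p resolves to %s\n", vp, retbuf);
                kfree(freebuf, M_TEMP);
        }
}
#endif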