/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.72 2006/06/05 07:26:10 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
#define MINNEG		1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */
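/*
 * Illustrative sketch (not part of the original code): how a component
 * lookup finds its hash chain.  The key mixes the FNV-1 hash of the
 * component name with the address of the parent ncp, and NCHHASH()
 * masks the result down to a bucket; cache_nlookup() and cache_rehash()
 * below do exactly this:
 *
 *	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&par, sizeof(par), hash);
 *	LIST_FOREACH(ncp, NCHHASH(hash), nc_hash)
 *		...
 */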
/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long	numneg;			/* number of negative cache entries */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;		/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct namecache *ncp);
static void cache_rehash(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);
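/*
 * Userland usage sketch (hypothetical, not part of this file): the opaque
 * export above is one struct nchstats per cpu, so a consumer can recover
 * the per-CPU breakdown with something like
 *
 *	sysctlbyname("vfs.cache.nchstats", NULL, &len, NULL, 0);
 *	n = len / sizeof(struct nchstats);
 *
 * and then aggregate or display the n entries as it sees fit.
 */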
/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't use one ourselves.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}

/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs > 0);
	if (ncp->nc_refs == 1 &&
	    (ncp->nc_flag & NCF_UNRESOLVED) &&
	    TAILQ_EMPTY(&ncp->nc_list)
	) {
		KKASSERT(ncp->nc_exlocks == 0);
		cache_lock(ncp);
		cache_zap(ncp);
	} else {
		atomic_subtract_int(&ncp->nc_refs, 1);
	}
}

/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 *
 * If we are creating a child under an oldapi parent we must mark the
 * child as being an oldapi entry as well.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent, cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;

	if ((par = ncp->nc_parent) != NULL) {
		ncp->nc_parent = NULL;
		par = cache_hold(par);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			vdrop(par->nc_vp);
		cache_drop(par);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = malloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = malloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	/*
	 * Construct a fake FSMID based on the time of day and a 32 bit
	 * roller for uniqueness.  This is used to generate a useful
	 * FSMID for filesystems which do not support it.
	 */
	ncp->nc_fsmid = cache_getnewfsmid();
	TAILQ_INIT(&ncp->nc_list);
	cache_lock(ncp);
	return(ncp);
}

static void
cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		free(ncp->nc_name, M_VFSCACHE);
	free(ncp, M_VFSCACHE);
}

/*
 * Ref and deref a namecache structure.
 *
 * Warning: caller may hold an unrelated read spinlock, which means we can't
 * use read spinlocks here.
 */
struct namecache *
cache_hold(struct namecache *ncp)
{
	return(_cache_hold(ncp));
}

void
cache_drop(struct namecache *ncp)
{
	_cache_drop(ncp);
}
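/*
 * Hold/drop usage sketch (illustrative): traversals hold the next entry
 * before working on the current one so neither can be destroyed out from
 * under the loop, as cache_inval_vp() does below:
 *
 *	ncp = TAILQ_FIRST(&vp->v_namecache);
 *	if (ncp)
 *		cache_hold(ncp);
 *	while (ncp) {
 *		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
 *			cache_hold(next);
 *		... operate on ncp, then cache_drop(ncp) or cache_put(ncp) ...
 *		ncp = next;
 *	}
 */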
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 * or recycled, but it does NOT help you if the vnode had already initiated
 * a recyclement.  If this is important, use cache_get() rather than
 * cache_lock() (and deal with the differences in the way the refs counter
 * is handled).  Or, alternatively, make an unconditional call to
 * cache_validate() or cache_resolve() after cache_lock() returns.
 */
void
cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		if (ncp->nc_exlocks == 0) {
			ncp->nc_exlocks = 1;
			ncp->nc_locktd = td;
			/*
			 * The vp associated with a locked ncp must be held
			 * to prevent it from being recycled (which would
			 * cause the ncp to become unresolved).
			 *
			 * WARNING!  If VRECLAIMED is set the vnode could
			 * already be in the middle of a recycle.  Callers
			 * should not assume that nc_vp is usable when
			 * not NULL.  cache_vref() or cache_vget() must be
			 * called.
			 *
			 * XXX loop on race for later MPSAFE work.
			 */
			if (ncp->nc_vp)
				vhold(ncp->nc_vp);
			break;
		}
		if (ncp->nc_locktd == td) {
			++ncp->nc_exlocks;
			break;
		}
		ncp->nc_flag |= NCF_LOCKREQ;
		if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
			if (didwarn)
				continue;
			didwarn = 1;
			printf("[diagnostic] cache_lock: blocked on %p", ncp);
			if ((ncp->nc_flag & NCF_MOUNTPT) && ncp->nc_mount)
				printf(" [MOUNTFROM %s]\n",
				       ncp->nc_mount->mnt_stat.f_mntfromname);
			else
				printf(" \"%*.*s\"\n",
				       ncp->nc_nlen, ncp->nc_nlen,
				       ncp->nc_name);
		}
	}

	if (didwarn == 1) {
		printf("[diagnostic] cache_lock: unblocked %*.*s\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
	}
}
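/*
 * Locking usage sketch (illustrative): a namespace operation on an
 * already-referenced entry is bracketed as
 *
 *	cache_hold(ncp);
 *	cache_lock(ncp);
 *	... resolve, modify, or invalidate ncp ...
 *	cache_unlock(ncp);
 *	cache_drop(ncp);
 *
 * which is exactly the pairing that cache_get()/cache_put() bundle up
 * below.
 */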
int
cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;

	KKASSERT(ncp->nc_refs != 0);
	td = curthread;
	if (ncp->nc_exlocks == 0) {
		ncp->nc_exlocks = 1;
		ncp->nc_locktd = td;
		/*
		 * The vp associated with a locked ncp must be held
		 * to prevent it from being recycled (which would
		 * cause the ncp to become unresolved).
		 *
		 * WARNING!  If VRECLAIMED is set the vnode could
		 * already be in the middle of a recycle.  Callers
		 * should not assume that nc_vp is usable when
		 * not NULL.  cache_vref() or cache_vget() must be
		 * called.
		 *
		 * XXX loop on race for later MPSAFE work.
		 */
		if (ncp->nc_vp)
			vhold(ncp->nc_vp);
		return(0);
	} else {
		return(EWOULDBLOCK);
	}
}

void
cache_unlock(struct namecache *ncp)
{
	thread_t td = curthread;

	KKASSERT(ncp->nc_refs > 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);
	if (--ncp->nc_exlocks == 0) {
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
		ncp->nc_locktd = NULL;
		if (ncp->nc_flag & NCF_LOCKREQ) {
			ncp->nc_flag &= ~NCF_LOCKREQ;
			wakeup(ncp);
		}
	}
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.  We want cache_get() to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
struct namecache *
cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		cache_setunresolved(ncp);
	return(ncp);
}

int
cache_get_nonblock(struct namecache *ncp)
{
	/* XXX MP */
	if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
		_cache_hold(ncp);
		cache_lock(ncp);
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			cache_setunresolved(ncp);
		return(0);
	}
	return(EWOULDBLOCK);
}

void
cache_put(struct namecache *ncp)
{
	cache_unlock(ncp);
	_cache_drop(ncp);
}

/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
void
cache_setvp(struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	ncp->nc_vp = vp;
	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		++numcache;
		ncp->nc_error = 0;
	} else {
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		ncp->nc_error = ENOENT;
	}
	ncp->nc_flag &= ~NCF_UNRESOLVED;
}

void
cache_settimeout(struct namecache *ncp, int nticks)
{
	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}
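/*
 * Example (illustrative): an NFS-style 3 second attribute timeout would
 * be installed with cache_settimeout(ncp, 3 * hz).  A nc_timeout of 0
 * means "no timeout", which is why the sum above is bumped to 1 when the
 * tick arithmetic happens to land exactly on 0.
 */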
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
 * in a create, properly propagates the flag up the chain.
 */
void
cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		++numunres;
		if ((vp = ncp->nc_vp) != NULL) {
			--numcache;
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (ncp->nc_flag & NCF_FSMID)
				vupdatefsmid(vp);
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
				  NCF_FSMID);
	}
}
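/*
 * Invalidation usage sketch (illustrative): the two flavors accepted by
 * cache_inval() below are
 *
 *	cache_inval(ncp, CINV_DESTROY);			 rmdir/remove target
 *	cache_inval(ncp, CINV_DESTROY | CINV_CHILDREN);	 whole subtree
 *
 * cache_purge() at the bottom of this file is simply the second form,
 * applied through the vnode via cache_inval_vp().
 */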
/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 */
int
cache_inval(struct namecache *ncp, int flags)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;

	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		cache_hold(kid);
		cache_unlock(ncp);
		while (kid) {
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				cache_lock(kid);
				rcnt += cache_inval(kid, flags & ~CINV_DESTROY);
				cache_unlock(kid);
			}
			cache_drop(kid);
			kid = nextkid;
		}
		cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}

/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: unlike the namecache topology which guarantees that ncp's will not
 * be ripped out of the topology while held, the vnode's v_namecache list
 * has no such restriction.  NCP's can be ripped out of the list at virtually
 * any time if not locked, even if held.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			cache_hold(next);
		cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			printf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			cache_put(ncp);
			if (next)
				cache_drop(next);
			goto restart;
		}
		cache_inval(ncp, flags);
		cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			printf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			cache_drop(ncp);
			goto restart;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  Both will be set to unresolved, any children of tncp
 * will be disconnected (the prior contents of the target are assumed to be
 * destroyed by the rename operation, e.g. renaming over an empty directory),
 * and all children of fncp will be moved to tncp.
 *
 * XXX the disconnection could pose a problem, check code paths to make
 * sure any code that blocks can handle the parent being changed out from
 * under it.  Maybe we should lock the children (watch out for deadlocks) ?
 *
 * After we return the caller has the option of calling cache_setvp() if
 * the vnode of the new target ncp is known.
 *
 * Any process CD'd into any of the children will no longer be able to ".."
 * back out.  An rm -rf can cause this situation to occur.
 */
void
cache_rename(struct namecache *fncp, struct namecache *tncp)
{
	struct namecache *scan;
	int didwarn = 0;

	cache_setunresolved(fncp);
	cache_setunresolved(tncp);
	while (cache_inval(tncp, CINV_CHILDREN) != 0) {
		if (didwarn++ % 10 == 0) {
			printf("Warning: cache_rename: race during "
				"rename %s->%s\n",
				fncp->nc_name, tncp->nc_name);
		}
		tsleep(tncp, 0, "mvrace", hz / 10);
		cache_setunresolved(tncp);
	}
	while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
		cache_hold(scan);
		cache_unlink_parent(scan);
		cache_link_parent(scan, tncp);
		if (scan->nc_flag & NCF_HASHED)
			cache_rehash(scan);
		cache_drop(scan);
	}
}

/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED or LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The main race we have to deal with is a namecache zap.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct namecache *ncp, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_lock(ncp);
		error = cache_resolve(ncp, cred);
		cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Accessing the vnode from the namecache is a bit
		 * dangerous.  Because there are no refs on the vnode, it
		 * could be in the middle of a reclaim.
		 */
		if (vp->v_flag & VRECLAIMED) {
			printf("Warning: vnode reclaim race detected in cache_vget on %p (%s)\n", vp, ncp->nc_name);
			cache_lock(ncp);
			cache_setunresolved(ncp);
			cache_unlock(ncp);
			goto again;
		}
		error = vget(vp, lk_type);
		if (error) {
			if (vp != ncp->nc_vp)
				goto again;
			vp = NULL;
		} else if (vp != ncp->nc_vp) {
			vput(vp);
			goto again;
		} else if (vp->v_flag & VRECLAIMED) {
			panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
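/*
 * Usage sketch (illustrative): nlookup-style consumers convert an ncp
 * into a usable vnode with
 *
 *	error = cache_vget(ncp, cred, LK_EXCLUSIVE, &vp);
 *	if (error == 0) {
 *		... use the locked, referenced vp ...
 *		vput(vp);
 *	}
 *
 * where ENOENT indicates a negative hit rather than a lookup failure.
 */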
int
cache_vref(struct namecache *ncp, struct ucred *cred, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_lock(ncp);
		error = cache_resolve(ncp, cred);
		cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Since we did not obtain any locks, a cache zap
		 * race can occur here if the vnode is in the middle
		 * of being reclaimed and has not yet been able to
		 * clean out its cache node.  If that case occurs,
		 * we must lock and unresolve the cache, then loop
		 * to retry.
		 */
		if (vp->v_flag & VRECLAIMED) {
			printf("Warning: vnode reclaim race detected on cache_vref %p (%s)\n", vp, ncp->nc_name);
			cache_lock(ncp);
			cache_setunresolved(ncp);
			cache_unlock(ncp);
			goto again;
		}
		vref(vp);
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

/*
 * Recursively set the FSMID update flag for namecache nodes leading
 * to root.  This will cause the next getattr or reclaim to increment the
 * fsmid and mark the inode for lazy updating.
 *
 * Stop recursing when we hit a node whose NCF_FSMID flag is already set.
 * This makes FSMIDs work in an Einsteinian fashion - where the observation
 * affects the result.  In this case a program monitoring a higher level
 * node will have detected some prior change and started its scan (clearing
 * NCF_FSMID in higher level nodes), but since it has not yet observed the
 * node where we find NCF_FSMID still set, we can safely make the related
 * modification without interfering with the theorized program.
 *
 * This also means that FSMIDs cannot represent time-domain quantities
 * in a hierarchical sense.  But the main reason for doing it this way
 * is to reduce the amount of recursion that occurs in the critical path
 * when e.g. a program is writing to a file that sits deep in a directory
 * hierarchy.
 */
void
cache_update_fsmid(struct namecache *ncp)
{
	struct vnode *vp;
	struct namecache *scan;

	/*
	 * Warning: even if we get a non-NULL vp it could still be in the
	 * middle of a recyclement.  Don't do anything fancy, just set
	 * NCF_FSMID.
	 */
	if ((vp = ncp->nc_vp) != NULL) {
		TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
			for (scan = ncp; scan; scan = scan->nc_parent) {
				if (scan->nc_flag & NCF_FSMID)
					break;
				scan->nc_flag |= NCF_FSMID;
			}
		}
	} else {
		while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) {
			ncp->nc_flag |= NCF_FSMID;
			ncp = ncp->nc_parent;
		}
	}
}

void
cache_update_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *scan;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		for (scan = ncp; scan; scan = scan->nc_parent) {
			if (scan->nc_flag & NCF_FSMID)
				break;
			scan->nc_flag |= NCF_FSMID;
		}
	}
}

/*
 * If getattr is called on a vnode (e.g. a stat call), the filesystem
 * may call this routine to determine if the namecache has the hierarchical
 * change flag set, requiring the fsmid to be updated.
 *
 * Since 0 indicates no support, make sure the filesystem fsmid is at least
 * 1.
 */
int
cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
{
	struct namecache *ncp;
	int changed = 0;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			changed = 1;
		}
	}
	if (*fsmid == 0)
		++*fsmid;
	if (changed)
		++*fsmid;
	return(changed);
}
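/*
 * Getattr-side usage sketch (hypothetical filesystem, illustrative field
 * names): a filesystem without a native FSMID can maintain one lazily
 * from its VOP_GETATTR:
 *
 *	if (cache_check_fsmid_vp(vp, &ip->i_fsmid))
 *		... mark the inode dirty for a lazy update ...
 *	vap->va_fsmid = ip->i_fsmid;
 */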
/*
 * Obtain the FSMID for a vnode for filesystems which do not support
 * a built-in FSMID.
 */
int64_t
cache_sync_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;

	if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			++ncp->nc_fsmid;
		}
		return(ncp->nc_fsmid);
	}
	return(VNOVAL);
}

/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
				  struct vnode *dvp);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);

struct namecache *
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit)
{
	struct namecache *ncp;
	struct vnode *saved_dvp;
	struct vnode *pvp;
	int error;

	ncp = NULL;
	saved_dvp = NULL;

	/*
	 * Temporary debugging code to force the directory scanning code
	 * to be exercised.
	 */
	if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
		ncp = TAILQ_FIRST(&dvp->v_namecache);
		printf("cache_fromdvp: forcing %s\n", ncp->nc_name);
		goto force;
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while ((ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			ncp = cache_get(dvp->v_mount->mnt_ncp);
			error = cache_resolve_mp(ncp);
			cache_put(ncp);
			if (ncvp_debug) {
				printf("cache_fromdvp: resolve root of mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					printf(" failed\n");
				ncp = NULL;
				break;
			}
			if (ncvp_debug)
				printf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				printf("lookupdotdot(longpath) failed %d "
				       "dvp %p\n", error, dvp);
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
		if (error) {
			printf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		VOP_UNLOCK(pvp, 0);
		/*
		 * Reuse makeit as a recursion depth counter.
		 */
		ncp = cache_fromdvp(pvp, cred, makeit + 1);
		vrele(pvp);
		if (ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(ncp, cred, dvp);
		cache_drop(ncp);
		if (error) {
			printf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, ncp->nc_name, dvp);
			ncp = NULL;
			break;
		}
		if (ncvp_debug) {
			printf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, ncp->nc_name);
		}
	}
	if (ncp)
		cache_hold(ncp);
	if (saved_dvp)
		vrele(saved_dvp);
	return (ncp);
}

/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct namecache *ncp;
	struct vnode *pvp;
	int error;
	static time_t last_fromdvp_report;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	for (;;) {
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
		if (error) {
			vrele(dvp);
			return (error);
		}
		VOP_UNLOCK(pvp, 0);
		if ((ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			cache_hold(ncp);
			vrele(pvp);
			break;
		}
		if (pvp->v_flag & VROOT) {
			ncp = cache_get(pvp->v_mount->mnt_ncp);
			error = cache_resolve_mp(ncp);
			cache_unlock(ncp);
			vrele(pvp);
			if (error) {
				cache_drop(ncp);
				vrele(dvp);
				return (error);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (last_fromdvp_report != time_second) {
		last_fromdvp_report = time_second;
		printf("Warning: extremely inefficient path resolution on %s\n",
			ncp->nc_name);
	}
	error = cache_inefficient_scan(ncp, cred, dvp);

	/*
	 * Hopefully dvp now has a namecache record associated with it.
	 * Leave it referenced to prevent the kernel from recycling the
	 * vnode.  Otherwise extremely long directory paths could result
	 * in endless recycling.
	 */
	if (*saved_dvp)
		vrele(*saved_dvp);
	*saved_dvp = dvp;
	return (error);
}


/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to recover
 * a connected namecache topology in order to then be able to issue new API
 * lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every
 * subsystem that calls into the namecache to deal with the re-merge case,
 * especially since we are using the namecache to placehold negative lookups
 * and the vnode might not be immediately assigned.  (2) is certainly far
 * less efficient than (1), but since we are only talking about directories
 * here (which are likely to remain cached), the case does not actually run
 * all that often and has the supreme advantage of not polluting the
 * namecache algorithms.
 */
static int
cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
		       struct vnode *dvp)
{
	struct nlcomponent nlc;
	struct namecache *rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	if ((error = cache_vget(ncp, cred, LK_SHARED, &pvp)) != 0)
		return (error);
	if (ncvp_debug)
		printf("inefficient_scan: directory iosize %ld vattr fileid = %ld\n", vat.va_blocksize, (long)vat.va_fileid);
	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = malloc(blksize, M_TEMP, M_WAITOK);
	rncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		printf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					printf("cache_inefficient_scan: "
					       "MATCHED inode %ld path %s/%*.*s\n",
					       vat.va_fileid, ncp->nc_name,
					       den->d_namlen, den->d_namlen,
					       den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				VOP_UNLOCK(pvp, 0);
				rncp = cache_nlookup(ncp, &nlc);
				KKASSERT(rncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	if (rncp) {
		vrele(pvp);
		if (rncp->nc_flag & NCF_UNRESOLVED) {
			cache_setvp(rncp, dvp);
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: setvp %s/%s = %p\n",
					ncp->nc_name, rncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					ncp->nc_name, rncp->nc_name, dvp,
					rncp->nc_vp);
			}
		}
		if (rncp->nc_vp == NULL)
			error = rncp->nc_error;
		cache_put(rncp);
	} else {
		printf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, ncp->nc_name);
		vput(pvp);
		error = ENOENT;
	}
	free(rbuf, M_TEMP);
	return (error);
}

/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
	struct namecache *par;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 */
	while (ncp->nc_flag & NCF_UNRESOLVED) {
		/*
		 * Someone other than us has a ref, stop.
		 */
		if (ncp->nc_refs > 1)
			goto done;

		/*
		 * We have children, stop.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			goto done;

		/*
		 * Remove ncp from the topology: hash table and parent linkage.
		 */
		if (ncp->nc_flag & NCF_HASHED) {
			ncp->nc_flag &= ~NCF_HASHED;
			LIST_REMOVE(ncp, nc_hash);
		}
		if ((par = ncp->nc_parent) != NULL) {
			par = cache_hold(par);
			TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
			ncp->nc_parent = NULL;
			if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
				vdrop(par->nc_vp);
		}

		/*
		 * ncp should not have picked up any refs.  Physically
		 * destroy the ncp.
		 */
		KKASSERT(ncp->nc_refs == 1);
		--numunres;
		/* cache_unlock(ncp) not required */
		ncp->nc_refs = -1;	/* safety */
		if (ncp->nc_name)
			free(ncp->nc_name, M_VFSCACHE);
		free(ncp, M_VFSCACHE);

		/*
		 * Loop on the parent (it may be NULL).  Only bother looping
		 * if the parent has a single ref (ours), which also means
		 * we can lock it trivially.
		 */
		ncp = par;
		if (ncp == NULL)
			return;
		if (ncp->nc_refs != 1) {
			cache_drop(ncp);
			return;
		}
		KKASSERT(par->nc_exlocks == 0);
		cache_lock(ncp);
	}
done:
	cache_unlock(ncp);
	atomic_subtract_int(&ncp->nc_refs, 1);
}

static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static __inline
void
cache_hysteresis(void)
{
	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			cache_cleanneg(10);
			cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			cache_cleanneg(10);
		} else {
			cache_hysteresis_state = CHI_LOW;
		}
		break;
	}
}
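/*
 * Numeric sketch of the hysteresis above (illustrative): with the default
 * ncnegfactor of 16, cleaning starts once numneg exceeds both MINNEG and
 * numcache/16, and in the CHI_HIGH state it keeps firing until numneg
 * falls roughly 10% below that trigger point, so the cleaner does not
 * flip on and off on every lookup.
 */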
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the cache.  A locked, referenced, non-NULL
 * entry is *always* returned, even if the supplied component is illegal.
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states:  positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct namecache *
cache_nlookup(struct namecache *par, struct nlcomponent *nlc)
{
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchashhead *nchpp;
	u_int32_t hash;
	globaldata_t gd;

	numcalls++;
	gd = mycpu;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);
	new_ncp = NULL;
restart:
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;

		/*
		 * Zap entries that have timed out.
		 */
		if (ncp->nc_timeout &&
		    (int)(ncp->nc_timeout - ticks) < 0 &&
		    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
		    ncp->nc_exlocks == 0
		) {
			cache_zap(cache_get(ncp));
			goto restart;
		}

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			if (cache_get_nonblock(ncp) == 0) {
				if (new_ncp)
					cache_free(new_ncp);
				goto found;
			}
			cache_get(ncp);
			cache_put(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  We have to relookup after possibly blocking in
	 * malloc.
	 */
	if (new_ncp == NULL) {
		new_ncp = cache_alloc(nlc->nlc_namelen);
		goto restart;
	}

	ncp = new_ncp;

	/*
	 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
	 * and link to the parent.  The mount point is usually inherited
	 * from the parent unless this is a special case such as a mount
	 * point where nlc_namelen is 0.  The caller is responsible for
	 * setting nc_mount in that case.  If nlc_namelen is 0 nc_name will
	 * be NULL.
	 */
	if (nlc->nlc_namelen) {
		bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
		ncp->nc_name[nlc->nlc_namelen] = 0;
		ncp->nc_mount = par->nc_mount;
	}
	nchpp = NCHHASH(hash);
	LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
	ncp->nc_flag |= NCF_HASHED;
	cache_link_parent(ncp, par);
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	cache_hysteresis();
	return(ncp);
}

/*
 * Given a locked ncp, validate that the vnode, if present, is actually
 * usable.  If it is not usable set the ncp to an unresolved state.
 */
void
cache_validate(struct namecache *ncp)
{
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			cache_setunresolved(ncp);
	}
}
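/*
 * Lookup/resolve usage sketch (a simplified rendition of what the
 * nlookup() path does, illustrative only):
 *
 *	nlc.nlc_nameptr = name;
 *	nlc.nlc_namelen = len;
 *	ncp = cache_nlookup(par, &nlc);
 *	if (ncp->nc_flag & NCF_UNRESOLVED)
 *		error = cache_resolve(ncp, cred);
 *	else
 *		error = ncp->nc_error;
 *	...
 *	cache_put(ncp);
 */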
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of a recyclement when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct namecache *ncp, struct ucred *cred)
{
	struct namecache *par;
	int error;

restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp->nc_flag & NCF_MOUNTPT)
		return (cache_resolve_mp(ncp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		printf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, or if it does not have to go back too
	 * many nodes to resolve the ncp.
	 */
	while (ncp->nc_parent->nc_vp == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);

		par = ncp->nc_parent;
		while (par->nc_parent && par->nc_parent->nc_vp == NULL)
			par = par->nc_parent;
		if (par->nc_parent == NULL) {
			printf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			return (EXDEV);
		}
		printf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		cache_get(par);
		if (par->nc_flag & NCF_MOUNTPT) {
			cache_resolve_mp(par);
		} else if (par->nc_parent->nc_vp == NULL) {
			printf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			cache_put(par);
			continue;
		} else if (par->nc_flag & NCF_UNRESOLVED) {
			par->nc_error = VOP_NRESOLVE(par, cred);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				printf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen, par->nc_name,
					par->nc_error);
				cache_put(par);
				return(error);
			}
			printf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 * ncp must already be resolved.
	 */
	KKASSERT((ncp->nc_flag & NCF_MOUNTPT) == 0);
	ncp->nc_error = VOP_NRESOLVE(ncp, cred);
	/*vop_nresolve(*ncp->nc_parent->nc_vp->v_ops, ncp, cred);*/
	if (ncp->nc_error == EAGAIN) {
		printf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}
/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to NFS's mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics for this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct namecache *ncp)
{
	struct vnode *vp;
	struct mount *mp = ncp->nc_mount;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				cache_setvp(ncp, vp);
				vput(vp);
			} else {
				printf("[diagnostic] cache_resolve_mp: failed to resolve mount %p\n", mp);
				cache_setvp(ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}

void
cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Automode from the vnlru proc - clean out 10% of the negative cache
	 * entries.
	 */
	if (count == 0)
		count = numneg / 10 + 1;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			KKASSERT(numneg == 0);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		if (cache_get_nonblock(ncp) == 0)
			cache_zap(ncp);
		--count;
	}
}

/*
 * Rehash a ncp.  Rehashing is typically required if the name changes (should
 * not generally occur) or the parent link changes.  This function will
 * unhash the ncp if the ncp is no longer hashable.
 */
static void
cache_rehash(struct namecache *ncp)
{
	struct nchashhead *nchpp;
	u_int32_t hash;

	if (ncp->nc_flag & NCF_HASHED) {
		ncp->nc_flag &= ~NCF_HASHED;
		LIST_REMOVE(ncp, nc_hash);
	}
	if (ncp->nc_nlen && ncp->nc_parent) {
		hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
		hash = fnv_32_buf(&ncp->nc_parent,
				  sizeof(ncp->nc_parent), hash);
		nchpp = NCHHASH(hash);
		LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
		ncp->nc_flag |= NCF_HASHED;
	}
}

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialise per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
	nclockwarn = 1 * hz;
}
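/*
 * Note (illustrative): hashinit() builds a power-of-two sized table
 * scaled from desiredvnodes*2 and returns the corresponding mask in
 * nchash, which is what makes the cheap masking in NCHHASH() at the top
 * of this file valid.
 */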
/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
struct namecache *
cache_allocroot(struct mount *mp, struct vnode *vp)
{
	struct namecache *ncp = cache_alloc(0);

	ncp->nc_flag |= NCF_MOUNTPT | NCF_ROOT;
	ncp->nc_mount = mp;
	cache_setvp(ncp, vp);
	return(ncp);
}

/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the passed namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct namecache *ncp)
{
	struct vnode *ovp;
	struct namecache *oncp;

	ovp = rootvnode;
	oncp = rootncp;
	rootvnode = nvp;
	rootncp = ncp;

	if (ovp)
		vrele(ovp);
	if (oncp)
		cache_drop(oncp);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}
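/*
 * Sketch of typical legacy filesystem usage of cache_purge(); the
 * xxx_reclaim() function below is hypothetical and not part of this
 * file.  A filesystem that does not track its own namecache entries
 * simply purges everything associated with a vnode when the vnode
 * goes away:
 *
 *	static int
 *	xxx_reclaim(struct vop_reclaim_args *ap)
 *	{
 *		struct vnode *vp = ap->a_vp;
 *
 *		cache_purge(vp);
 *		return (0);
 *	}
 */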
/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct nchashhead *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		ncp = LIST_FIRST(nchpp);
		if (ncp)
			cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				cache_lock(ncp);
				cache_zap(ncp);
			} else {
				cache_drop(ncp);
			}
			ncp = nnp;
		}
	}
}

/*
 * Create a new (theoretically) unique fsmid
 */
int64_t
cache_getnewfsmid(void)
{
	static int fsmid_roller;
	int64_t fsmid;

	++fsmid_roller;
	fsmid = ((int64_t)time_second << 32) |
		(fsmid_roller & 0x7FFFFFFF);
	return (fsmid);
}

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

int
sys___getcwd(struct __getcwd_args *uap)
{
	int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = malloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	free(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	ncp = fdp->fd_ncdir;
	while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
		if (ncp->nc_flag & NCF_MOUNTPT) {
			if (ncp->nc_mount == NULL) {
				*error = EBADF;		/* forced unmount? */
				return(NULL);
			}
			ncp = ncp->nc_parent;
			continue;
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ENOMEM;
				return(NULL);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
		slash_prefixed = 1;
		ncp = ncp->nc_parent;
	}
	if (ncp == NULL) {
		numcwdfail2++;
		*error = ENOENT;
		return(NULL);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
	return (bp);
}
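/*
 * Worked example (illustrative only): kern_getcwd() assembles the
 * path from right to left.  For a process whose cwd is /usr/local,
 * bp starts at the terminating NUL and moves backwards through the
 * buffer:
 *
 *	[..........local\0]	"local" copied in, last byte first
 *	[........./local\0]	'/' prepended
 *	[......usr/local\0]	then the parent component "usr"
 *	[...../usr/local\0]	and its '/'
 *
 * The walk stops at the root ncp and bp is returned pointing at the
 * leading '/'; the unused bytes at the front of the buffer are
 * simply ignored by the caller.
 */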
/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
cache_fullpath(struct proc *p, struct namecache *ncp,
	       char **retbuf, char **freebuf)
{
	char *bp, *buf;
	int i, slash_prefixed;
	struct namecache *fd_nrdir;

	numfullpathcalls--;

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = NULL;
	slash_prefixed = 0;
	while (ncp && ncp != fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
		if (ncp->nc_flag & NCF_MOUNTPT) {
			if (ncp->nc_mount == NULL) {
				free(buf, M_TEMP);
				return(EBADF);
			}
			ncp = ncp->nc_parent;
			continue;
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				free(buf, M_TEMP);
				return(ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		ncp = ncp->nc_parent;
	}
	if (ncp == NULL) {
		numfullpathfail2++;
		free(buf, M_TEMP);
		return(ENOENT);
	}

	/*
	 * If we terminated at the global root rather than at the
	 * process's root directory, toss the accumulated path and
	 * report just "/".
	 */
	if (p != NULL && (ncp->nc_flag & NCF_ROOT) && ncp != fd_nrdir) {
		bp = buf + MAXPATHLEN - 1;
		*bp = '\0';
		slash_prefixed = 0;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;

	return(0);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
	struct namecache *ncp;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* a NULL vn means the client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL)
		return (EINVAL);

	numfullpathcalls--;
	return(cache_fullpath(p, ncp, retbuf, freebuf));
}
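/*
 * Example use of vn_fullpath() (sketch only; the surrounding code is
 * hypothetical).  On success the caller receives two pointers into a
 * single M_TEMP allocation: *retbuf points at the start of the path
 * inside *freebuf, so only freebuf may be passed to free():
 *
 *	char *path, *fbuf;
 *
 *	if (vn_fullpath(p, vp, &path, &fbuf) == 0) {
 *		printf("vnode path: %s\n", path);
 *		free(fbuf, M_TEMP);
 *	}
 */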