1 /* $OpenBSD: vfs_cache.c,v 1.32 2009/08/24 15:51:40 thib Exp $ */ 2 /* $NetBSD: vfs_cache.c,v 1.13 1996/02/04 02:18:09 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
31 * 32 * @(#)vfs_cache.c 8.3 (Berkeley) 8/22/94 33 */ 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/time.h> 38 #include <sys/mount.h> 39 #include <sys/vnode.h> 40 #include <sys/namei.h> 41 #include <sys/errno.h> 42 #include <sys/malloc.h> 43 #include <sys/pool.h> 44 #include <sys/hash.h> 45 46 /* 47 * TODO: namecache access should really be locked. 48 */ 49 50 /* 51 * Name caching works as follows: 52 * 53 * Names found by directory scans are retained in a cache 54 * for future reference. It is managed LRU, so frequently 55 * used names will hang around. Cache is indexed by hash value 56 * obtained from (vp, name) where vp refers to the directory 57 * containing name. 58 * 59 * For simplicity (and economy of storage), names longer than 60 * a maximum length of NCHNAMLEN are not cached; they occur 61 * infrequently in any case, and are almost never of interest. 62 * 63 * Upon reaching the last segment of a path, if the reference 64 * is for DELETE, or NOCACHE is set (rewrite), and the 65 * name is located in the cache, it will be dropped. 66 */ 67 68 /* 69 * Structures associated with name caching. 
 */
long	numcache;		/* total number of regular cache entries allocated */
long	numneg;			/* total number of negative cache entries allocated */

TAILQ_HEAD(, namecache) nclruhead;	/* Regular Entry LRU chain */
TAILQ_HEAD(, namecache) nclruneghead;	/* Negative Entry LRU chain */
struct	nchstats nchstats;		/* cache effectiveness statistics */

int doingcache = 1;			/* 1 => enable the cache */

struct pool nch_pool;			/* allocator for struct namecache */

void cache_zap(struct namecache *);
u_long nextvnodeid;			/* capability generation counter for v_id */

/*
 * Ordering function for each per-directory red-black tree of namecache
 * entries: order first by name length, then by memcmp() of the name
 * bytes.  Only equality matters to lookups; the ordering itself is
 * arbitrary but must be consistent.
 */
static int
namecache_compare(struct namecache *n1, struct namecache *n2)
{
	if (n1->nc_nlen == n2->nc_nlen)
		return (memcmp(n1->nc_name, n2->nc_name, n1->nc_nlen));
	else
		return (n1->nc_nlen - n2->nc_nlen);
}

RB_GENERATE(namecache_rb_cache, namecache, n_rbcache, namecache_compare);

/*
 * Blow away a namecache entry: unlink it from every structure it is on
 * (the appropriate LRU chain, the parent directory's red-black tree and,
 * for still-valid directory entries, the target vnode's reverse-lookup
 * list) and return it to the pool.  If removing the entry empties the
 * parent directory's tree, drop the hold reference taken by
 * cache_enter().
 */
void
cache_zap(struct namecache *ncp)
{
	struct vnode *dvp = NULL;

	/* A NULL nc_vp marks a negative entry; the two kinds live on
	 * separate LRU chains with separate counters. */
	if (ncp->nc_vp != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		numcache--;
	} else {
		TAILQ_REMOVE(&nclruneghead, ncp, nc_neg);
		numneg--;
	}
	if (ncp->nc_dvp) {
		RB_REMOVE(namecache_rb_cache, &ncp->nc_dvp->v_nc_tree, ncp);
		/* Last entry for this directory: remember it so we can
		 * vdrop() after the entry is freed. */
		if (RB_EMPTY(&ncp->nc_dvp->v_nc_tree))
			dvp = ncp->nc_dvp;
	}
	/* Only entries whose capability number still matches were ever
	 * put on the reverse map; "." and ".." are never on it either
	 * (mirrors the test in cache_enter()). */
	if (ncp->nc_vp && (ncp->nc_vpid == ncp->nc_vp->v_id)) {
		if (ncp->nc_vp != ncp->nc_dvp &&
		    ncp->nc_vp->v_type == VDIR &&
		    (ncp->nc_nlen > 2 ||
			(ncp->nc_nlen > 1 &&
			    ncp->nc_name[1] != '.') ||
			(ncp->nc_nlen > 0 &&
			    ncp->nc_name[0] != '.'))) {
			TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_me);
		}
	}
	pool_put(&nch_pool, ncp);
	if (dvp)
		vdrop(dvp);
}

/*
 * Look for a name in the cache. We don't do this if the segment name is
 * long, simply so the cache can avoid holding long names (which would
 * either waste space, or add greatly to the complexity).
 *
 * Lookup is called with ni_dvp pointing to the directory to search,
 * ni_ptr pointing to the name of the entry being sought, ni_namelen
 * tells the length of the name, and ni_hash contains a hash of
 * the name. If the lookup succeeds, the vnode is returned in ni_vp
 * and a status of 0 is returned. If the locking fails for whatever
 * reason, the vnode is unlocked and the error is returned to caller.
 * If the lookup determines that the name does not exist (negative caching),
 * a status of ENOENT is returned. If the lookup fails, a status of -1
 * is returned.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	struct namecache *ncp;
	struct namecache n;	/* stack key for the RB_FIND() below */
	struct vnode *vp;
	struct proc *p = curproc;
	u_long vpid;		/* capability number snapshot of vp */
	int error;

	*vpp = NULL;

	/* Caching disabled: make sure lookup won't cache this name. */
	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (-1);
	}
	/* Names longer than NCHNAMLEN are never cached (see above). */
	if (cnp->cn_namelen > NCHNAMLEN) {
		nchstats.ncs_long++;
		cnp->cn_flags &= ~MAKEENTRY;
		return (-1);
	}

	/* lookup in directory vnode's redblack tree */
	n.nc_nlen = cnp->cn_namelen;
	memcpy(n.nc_name, cnp->cn_nameptr, n.nc_nlen);
	ncp = RB_FIND(namecache_rb_cache, &dvp->v_nc_tree, &n);

	if (ncp == NULL) {
		nchstats.ncs_miss++;
		return (-1);
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		/* Caller (delete/rename) wants the entry gone. */
		nchstats.ncs_badhits++;
		goto remove;
	} else if (ncp->nc_vp == NULL) {
		/*
		 * Negative hit.  A CREATE of the last component must not
		 * be answered from the negative cache - the entry is
		 * about to become stale - so zap it instead.
		 */
		if (cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0) {
			nchstats.ncs_neghits++;
			/*
			 * Move this slot to end of the negative LRU chain,
			 */
			if (TAILQ_NEXT(ncp, nc_neg) != NULL) {
				TAILQ_REMOVE(&nclruneghead, ncp, nc_neg);
				TAILQ_INSERT_TAIL(&nclruneghead, ncp,
				    nc_neg);
			}
			return (ENOENT);
		} else {
			nchstats.ncs_badhits++;
			goto remove;
		}
	} else if (ncp->nc_vpid != ncp->nc_vp->v_id) {
		/* Capability number changed: the vnode was recycled. */
		nchstats.ncs_falsehits++;
		goto remove;
	}

	vp = ncp->nc_vp;
	vpid = vp->v_id;
	if (vp == dvp) {	/* lookup on "." */
		vref(dvp);
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		/*
		 * ".." lookup: the child (dvp) must be unlocked before
		 * locking the parent (vp) to preserve the kernel's
		 * parent-before-child lock order and avoid deadlock.
		 */
		VOP_UNLOCK(dvp, 0, p);
		cnp->cn_flags |= PDIRUNLOCK;
		error = vget(vp, LK_EXCLUSIVE, p);
		/*
		 * If the above vget() succeeded and both LOCKPARENT and
		 * ISLASTCN is set, lock the directory vnode as well.
		 */
		if (!error && (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) == 0) {
			if ((error = vn_lock(dvp, LK_EXCLUSIVE, p)) != 0) {
				vput(vp);
				return (error);
			}
			cnp->cn_flags &= ~PDIRUNLOCK;
		}
	} else {
		error = vget(vp, LK_EXCLUSIVE, p);
		/*
		 * If the above vget() failed or either of LOCKPARENT or
		 * ISLASTCN is set, unlock the directory vnode.
		 */
		if (error || (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) != 0) {
			VOP_UNLOCK(dvp, 0, p);
			cnp->cn_flags |= PDIRUNLOCK;
		}
	}

	/*
	 * Check that the lock succeeded, and that the capability number did
	 * not change while we were waiting for the lock.
	 */
	if (error || vpid != vp->v_id) {
		if (!error) {
			vput(vp);
			nchstats.ncs_falsehits++;
		} else
			nchstats.ncs_badhits++;
		/*
		 * The parent needs to be locked when we return to VOP_LOOKUP().
		 * The `.' case here should be extremely rare (if it can happen
		 * at all), so we don't bother optimizing out the unlock/relock.
		 */
		if (vp == dvp || error ||
		    (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) != 0) {
			if ((error = vn_lock(dvp, LK_EXCLUSIVE, p)) != 0)
				return (error);
			cnp->cn_flags &= ~PDIRUNLOCK;
		}
		return (-1);
	}

	nchstats.ncs_goodhits++;
	/*
	 * Move this slot to end of the regular LRU chain.
	 */
	if (TAILQ_NEXT(ncp, nc_lru) != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	}
	*vpp = vp;
	return (0);

remove:
	/*
	 * Last component and we are renaming or deleting,
	 * the cache entry is invalid, or otherwise don't
	 * want cache entry to exist.
	 */
	cache_zap(ncp);
	return (-1);
}

/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * Fill in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it. (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 *
 * TODO: should we return *dvpp locked?
 */

int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp = NULL;
	char *bp;

	if (!doingcache)
		goto out;
	/*
	 * Walk the reverse map: all cache entries whose target is vp.
	 * Accept the first entry whose parent is distinct from vp and
	 * whose parent capability number is still current.
	 */
	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_me) {
		dvp = ncp->nc_dvp;
		if (dvp && dvp != vp && ncp->nc_dvpid == dvp->v_id)
			goto found;
	}
	goto miss;
found:
#ifdef DIAGNOSTIC
	/* "." and ".." must never be entered in the reverse map
	 * (see cache_enter()); finding one here is a cache bug. */
	if (ncp->nc_nlen == 1 &&
	    ncp->nc_name[0] == '.')
		panic("cache_revlookup: found entry for .");
	if (ncp->nc_nlen == 2 &&
	    ncp->nc_name[0] == '.' &&
	    ncp->nc_name[1] == '.')
		panic("cache_revlookup: found entry for ..");
#endif
	nchstats.ncs_revhits++;

	if (bufp != NULL) {
		/* Copy the name backwards into the caller's buffer,
		 * ERANGE if it would run off the front. */
		bp = *bpp;
		bp -= ncp->nc_nlen;
		if (bp <= bufp) {
			*dvpp = NULL;
			return (ERANGE);
		}
		memcpy(bp, ncp->nc_name, ncp->nc_nlen);
		*bpp = bp;
	}

	*dvpp = dvp;

	/*
	 * XXX: Should we vget() here to have more
	 * consistent semantics with cache_lookup()?
	 */
	return (0);

miss:
	nchstats.ncs_revmiss++;
out:
	*dvpp = NULL;
	return (-1);
}

/*
 * Add an entry to the cache.
 *
 * dvp is the directory searched, vp the vnode found (NULL for a
 * negative entry) and cnp carries the name.  Names longer than
 * NCHNAMLEN are silently not cached.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp, *lncp;

	if (!doingcache || cnp->cn_namelen > NCHNAMLEN)
		return;

	/*
	 * allocate, or recycle (free and allocate) an ncp.
	 */
	if (numcache >= desiredvnodes) {
		/* At capacity: evict the least-recently-used entry,
		 * preferring a regular one over a negative one. */
		if ((ncp = TAILQ_FIRST(&nclruhead)) != NULL)
			cache_zap(ncp);
		else if ((ncp = TAILQ_FIRST(&nclruneghead)) != NULL)
			cache_zap(ncp);
		else
			panic("wtf? leak?");
	}
	ncp = pool_get(&nch_pool, PR_WAITOK|PR_ZERO);

	/* grab the vnode we just found */
	ncp->nc_vp = vp;
	if (vp)
		ncp->nc_vpid = vp->v_id;

	/* fill in cache info */
	ncp->nc_dvp = dvp;
	ncp->nc_dvpid = dvp->v_id;
	ncp->nc_nlen = cnp->cn_namelen;
	bcopy(cnp->cn_nameptr, ncp->nc_name, (unsigned)ncp->nc_nlen);
	/* First entry for this directory: hold dvp so it is not
	 * recycled while it still owns cache entries (released in
	 * cache_zap() when the tree empties again). */
	if (RB_EMPTY(&dvp->v_nc_tree)) {
		vhold(dvp);
	}
	if ((lncp = RB_INSERT(namecache_rb_cache, &dvp->v_nc_tree, ncp))
	    != NULL) {
		/* someone has raced us and added a different entry
		 * for the same vnode (different ncp) - we don't need
		 * this entry, so free it and we are done.
		 */
		pool_put(&nch_pool, ncp);
		/* we know now dvp->v_nc_tree is not empty, no need
		 * to vdrop here
		 */
		goto done;
	}
	if (vp) {
		TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
		numcache++;
		/* don't put . or ..
in the reverse map */
		if (vp != dvp && vp->v_type == VDIR &&
		    (ncp->nc_nlen > 2 ||
			(ncp->nc_nlen > 1 &&
			    ncp->nc_name[1] != '.') ||
			(ncp->nc_nlen > 0 &&
			    ncp->nc_name[0] != '.')))
			TAILQ_INSERT_TAIL(&vp->v_cache_dst, ncp,
			    nc_me);
	} else {
		/* Negative entry: it lives on its own LRU chain. */
		TAILQ_INSERT_TAIL(&nclruneghead, ncp, nc_neg);
		numneg++;
	}
	/* Keep the negative cache from crowding out real entries. */
	if (numneg > desiredvnodes) {
		if ((ncp = TAILQ_FIRST(&nclruneghead))
		    != NULL)
			cache_zap(ncp);
	}
done:
	return;
}


/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit()
{
	TAILQ_INIT(&nclruhead);
	TAILQ_INIT(&nclruneghead);
	pool_init(&nch_pool, sizeof(struct namecache), 0, 0, 0, "nchpl",
	    &pool_allocator_nointr);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid
 */
void
cache_purge(struct vnode *vp)
{
	struct namecache *ncp;

	/* Drop every entry pointing at vp ... */
	while ((ncp = TAILQ_FIRST(&vp->v_cache_dst)))
		cache_zap(ncp);
	/* ... and every entry under vp as a directory. */
	while ((ncp = RB_ROOT(&vp->v_nc_tree)))
		cache_zap(ncp);

	/* XXX this blows goats */
	/* Bump the capability number so any remaining stale references
	 * to this vnode fail their nc_vpid/nc_dvpid checks; skip the
	 * reserved value 0 on wraparound. */
	vp->v_id = ++nextvnodeid;
	if (vp->v_id == 0)
		vp->v_id = ++nextvnodeid;
}

/*
 * Cache flush, a whole filesystem; called when filesys is umounted to
 * remove entries that would now be invalid
 *
 * The line "nxtcp = nchhead" near the end is to avoid potential problems
 * if the cache lru chain is modified while we are dumping the
 * inode. This makes the algorithm O(n^2), but do you think I care?
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	/* whack the regular entries */
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != TAILQ_END(&nclruhead);
	    ncp = nxtcp) {
		/* Skip entries belonging to other filesystems. */
		if (ncp->nc_dvp == NULL || ncp->nc_dvp->v_mount != mp) {
			nxtcp = TAILQ_NEXT(ncp, nc_lru);
			continue;
		}
		/* free the resources we had */
		cache_zap(ncp);
		/* cause rescan of list, it may have altered */
		nxtcp = TAILQ_FIRST(&nclruhead);
	}
	/* whack the negative entries */
	for (ncp = TAILQ_FIRST(&nclruneghead); ncp != TAILQ_END(&nclruneghead);
	    ncp = nxtcp) {
		/* Skip entries belonging to other filesystems. */
		if (ncp->nc_dvp == NULL || ncp->nc_dvp->v_mount != mp) {
			nxtcp = TAILQ_NEXT(ncp, nc_neg);
			continue;
		}
		/* free the resources we had */
		cache_zap(ncp);
		/* cause rescan of list, it may have altered */
		nxtcp = TAILQ_FIRST(&nclruneghead);
	}
}