xref: /netbsd-src/sys/kern/vfs_cache.c (revision b8c616269f5ebf18ab2e35cb8099d683130a177c)
/*	$NetBSD: vfs_cache.c,v 1.38 2003/02/01 06:23:45 thorpej Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.38 2003/02/01 06:23:45 thorpej Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (dvp, name) where dvp refers to the directory
 * containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 * The entry is also dropped when it was not possible to lock
 * the cached vnode, either because vget() failed or because the
 * capability number changed while waiting for the lock.
 */

/*
 * Structures associated with name caching.
 */
LIST_HEAD(nchashhead, namecache) *nchashtbl;
u_long	nchash;				/* size of hash table - 1 */
long	numcache;			/* number of cache entries allocated */
#define	NCHASH(cnp, dvp)	(((cnp)->cn_hash ^ (dvp)->v_id) & nchash)

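/*
 * Second hash table, keyed on the vnode alone: it maps a vnode back to
 * the cache entries that name it, and is what cache_revlookup() (and
 * hence getcwd) walks.
 */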
LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl;
u_long	ncvhash;			/* size of hash table - 1 */
#define	NCVHASH(vp)		((vp)->v_id & ncvhash)

TAILQ_HEAD(, namecache) nclruhead;		/* LRU chain */
struct	nchstats nchstats;		/* cache effectiveness statistics */

struct pool namecache_pool;

MALLOC_DEFINE(M_CACHE, "namecache", "Dynamically allocated cache entries");

int doingcache = 1;			/* 1 => enable the cache */

/*
 * Look for the name in the cache.  We don't do this
 * if the segment name is long, simply so the cache can avoid
 * holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp->cn_nameptr pointing to the name of the entry being sought,
 * cnp->cn_namelen giving the length of the name, and cnp->cn_hash
 * holding a hash of the name.  If the lookup succeeds, the vnode is
 * referenced and locked, stored in *vpp, and a status of zero is
 * returned.  If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned.  If the lookup
 * fails (the entry is missing or stale, or the cached vnode could not
 * be locked), a status of -1 is returned; should relocking the parent
 * directory fail on that path, that locking error is returned instead.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	struct vnode *vp;
	u_long vpid;
	int error;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return (-1);
	}
	if (cnp->cn_namelen > NCHNAMLEN) {
		nchstats.ncs_long++;
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return (-1);
	}
	ncpp = &nchashtbl[NCHASH(cnp, dvp)];
	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if (ncp->nc_dvp == dvp &&
		    ncp->nc_dvpid == dvp->v_id &&
		    ncp->nc_nlen == cnp->cn_namelen &&
		    !memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
			break;
	}
	if (ncp == 0) {
		nchstats.ncs_miss++;
		*vpp = NULL;
		return (-1);
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		nchstats.ncs_badhits++;
		goto remove;
	} else if (ncp->nc_vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		cnp->cn_flags |= ncp->nc_vpid;
		if (cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0) {
			nchstats.ncs_neghits++;
			/*
			 * Move this slot to end of LRU chain,
			 * if not already there.
			 */
			if (TAILQ_NEXT(ncp, nc_lru) != 0) {
				TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
				TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			}
			return (ENOENT);
		} else {
			nchstats.ncs_badhits++;
			goto remove;
		}
	} else if (ncp->nc_vpid != ncp->nc_vp->v_id) {
		nchstats.ncs_falsehits++;
		goto remove;
	}

	vp = ncp->nc_vp;
	vpid = vp->v_id;
	if (vp == dvp) {	/* lookup on "." */
		VREF(dvp);
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		VOP_UNLOCK(dvp, 0);
		cnp->cn_flags |= PDIRUNLOCK;
		error = vget(vp, LK_EXCLUSIVE);
		/*
		 * If the above vget() succeeded and both LOCKPARENT and
		 * ISLASTCN are set, lock the directory vnode as well.
		 */
		if (!error && (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) == 0) {
			if ((error = vn_lock(dvp, LK_EXCLUSIVE)) != 0) {
				vput(vp);
				return (error);
			}
			cnp->cn_flags &= ~PDIRUNLOCK;
		}
	} else {
		error = vget(vp, LK_EXCLUSIVE);
		/*
		 * If the above vget() failed, or LOCKPARENT and ISLASTCN
		 * are not both set, unlock the directory vnode.
		 */
		if (error || (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) != 0) {
			VOP_UNLOCK(dvp, 0);
			cnp->cn_flags |= PDIRUNLOCK;
		}
	}

	/*
	 * Check that the lock succeeded, and that the capability number did
	 * not change while we were waiting for the lock.
	 */
	if (error || vpid != vp->v_id) {
		if (!error) {
			vput(vp);
			nchstats.ncs_falsehits++;
		} else
			nchstats.ncs_badhits++;
		/*
		 * The parent needs to be locked when we return to VOP_LOOKUP().
		 * The `.' case here should be extremely rare (if it can happen
		 * at all), so we don't bother optimizing out the unlock/relock.
		 */
		if (vp == dvp ||
		    error || (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) != 0) {
			if ((error = vn_lock(dvp, LK_EXCLUSIVE)) != 0)
				return (error);
			cnp->cn_flags &= ~PDIRUNLOCK;
		}
		*vpp = NULL;
		return (-1);
	}

	nchstats.ncs_goodhits++;
	/*
	 * Move this slot to end of LRU chain, if not already there.
	 */
	if (TAILQ_NEXT(ncp, nc_lru) != 0) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	}
	*vpp = vp;
	return (0);

remove:
	/*
	 * Last component and we are renaming or deleting,
	 * the cache entry is invalid, or we otherwise don't
	 * want the cache entry to exist.
	 */
	TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
	LIST_REMOVE(ncp, nc_hash);
	ncp->nc_hash.le_prev = NULL;
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	TAILQ_INSERT_HEAD(&nclruhead, ncp, nc_lru);
	*vpp = NULL;
	return (-1);
}
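
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): a file system's VOP_LOOKUP() implementation, in the style
 * of ufs_lookup(), typically consults the cache first and only falls
 * through to a real directory scan on a miss:
 *
 *	if ((error = cache_lookup(vdp, vpp, cnp)) >= 0)
 *		return (error);	 (hit (0), negative hit (ENOENT), or a
 *				  locking error -- all of them final)
 *
 *	error == -1 means a cache miss: scan the directory, then let
 *	cache_enter() below remember the result.
 */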

/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * Fill in *dvpp with the vnode of the parent directory.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	char *bp;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		if (ncp->nc_vp == vp &&
		    ncp->nc_vpid == vp->v_id &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp && 		/* avoid pesky . entries.. */
		    dvp->v_id == ncp->nc_dvpid) {

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			nchstats.ncs_revhits++;

			if (bufp) {
				bp = *bpp;
				bp -= ncp->nc_nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, ncp->nc_nlen);
				*bpp = bp;
			}

			/* XXX MP: how do we know dvp won't evaporate? */
			*dvpp = dvp;
			return (0);
		}
	}
	nchstats.ncs_revmiss++;
 out:
	*dvpp = NULL;
	return (-1);
}
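
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the backwards buffer convention above, as a getcwd-style
 * caller might use it.  The path is assembled from its last component
 * toward the front of the buffer, and "bp" always points at the start
 * of what has been built so far.  All identifiers are placeholders.
 *
 *	char buf[MAXPATHLEN], *bp;
 *	bp = buf + sizeof(buf);		(start at the end...)
 *	*(--bp) = '\0';			(...with the terminating NUL)
 *	while (vp is not the root vnode) {
 *		if ((error = cache_revlookup(vp, &dvp, &bp, buf)) != 0)
 *			break;		(-1 means miss, ERANGE no room)
 *		*(--bp) = '/';		(separator ahead of the name)
 *		vp = dvp;		(keep walking up)
 *	}
 *	on success bp now points at the assembled path
 */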

/*
 * Add an entry to the cache
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;

#ifdef DIAGNOSTIC
	if (cnp->cn_namelen > NCHNAMLEN)
		panic("cache_enter: name too long");
#endif
	if (!doingcache)
		return;
	/*
	 * Allocate a fresh entry while we are below the vnode limit;
	 * otherwise recycle the cache slot at the head of the LRU chain.
	 */
	if (numcache < numvnodes) {
		ncp = pool_get(&namecache_pool, PR_WAITOK);
		memset(ncp, 0, sizeof(*ncp));
		numcache++;
	} else if ((ncp = TAILQ_FIRST(&nclruhead)) != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		if (ncp->nc_vhash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	} else
		return;
	/* Grab the vnode we just found. */
	ncp->nc_vp = vp;
	if (vp)
		ncp->nc_vpid = vp->v_id;
	else {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_vpid = cnp->cn_flags & ISWHITEOUT;
	}
	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	ncp->nc_dvpid = dvp->v_id;
	ncp->nc_nlen = cnp->cn_namelen;
	memcpy(ncp->nc_name, cnp->cn_nameptr, (unsigned)ncp->nc_nlen);
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	ncpp = &nchashtbl[NCHASH(cnp, dvp)];
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used in getcwd) for directories
	 * (or, with NAMECACHE_ENTER_REVERSE, for everything), but never
	 * for "." or ".." entries.
	 */
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
}
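
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): how a file system's lookup routine, ufs_lookup()-style,
 * typically feeds the cache once it has scanned the directory for the
 * component.  Details vary per file system.
 *
 *	component resolved to a vnode:
 *		if (cnp->cn_flags & MAKEENTRY)
 *			cache_enter(dvp, *vpp, cnp);
 *
 *	component definitely absent (record a negative entry, except
 *	when the caller is about to create it):
 *		if ((cnp->cn_flags & MAKEENTRY) && cnp->cn_nameiop != CREATE)
 *			cache_enter(dvp, NULL, cnp);
 */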

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit(void)
{

	TAILQ_INIT(&nclruhead);
	nchashtbl =
	    hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, M_CACHE, M_WAITOK, &ncvhash);
#endif
	pool_init(&namecache_pool, sizeof(struct namecache), 0, 0, 0,
	    "ncachepl", &pool_allocator_nointr);
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, M_CACHE, M_WAITOK, &mask2);
#endif
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
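	/*
	 * Unhash every entry still on the old chains rather than rehash
	 * it; the entries remain on the LRU list and are simply refilled
	 * (and rehashed) by cache_enter() as they are recycled.
	 */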
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	hashdone(oldhash1, M_CACHE);
	hashdone(oldhash2, M_CACHE);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid
 */
void
cache_purge(struct vnode *vp)
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	static u_long nextvnodeid;

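	/*
	 * Bumping the vnode's capability number (v_id) is what actually
	 * invalidates any cache entries that still reference it.  Only
	 * when the counter wraps around to zero must every cached id be
	 * cleared, so that stale entries cannot match recycled numbers.
	 */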
	vp->v_id = ++nextvnodeid;
	if (nextvnodeid != 0)
		return;
	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			ncp->nc_vpid = 0;
			ncp->nc_dvpid = 0;
		}
	}
	vp->v_id = ++nextvnodeid;
}

/*
 * Cache flush, a whole file system; called when a file system is
 * unmounted to remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp->nc_dvp == NULL || ncp->nc_dvp->v_mount != mp) {
			continue;
		}
		/* Free the resources we had. */
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		if (ncp->nc_vhash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
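		/*
		 * Park the emptied slot at the head of the LRU so that
		 * cache_enter() reuses it before evicting live entries.
		 */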
		TAILQ_INSERT_HEAD(&nclruhead, ncp, nc_lru);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_vpid == vp->v_id) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_vpid == vp->v_id) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif