/* $NetBSD: bufcache.c,v 1.11 2007/10/08 21:39:49 ad Exp $ */
/*-
 * Copyright (c) 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/mount.h>

#include <assert.h>
#include <err.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <util.h>

#include "bufcache.h"
#include "vnode.h"

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3	/* number of free buffer queues */

#define	BQ_LOCKED	0	/* super-blocks &c */
#define	BQ_LRU		1	/* lru, useful buffers */
#define	BQ_AGE		2	/* rubbish */

TAILQ_HEAD(bqueues, ubuf) bufqueues[BQUEUES];

struct bufhash_struct *bufhash;

#define HASH_MAX 1024
int hashmax  = HASH_MAX;
int hashmask = (HASH_MAX - 1);

int maxbufs = BUF_CACHE_SIZE;
int nbufs = 0;
int cachehits = 0;
int cachemisses = 0;
int max_depth = 0;
off_t locked_queue_bytes = 0;
int locked_queue_count = 0;

/* Simple buffer hash function */
static int
vl_hash(struct uvnode * vp, daddr_t lbn)
{
	return (int)((unsigned long) vp + lbn) & hashmask;
}
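
/*
 * Illustrative note: hashmask is always hashmax - 1 with hashmax a
 * power of two, so the AND above simply keeps the low-order bits of
 * (vp + lbn); e.g. with the default hashmax of 1024 the chain index
 * is the low ten bits of the sum.  Folding the vnode pointer into the
 * sum spreads equal logical block numbers belonging to different
 * files across different chains.
 */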

/* Initialize buffer cache */
void
bufinit(int max)
{
	int i;

	if (max) {
		for (hashmax = 1; hashmax < max; hashmax <<= 1)
			;
		hashmask = hashmax - 1;
	}

	for (i = 0; i < BQUEUES; i++) {
		TAILQ_INIT(&bufqueues[i]);
	}
	bufhash = emalloc(hashmax * sizeof(*bufhash));
	for (i = 0; i < hashmax; i++)
		LIST_INIT(&bufhash[i]);
}
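
/*
 * Usage sketch (illustrative, not called from this file): the hash
 * width is normally sized to the expected number of buffers, e.g.
 *
 *	bufinit(0);	keep the default HASH_MAX (1024) chains
 *	bufinit(3000);	rounds up to 4096 chains
 *
 * A zero argument skips the sizing loop and leaves the compiled-in
 * default in place.
 */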

/* Widen the hash table. */
void
bufrehash(int max)
{
	int i, newhashmax, newhashmask;
	struct ubuf *bp, *nbp;
	struct bufhash_struct *np;

	if (max < 0 || max < hashmax)
		return;

	/* Round up to a power of two */
	for (newhashmax = 1; newhashmax < max; newhashmax <<= 1)
		;
	newhashmask = newhashmax - 1;

	/* Allocate new empty hash table, if we can */
	np = emalloc(newhashmax * sizeof(*np));
	for (i = 0; i < newhashmax; i++)
		LIST_INIT(&np[i]);

	/* Now reassign all existing buffers to their new hash chains. */
	for (i = 0; i < hashmax; i++) {
		bp = LIST_FIRST(&bufhash[i]);
		while (bp) {
			nbp = LIST_NEXT(bp, b_hash);
			LIST_REMOVE(bp, b_hash);
			bp->b_hashval = vl_hash(bp->b_vp, bp->b_lblkno);
			LIST_INSERT_HEAD(&np[bp->b_hashval], bp, b_hash);
			bp = nbp;
		}
	}

	/* Switch over and clean up */
	free(bufhash);
	bufhash = np;
	hashmax = newhashmax;
	hashmask = newhashmask;
}
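
/*
 * Usage note (illustrative): every buffer is unlinked from its old
 * chain and rehashed before the old table is freed, so it is safe to
 * widen the table while buffers are cached, e.g. once the caller
 * learns how many buffers it will really need:
 *
 *	bufrehash(expected_buffers);	(hypothetical caller-side count)
 */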

/* Print statistics of buffer cache usage */
void
bufstats(void)
{
	int lookups = cachehits + cachemisses;

	/* Guard against 0/0 when no lookups have happened yet. */
	printf("buffer cache: %d hits %d misses (%2.2f%%); hash width %d, depth %d\n",
	    cachehits, cachemisses,
	    lookups ? (cachehits * 100.0) / lookups : 0.0,
	    hashmax, max_depth);
}

/*
 * Remove a buffer from the cache.
 * Caller must remove the buffer from its free list.
 */
void
buf_destroy(struct ubuf * bp)
{
	/*
	 * Set B_NEEDCOMMIT as a poison marker: the asserts in getblk()
	 * and brelse() will trip on a stale reference to a destroyed
	 * buffer.
	 */
	bp->b_flags |= B_NEEDCOMMIT;
	LIST_REMOVE(bp, b_vnbufs);
	LIST_REMOVE(bp, b_hash);
	if (!(bp->b_flags & B_DONTFREE))
		free(bp->b_data);
	free(bp);
	--nbufs;
}

/* Remove a buffer from its free list. */
void
bremfree(struct ubuf * bp)
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_flags & B_LOCKED) {
		locked_queue_bytes -= bp->b_bcount;
		--locked_queue_count;
	}
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			errx(1, "bremfree: lost tail");
	}
	++bp->b_vp->v_usecount;
	TAILQ_REMOVE(dp, bp, b_freelist);
}
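
/*
 * Sketch of the classic <sys/queue.h> behavior the NULL head relies
 * on: TAILQ_REMOVE(head, elm, field) dereferences head only when elm
 * is the last element,
 *
 *	if (TAILQ_NEXT(elm, field) != NULL)
 *		fix up the successor's back pointer
 *	else
 *		(head)->tqh_last = (elm)->field.tqe_prev;
 *
 * so dp may legitimately remain NULL whenever TAILQ_NEXT(bp,
 * b_freelist) is non-NULL.
 */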

/* Return a buffer if it is in the cache, otherwise return NULL. */
struct ubuf *
incore(struct uvnode * vp, daddr_t lbn)
{
	struct ubuf *bp;
	int hash, depth;

	hash = vl_hash(vp, lbn);
	/* XXX use a real hash instead. */
	depth = 0;
	LIST_FOREACH(bp, &bufhash[hash], b_hash) {
		if (++depth > max_depth)
			max_depth = depth;
		assert(depth <= nbufs);
		if (bp->b_vp == vp && bp->b_lblkno == lbn) {
			return bp;
		}
	}
	return NULL;
}
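
/*
 * Usage sketch (illustrative): incore() is a pure lookup and leaves
 * the buffer on its free list, so a caller that wants exclusive use
 * must claim it, as getblk() below does:
 *
 *	if ((bp = incore(vp, lbn)) != NULL) {
 *		bp->b_flags |= B_BUSY;
 *		bremfree(bp);
 *	}
 */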

/*
 * Return a buffer of the given size, lbn and uvnode.
 * If none is in core, make a new one.
 */
struct ubuf *
getblk(struct uvnode * vp, daddr_t lbn, int size)
{
	struct ubuf *bp;
#ifdef DEBUG
	static int warned;
#endif

	/*
	 * First check the buffer cache lists.
	 * We might sometimes need to resize a buffer.  If we are growing
	 * the buffer, its contents are invalid; but shrinking is okay.
	 */
	if ((bp = incore(vp, lbn)) != NULL) {
		assert(!(bp->b_flags & B_NEEDCOMMIT));
		assert(!(bp->b_flags & B_BUSY));
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		if (bp->b_bcount == size)
			return bp;
		else if (bp->b_bcount > size) {
			assert(!(bp->b_flags & B_DELWRI));
			bp->b_bcount = size;
			bp->b_data = erealloc(bp->b_data, size);
			return bp;
		}

		buf_destroy(bp);
		bp = NULL;
	}

	/*
	 * Not on the list.
	 * Get a new block of the appropriate size and use that.
	 * If not enough space, free blocks from the AGE and LRU lists
	 * to make room.
	 */
	while (nbufs >= maxbufs + locked_queue_count) {
		bp = TAILQ_FIRST(&bufqueues[BQ_AGE]);
		if (bp)
			TAILQ_REMOVE(&bufqueues[BQ_AGE], bp, b_freelist);
		if (bp == NULL) {
			bp = TAILQ_FIRST(&bufqueues[BQ_LRU]);
			if (bp)
				TAILQ_REMOVE(&bufqueues[BQ_LRU], bp,
				    b_freelist);
		}
		if (bp) {
			if (bp->b_flags & B_DELWRI)
				VOP_STRATEGY(bp);
			buf_destroy(bp);
			break;
		}
#ifdef DEBUG
		else if (!warned) {
			warnx("allocating more than %d buffers", maxbufs);
			++warned;
		}
#endif
		break;
	}
	++nbufs;
	bp = ecalloc(1, sizeof(*bp));
	bp->b_data = ecalloc(1, size);
	bp->b_vp = vp;
	bp->b_blkno = bp->b_lblkno = lbn;
	bp->b_bcount = size;
	bp->b_hashval = vl_hash(vp, lbn);
	LIST_INSERT_HEAD(&bufhash[bp->b_hashval], bp, b_hash);
	LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);
	bp->b_flags = B_BUSY;

	return bp;
}
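
/*
 * Usage note (illustrative): a buffer returned by getblk() is B_BUSY,
 * but its contents are valid only if B_DONE or B_DELWRI is set; a
 * freshly allocated (or grown) buffer comes back zero filled.  Callers
 * that need the on-disk contents should go through bread() below.
 */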

/* Write a buffer to disk according to its strategy routine. */
void
bwrite(struct ubuf * bp)
{
	bp->b_flags &= ~(B_READ | B_DONE | B_DELWRI | B_LOCKED);
	VOP_STRATEGY(bp);
	bp->b_flags |= B_DONE;
	reassignbuf(bp, bp->b_vp);
	brelse(bp, 0);
}

/* Put a buffer back on its free list, clear B_BUSY. */
void
brelse(struct ubuf * bp, int set)
{
	int age;

	assert(!(bp->b_flags & B_NEEDCOMMIT));
	assert(bp->b_flags & B_BUSY);

	bp->b_flags |= set;

	age = bp->b_flags & B_AGE;
	bp->b_flags &= ~(B_BUSY | B_AGE);
	if (bp->b_flags & B_INVAL) {
		buf_destroy(bp);
		return;
	}
	if (bp->b_flags & B_LOCKED) {
		locked_queue_bytes += bp->b_bcount;
		++locked_queue_count;
		TAILQ_INSERT_TAIL(&bufqueues[BQ_LOCKED], bp, b_freelist);
	} else if (age) {
		TAILQ_INSERT_TAIL(&bufqueues[BQ_AGE], bp, b_freelist);
	} else {
		TAILQ_INSERT_TAIL(&bufqueues[BQ_LRU], bp, b_freelist);
	}
	--bp->b_vp->v_usecount;

	/* Move to the front of the hash chain */
	if (LIST_FIRST(&bufhash[bp->b_hashval]) != bp) {
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&bufhash[bp->b_hashval], bp, b_hash);
	}
}
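
/*
 * Usage sketch (illustrative): the "set" argument lets the caller pick
 * the buffer's fate as it is released:
 *
 *	brelse(bp, 0);		normal release onto the LRU queue
 *	brelse(bp, B_AGE);	probably not needed again: AGE queue
 *	brelse(bp, B_INVAL);	contents invalid: destroy the buffer
 */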

/* Read the given block from disk, return it B_BUSY. */
int
bread(struct uvnode * vp, daddr_t lbn, int size, void * unused,
    struct ubuf ** bpp)
{
	struct ubuf *bp;
	daddr_t daddr;
	int error;

	bp = getblk(vp, lbn, size);
	*bpp = bp;
	if (bp->b_flags & (B_DELWRI | B_DONE)) {
		++cachehits;
		return 0;
	}
	++cachemisses;

	/*
	 * Not found.  Need to find that block's location on disk,
	 * and load it in.
	 */
	daddr = -1;
	error = VOP_BMAP(vp, lbn, &daddr);
	if (error)
		return error;	/* block lookup failed */
	bp->b_blkno = daddr;
	if (daddr >= 0) {
		bp->b_flags |= B_READ;
		return VOP_STRATEGY(bp);
	}
	memset(bp->b_data, 0, bp->b_bcount);
	return 0;
}
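
/*
 * Usage sketch (illustrative): the usual read-modify-write cycle,
 * assuming a caller-side block size fs_bsize (hypothetical name):
 *
 *	struct ubuf *bp;
 *
 *	if (bread(vp, lbn, fs_bsize, NULL, &bp) == 0) {
 *		modify bp->b_data as needed
 *		bwrite(bp);	(or brelse(bp, 0) if left unmodified)
 *	}
 *
 * Blocks with no disk address (VOP_BMAP yields daddr < 0) are returned
 * zero filled without any I/O.
 */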

/* Move a buffer between dirty and clean block lists. */
void
reassignbuf(struct ubuf * bp, struct uvnode * vp)
{
	LIST_REMOVE(bp, b_vnbufs);
	if (bp->b_flags & B_DELWRI) {
		LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
	} else {
		LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);
	}
}
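
/*
 * Note (illustrative): bwrite() above clears B_DELWRI before calling
 * reassignbuf(), so a written buffer always lands on the clean list;
 * conversely, a caller that sets B_DELWRI on a buffer should call
 * reassignbuf() to move it onto v_dirtyblkhd.
 */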

#ifdef DEBUG
void
dump_free_lists(void)
{
	struct ubuf *bp;
	int i;

	for (i = 0; i < BQUEUES; i++) {
		printf("==> free list %d:\n", i);
		TAILQ_FOREACH(bp, &bufqueues[i], b_freelist) {
			printf("vp %p lbn %" PRId64 " flags %lx\n",
				bp->b_vp, bp->b_lblkno, bp->b_flags);
		}
	}
}
#endif