/* $NetBSD: bufcache.c,v 1.18 2016/08/18 08:08:02 christos Exp $ */
/*-
 * Copyright (c) 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/mount.h>

#include <assert.h>
#include <err.h>
#include <inttypes.h>	/* PRId64, used by dump_free_lists() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <util.h>

#include "bufcache.h"
#include "vnode.h"

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3	/* number of free buffer queues */

#define	BQ_LOCKED	0	/* super-blocks &c */
#define	BQ_LRU		1	/* lru, useful buffers */
#define	BQ_AGE		2	/* rubbish */

TAILQ_HEAD(bqueues, ubuf) bufqueues[BQUEUES];

struct bufhash_struct *bufhash;

#define HASH_MAX 1024
int hashmax  = HASH_MAX;
int hashmask = (HASH_MAX - 1);

int maxbufs = BUF_CACHE_SIZE;
int nbufs = 0;
int cachehits = 0;
int cachemisses = 0;
int max_depth = 0;
off_t locked_queue_bytes = 0;
int locked_queue_count = 0;

/* Simple buffer hash function */
static int
vl_hash(struct uvnode * vp, daddr_t lbn)
{
	return (int)((unsigned long) vp + lbn) & hashmask;
}

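/*
 * Illustrative only: with hashmax = 1024 (hashmask = 0x3ff), a buffer
 * for the vnode at address 0x800f2000 and logical block 5 hashes to
 * (0x800f2000 + 5) & 0x3ff == 0x5.  Masking works as a modulus only
 * because hashmax is kept a power of two by bufinit() and bufrehash().
 */
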
/* Initialize buffer cache */
void
bufinit(int max)
{
	int i;

	if (max) {
		for (hashmax = 1; hashmax < max; hashmax <<= 1)
			;
		hashmask = hashmax - 1;
	}

	for (i = 0; i < BQUEUES; i++) {
		TAILQ_INIT(&bufqueues[i]);
	}
	bufhash = emalloc(hashmax * sizeof(*bufhash));
	for (i = 0; i < hashmax; i++)
		LIST_INIT(&bufhash[i]);
}

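/*
 * Example: bufinit(1000) rounds the requested width up to the next
 * power of two, leaving hashmax == 1024 and hashmask == 1023, while
 * bufinit(0) keeps the HASH_MAX default.
 */
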
/* Widen the hash table. */
void
bufrehash(int max)
{
	int i, newhashmax;
	struct ubuf *bp, *nbp;
	struct bufhash_struct *np;

	if (max < 0 || max <= hashmax)
		return;

	/* Round up to a power of two */
	for (newhashmax = 1; newhashmax < max; newhashmax <<= 1)
		;

	/* Update the mask right away so vl_hash() uses it */
	hashmask = newhashmax - 1;

	/* Allocate a new, empty hash table; emalloc() exits on failure */
	np = emalloc(newhashmax * sizeof(*bufhash));
	for (i = 0; i < newhashmax; i++)
		LIST_INIT(&np[i]);

	/* Now reassign all existing buffers to their new hash chains. */
	for (i = 0; i < hashmax; i++) {
		bp = LIST_FIRST(&bufhash[i]);
		while (bp) {
			nbp = LIST_NEXT(bp, b_hash);
			LIST_REMOVE(bp, b_hash);
			bp->b_hashval = vl_hash(bp->b_vp, bp->b_lblkno);
			LIST_INSERT_HEAD(&np[bp->b_hashval], bp, b_hash);
			bp = nbp;
		}
	}

	/* Switch over and clean up */
	free(bufhash);
	bufhash = np;
	hashmax = newhashmax;
}

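/*
 * Ordering note: hashmask is updated before the rehash loop so that
 * the vl_hash() calls above compute indices into the new table, and
 * bufhash/hashmax switch over only after every buffer has moved.
 * This is safe because fsck_lfs is single-threaded, so no lookup can
 * observe the intermediate state.
 */
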
/* Print statistics of buffer cache usage */
void
bufstats(void)
{
	int total = cachehits + cachemisses;

	printf("buffer cache: %d hits %d misses (%2.2f%%); hash width %d, depth %d\n",
	    cachehits, cachemisses,
	    total ? (cachehits * 100.0) / total : 0.0,
	    hashmax, max_depth);
}

/*
 * Remove a buffer from the cache.
 * Caller must remove the buffer from its free list.
 */
void
buf_destroy(struct ubuf * bp)
{
	LIST_REMOVE(bp, b_vnbufs);
	LIST_REMOVE(bp, b_hash);
	if (!(bp->b_flags & B_DONTFREE))
		free(bp->b_data);
	free(bp);
	--nbufs;
}

/* Remove a buffer from its free list. */
void
bremfree(struct ubuf * bp)
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_flags & B_LOCKED) {
		locked_queue_bytes -= bp->b_bcount;
		--locked_queue_count;
	}
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			errx(1, "bremfree: lost tail");
	}
	++bp->b_vp->v_usecount;
	TAILQ_REMOVE(dp, bp, b_freelist);
}

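/*
 * The assumption above, spelled out: with debugging disabled,
 * <sys/queue.h>'s TAILQ_REMOVE expands to roughly
 *
 *	if ((elm)->field.tqe_next != NULL)
 *		(elm)->field.tqe_next->field.tqe_prev =
 *		    (elm)->field.tqe_prev;
 *	else
 *		(head)->tqh_last = (elm)->field.tqe_prev;
 *	*(elm)->field.tqe_prev = (elm)->field.tqe_next;
 *
 * so the head is dereferenced only when removing the last element,
 * which is exactly the case bremfree() computes dp for; otherwise dp
 * stays NULL and is never touched.
 */
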
/* Return a buffer if it is in the cache, otherwise return NULL. */
struct ubuf *
incore(struct uvnode * vp, daddr_t lbn)
{
	struct ubuf *bp;
	int hash, depth;

	hash = vl_hash(vp, lbn);
	/* XXX use a real hash instead. */
	depth = 0;
	LIST_FOREACH(bp, &bufhash[hash], b_hash) {
		if (++depth > max_depth)
			max_depth = depth;
		assert(depth <= nbufs);
		if (bp->b_vp == vp && bp->b_lblkno == lbn) {
			return bp;
		}
	}
	return NULL;
}

/*
 * Return a buffer of the given size, lbn and uvnode.
 * If none is in core, make a new one.
 */
struct ubuf *
getblk(struct uvnode * vp, daddr_t lbn, int size)
{
	struct ubuf *bp;
#ifdef DEBUG
	static int warned;
#endif

	/*
	 * First check the buffer cache lists.
	 * We might sometimes need to resize a buffer.  If we are growing
	 * the buffer, its contents are invalid; but shrinking is okay.
	 */
	if ((bp = incore(vp, lbn)) != NULL) {
		assert(!(bp->b_flags & B_BUSY));
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		if (bp->b_bcount == size)
			return bp;
		else if (bp->b_bcount > size) {
			assert(!(bp->b_flags & B_DELWRI));
			bp->b_bcount = size;
			bp->b_data = erealloc(bp->b_data, size);
			return bp;
		}

		buf_destroy(bp);
		bp = NULL;
	}

	/*
	 * Not on the list.
	 * Get a new block of the appropriate size and use that.
	 * If not enough space, free blocks from the AGE and LRU lists
	 * to make room.
	 */
	while (nbufs >= maxbufs + locked_queue_count) {
		bp = TAILQ_FIRST(&bufqueues[BQ_AGE]);
		if (bp)
			TAILQ_REMOVE(&bufqueues[BQ_AGE], bp, b_freelist);
		if (bp == NULL) {
			bp = TAILQ_FIRST(&bufqueues[BQ_LRU]);
			if (bp)
				TAILQ_REMOVE(&bufqueues[BQ_LRU], bp,
				    b_freelist);
		}
		if (bp) {
			if (bp->b_flags & B_DELWRI)
				VOP_STRATEGY(bp);
			buf_destroy(bp);
			break;
		}
#ifdef DEBUG
		else if (!warned) {
			warnx("allocating more than %d buffers", maxbufs);
			++warned;
		}
#endif
		break;
	}
	++nbufs;
	bp = ecalloc(1, sizeof(*bp));
	bp->b_data = ecalloc(1, size);
	bp->b_vp = vp;
	bp->b_blkno = bp->b_lblkno = lbn;
	bp->b_bcount = size;
	bp->b_hashval = vl_hash(vp, lbn);
	LIST_INSERT_HEAD(&bufhash[bp->b_hashval], bp, b_hash);
	LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);
	bp->b_flags = B_BUSY;

	return bp;
}

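/*
 * Eviction policy, summarized: when the cache is full, getblk() frees
 * at most one victim per call, preferring the AGE queue over the LRU
 * queue and flushing the victim first via VOP_STRATEGY() if it holds a
 * delayed write.  Buffers on BQ_LOCKED are never reclaimed, which is
 * why the limit is maxbufs + locked_queue_count rather than maxbufs.
 */
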
/* Write a buffer to disk according to its strategy routine. */
void
bwrite(struct ubuf * bp)
{
	bp->b_flags &= ~(B_READ | B_DONE | B_DELWRI | B_LOCKED);
	VOP_STRATEGY(bp);
	bp->b_flags |= B_DONE;
	reassignbuf(bp, bp->b_vp);
	brelse(bp, 0);
}

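/*
 * Note (assumed usage): a caller that marks a buffer B_DELWRI via
 * brelse() instead of calling bwrite() defers the VOP_STRATEGY() until
 * the buffer is reclaimed by getblk() above or written explicitly
 * later.
 */
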
/* Put a buffer back on its free list, clear B_BUSY. */
void
brelse(struct ubuf * bp, int set)
{
	int age;

	assert(bp->b_flags & B_BUSY);

	bp->b_flags |= set;

	age = bp->b_flags & B_AGE;
	bp->b_flags &= ~(B_BUSY | B_AGE);
	if (bp->b_flags & B_INVAL) {
		buf_destroy(bp);
		return;
	}
	if (bp->b_flags & B_LOCKED) {
		locked_queue_bytes += bp->b_bcount;
		++locked_queue_count;
		TAILQ_INSERT_TAIL(&bufqueues[BQ_LOCKED], bp, b_freelist);
	} else if (age) {
		TAILQ_INSERT_TAIL(&bufqueues[BQ_AGE], bp, b_freelist);
	} else {
		TAILQ_INSERT_TAIL(&bufqueues[BQ_LRU], bp, b_freelist);
	}
	--bp->b_vp->v_usecount;

	/* Move to the front of the hash chain */
	if (LIST_FIRST(&bufhash[bp->b_hashval]) != bp) {
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&bufhash[bp->b_hashval], bp, b_hash);
	}
}

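/*
 * Illustrative values for the "set" argument: brelse(bp, 0) simply
 * requeues the buffer; brelse(bp, B_AGE) requeues it for early reuse;
 * brelse(bp, B_INVAL) drops it from the cache entirely.
 */
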
/* Read the given block from disk, return it B_BUSY. */
int
bread(struct uvnode * vp, daddr_t lbn, int size, int flags, struct ubuf ** bpp)
{
	struct ubuf *bp;
	daddr_t daddr;

	bp = getblk(vp, lbn, size);
	*bpp = bp;
	if (bp->b_flags & (B_DELWRI | B_DONE)) {
		++cachehits;
		return 0;
	}
	++cachemisses;

	/*
	 * Cache miss.  Look up the block's location on disk and load
	 * it in; a block with no disk address reads back as zeros.
	 */
	daddr = -1;
	(void)VOP_BMAP(vp, lbn, &daddr);
	bp->b_blkno = daddr;
	if (daddr >= 0) {
		bp->b_flags |= B_READ;
		return VOP_STRATEGY(bp);
	}
	memset(bp->b_data, 0, bp->b_bcount);
	return 0;
}

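/*
 * Typical caller pattern (hypothetical; names such as fs->lfs_bsize
 * are placeholders, and error handling is elided):
 *
 *	struct ubuf *bp;
 *
 *	if (bread(vp, lbn, fs->lfs_bsize, 0, &bp) != 0)
 *		err(1, "bread");
 *	... inspect or modify bp->b_data ...
 *	brelse(bp, 0);	(or bwrite(bp) to push the change out)
 *
 * The buffer comes back B_BUSY and stays off the free lists until
 * brelse() or bwrite() releases it.
 */
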
/* Move a buffer between dirty and clean block lists. */
void
reassignbuf(struct ubuf * bp, struct uvnode * vp)
{
	LIST_REMOVE(bp, b_vnbufs);
	if (bp->b_flags & B_DELWRI) {
		LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
	} else {
		LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);
	}
}

#ifdef DEBUG
void
dump_free_lists(void)
{
	struct ubuf *bp;
	int i;

	/* Walk every free queue, not just BQ_LOCKED (queue 0). */
	for (i = 0; i < BQUEUES; i++) {
		printf("==> free list %d:\n", i);
		TAILQ_FOREACH(bp, &bufqueues[i], b_freelist) {
			printf("vp %p lbn %" PRId64 " flags %lx\n",
				bp->b_vp, bp->b_lblkno, bp->b_flags);
		}
	}
}
#endif