/* xref: /minix3/minix/lib/libminixfs/cache.c
 * (revision 3956ee9eeda988648c3f2092c7d31faa0a0e7294)
 */

#define _SYSTEM

#include <assert.h>
#include <string.h>
#include <errno.h>
#include <math.h>
#include <stdlib.h>

#include <machine/vmparam.h>

#include <sys/param.h>
#include <sys/mman.h>

#include <minix/dmap.h>
#include <minix/libminixfs.h>
#include <minix/syslib.h>
#include <minix/sysutil.h>
#include <minix/u64.h>
#include <minix/bdev.h>
#include <minix/bitmap.h>

#include "inc.h"

/* Buffer (block) cache.  To acquire a block, a routine calls lmfs_get_block(),
 * telling which block it wants.  The block is then regarded as "in use" and
 * has its reference count incremented.  All the blocks that are not in use are
 * chained together in an LRU list, with 'front' pointing to the least recently
 * used block, and 'rear' to the most recently used block.  A reverse chain is
 * also maintained.  Usage for LRU is measured by the time at which put_block()
 * is done.  The second parameter to put_block() can violate the LRU order and
 * put a block on the front of the list, if it will probably not be needed
 * again.  This is used internally only; the lmfs_put_block() API call has no
 * second parameter.  If a block is modified, the modifying routine must mark
 * the block as dirty, so the block will eventually be rewritten to the disk.
 */
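
/* Illustrative usage sketch (an addition for clarity, not part of the
 * original source): a file system built on this cache typically acquires,
 * modifies, and releases a block as follows, given a device 'dev' and a
 * block number 'bno':
 *
 *	struct buf *bp;
 *	int r;
 *
 *	if ((r = lmfs_get_block(&bp, dev, bno, NORMAL)) != OK)
 *		return r;		(block could not be obtained)
 *	... modify bp->data ...
 *	lmfs_markdirty(bp);		(so it is eventually written back)
 *	lmfs_put_block(bp);		(release it back to the LRU chain)
 */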

/* Flags to put_block(). */
#define ONE_SHOT      0x1	/* set if block will not be needed again */

#define BUFHASH(b) ((unsigned int)((b) % nr_bufs))
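/* For example, with nr_bufs = 100, block 1234 is hashed onto chain
 * 1234 % 100 = 34.
 */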
#define MARKCLEAN  lmfs_markclean

#define MINBUFS 6	/* minimal number of bufs for sanity check */

static struct buf *front;       /* points to least recently used free block */
static struct buf *rear;        /* points to most recently used free block */
static unsigned int bufs_in_use;/* # bufs currently in use (not on free list)*/

static void rm_lru(struct buf *bp);
static int read_block(struct buf *bp, size_t size);
static void freeblock(struct buf *bp);
static void cache_heuristic_check(void);
static void put_block(struct buf *bp, int put_flags);

static int vmcache = 0; /* are we using vm's secondary cache? (initially not) */

static struct buf *buf;
static struct buf **buf_hash;   /* the buffer hash table */
static unsigned int nr_bufs;
static int may_use_vmcache;

static size_t fs_block_size = PAGE_SIZE;	/* raw i/o block size */

static fsblkcnt_t fs_btotal = 0, fs_bused = 0;

static int quiet = 0;

void lmfs_setquiet(int q) { quiet = q; }

static int fs_bufs_heuristic(int minbufs, fsblkcnt_t btotal,
	fsblkcnt_t bused, int blocksize)
{
  struct vm_stats_info vsi;
  int bufs;
  u32_t kbytes_used_fs, kbytes_total_fs, kbcache, kb_fsmax;
  u32_t kbytes_remain_mem;

  /* Set a reasonable cache size: cache at most a certain portion of the used
   * FS, and at most a certain percentage of the remaining memory.
   */
  if(vm_info_stats(&vsi) != OK) {
	bufs = 1024;
	if(!quiet)
	  printf("fslib: heuristic info fail: default to %d bufs\n", bufs);
	return bufs;
  }

  /* Remaining free memory is unused memory plus memory in use for the cache,
   * as the cache can be evicted.
   */
  kbytes_remain_mem = (u64_t)(vsi.vsi_free + vsi.vsi_cached) *
	vsi.vsi_pagesize / 1024;

  /* Check fs usage. */
  kbytes_used_fs  = (unsigned long)(((u64_t)bused * blocksize) / 1024);
  kbytes_total_fs = (unsigned long)(((u64_t)btotal * blocksize) / 1024);

  /* Heuristic for a desired cache size based on FS usage;
   * but never bigger than half of the total file system.
   */
  kb_fsmax = sqrt_approx(kbytes_used_fs)*40;
  kb_fsmax = MIN(kb_fsmax, kbytes_total_fs/2);

  /* Heuristic for the maximum usage: 10% of remaining memory. */
  kbcache = MIN(kbytes_remain_mem/10, kb_fsmax);
  bufs = kbcache * 1024 / blocksize;

  /* But we simply need at least 'minbufs', no matter what. */
  if(bufs < minbufs)
	bufs = minbufs;

  return bufs;
}

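/* Worked example of the heuristic above (illustrative numbers only): with
 * 4 KB blocks and 1 GB of the file system in use, kbytes_used_fs is 1048576,
 * so kb_fsmax = sqrt(1048576) * 40 = roughly 1024 * 40 = 40960 KB, assuming
 * the total file system exceeds 80 MB so the half-of-FS cap does not apply.
 * With 2 GB of evictable memory remaining, the 10% rule allows 209715 KB, so
 * kbcache = MIN(209715, 40960) = 40960 KB, which comes down to 10240
 * four-kilobyte buffers.
 */
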
116 
117 void lmfs_change_blockusage(int delta)
118 {
119         /* Change the number of allocated blocks by 'delta.'
120          * Also accumulate the delta since the last cache re-evaluation.
121          * If it is outside a certain band, ask the cache library to
122          * re-evaluate the cache size.
123          */
124         static int bitdelta = 0, warn_low = TRUE, warn_high = TRUE;
125 
126 	/* Adjust the file system block usage counter accordingly. Do bounds
127 	 * checking, and report file system misbehavior.
128 	 */
129 	if (delta > 0 && (fsblkcnt_t)delta > fs_btotal - fs_bused) {
130 		if (warn_high) {
131 			printf("libminixfs: block usage overflow\n");
132 			warn_high = FALSE;
133 		}
134 		delta = (int)(fs_btotal - fs_bused);
135 	} else if (delta < 0 && (fsblkcnt_t)-delta > fs_bused) {
136 		if (warn_low) {
137 			printf("libminixfs: block usage underflow\n");
138 			warn_low = FALSE;
139 		}
140 		delta = -(int)fs_bused;
141 	}
142 	fs_bused += delta;
143 
144 	bitdelta += delta;
145 
146 #define BAND_KB (10*1024)	/* recheck cache every 10MB change */
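	/* With 4 KB blocks, BAND_KB corresponds to a change of 2560 blocks. */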

	/* If the accumulated delta exceeds the configured threshold, resize
	 * the cache, but only if the cache isn't in use any more. In order to
	 * avoid that the latter case blocks a resize forever, we also call
	 * this function from lmfs_flushall(). Since lmfs_buf_pool() may call
	 * lmfs_flushall(), reset 'bitdelta' before doing the heuristics check.
	 */
	if (bufs_in_use == 0 &&
	    (bitdelta*(int)fs_block_size/1024 > BAND_KB ||
	    bitdelta*(int)fs_block_size/1024 < -BAND_KB)) {
		bitdelta = 0;
		cache_heuristic_check();
	}
}

void lmfs_markdirty(struct buf *bp)
{
	bp->lmfs_flags |= VMMC_DIRTY;
}

void lmfs_markclean(struct buf *bp)
{
	bp->lmfs_flags &= ~VMMC_DIRTY;
}

int lmfs_isclean(struct buf *bp)
{
	return !(bp->lmfs_flags & VMMC_DIRTY);
}

static void free_unused_blocks(void)
{
	struct buf *bp;

	int freed = 0, bytes = 0;
	printf("libminixfs: freeing; %u blocks in use\n", bufs_in_use);
	for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
		if(bp->lmfs_bytes > 0 && bp->lmfs_count == 0) {
			freed++;
			bytes += bp->lmfs_bytes;
			freeblock(bp);
		}
	}
	printf("libminixfs: freeing; %d blocks, %d bytes\n", freed, bytes);
}

static void lmfs_alloc_block(struct buf *bp, size_t block_size)
{
  ASSERT(!bp->data);
  ASSERT(bp->lmfs_bytes == 0);

  if((bp->data = mmap(0, block_size, PROT_READ|PROT_WRITE,
      MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
	free_unused_blocks();
	if((bp->data = mmap(0, block_size, PROT_READ|PROT_WRITE,
		MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
		panic("libminixfs: could not allocate block");
	}
  }
  assert(bp->data);
  bp->lmfs_bytes = block_size;
  bp->lmfs_needsetcache = 1;
}

/*===========================================================================*
 *				lmfs_get_block				     *
 *===========================================================================*/
int lmfs_get_block(struct buf **bpp, dev_t dev, block64_t block, int how)
{
	return lmfs_get_block_ino(bpp, dev, block, how, VMC_NO_INODE, 0);
}

static void munmap_t(void *a, int len)
{
	vir_bytes av = (vir_bytes) a;
	assert(a);
	assert(a != MAP_FAILED);
	assert(len > 0);
	assert(!(av % PAGE_SIZE));

	len = roundup(len, PAGE_SIZE);

	assert(!(len % PAGE_SIZE));

	if(munmap(a, len) < 0)
		panic("libminixfs cache: munmap failed");
}

static void raisecount(struct buf *bp)
{
  assert(bufs_in_use >= 0);
  ASSERT(bp->lmfs_count >= 0);
  bp->lmfs_count++;
  if(bp->lmfs_count == 1) bufs_in_use++;
  assert(bufs_in_use > 0);
}

static void lowercount(struct buf *bp)
{
  assert(bufs_in_use > 0);
  ASSERT(bp->lmfs_count > 0);
  bp->lmfs_count--;
  if(bp->lmfs_count == 0) bufs_in_use--;
  assert(bufs_in_use >= 0);
}

static void freeblock(struct buf *bp)
{
  ASSERT(bp->lmfs_count == 0);
  /* If the block taken is dirty, make it clean by writing it to the disk.
   * Avoid hysteresis by flushing all other dirty blocks for the same device.
   */
  if (bp->lmfs_dev != NO_DEV) {
	if (!lmfs_isclean(bp)) lmfs_flushdev(bp->lmfs_dev);
	assert(bp->lmfs_bytes > 0);
	bp->lmfs_dev = NO_DEV;
  }

  /* Fill in block's parameters and add it to the hash chain where it goes. */
  MARKCLEAN(bp);		/* NO_DEV blocks may be marked dirty */
  if(bp->lmfs_bytes > 0) {
	assert(bp->data);
	munmap_t(bp->data, bp->lmfs_bytes);
	bp->lmfs_bytes = 0;
	bp->data = NULL;
  } else assert(!bp->data);
}

/*===========================================================================*
 *				find_block				     *
 *===========================================================================*/
static struct buf *find_block(dev_t dev, block64_t block)
{
/* Search the hash chain for (dev, block). Return the buffer structure if
 * found, or NULL otherwise.
 */
  struct buf *bp;
  int b;

  assert(dev != NO_DEV);

  b = BUFHASH(block);
  for (bp = buf_hash[b]; bp != NULL; bp = bp->lmfs_hash)
	if (bp->lmfs_blocknr == block && bp->lmfs_dev == dev)
		return bp;

  return NULL;
}

/*===========================================================================*
 *				get_block_ino				     *
 *===========================================================================*/
static int get_block_ino(struct buf **bpp, dev_t dev, block64_t block, int how,
	ino_t ino, u64_t ino_off, size_t block_size)
{
/* Check to see if the requested block is in the block cache.  The requested
 * block is identified by the block number in 'block' on device 'dev', counted
 * in the file system block size.  The amount of data requested for this block
 * is given in 'block_size', which may be less than the file system block size
 * iff the requested block is the last (partial) block on a device.  Note that
 * the given block size does *not* affect the conversion of 'block' to a byte
 * offset!  Either way, if the block could be obtained, either from the cache
 * or by reading from the device, return OK, with a pointer to the buffer
 * structure stored in 'bpp'.  If not, return a negative error code (and no
 * buffer).  If necessary, evict some other block and fetch the contents from
 * disk (if 'how' is NORMAL).  If 'how' is NO_READ, the caller intends to
 * overwrite the requested block in its entirety, so it is only necessary to
 * see if it is in the cache; if it is not, any free buffer will do.  If 'how'
 * is PEEK, the function returns the block if it is in the cache or the VM
 * cache, and an ENOENT error code otherwise.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */
  int b, r;
  static struct buf *bp;
  uint64_t dev_off;
  struct buf *prev_ptr;

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  assert(dev != NO_DEV);

  assert(block <= UINT64_MAX / fs_block_size);

  dev_off = block * fs_block_size;

  if((ino_off % fs_block_size)) {
	printf("cache: unaligned lmfs_get_block_ino ino_off %llu\n",
		ino_off);
	util_stacktrace();
  }

  /* See if the block is in the cache. If so, we can return it right away. */
  bp = find_block(dev, block);
  if (bp != NULL && !(bp->lmfs_flags & VMMC_EVICTED)) {
	ASSERT(bp->lmfs_dev == dev);
	ASSERT(bp->lmfs_dev != NO_DEV);

	/* The block must have exactly the requested number of bytes. */
	if (bp->lmfs_bytes != block_size)
		return EIO;

	/* Block needed has been found. */
	if (bp->lmfs_count == 0) {
		rm_lru(bp);
		ASSERT(bp->lmfs_needsetcache == 0);
		ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
		/* FIXME: race condition against the VMMC_EVICTED check */
		bp->lmfs_flags |= VMMC_BLOCK_LOCKED;
	}
	raisecount(bp);
	ASSERT(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
	ASSERT(bp->data);

	if(ino != VMC_NO_INODE) {
		if(bp->lmfs_inode == VMC_NO_INODE
		|| bp->lmfs_inode != ino
		|| bp->lmfs_inode_offset != ino_off) {
			bp->lmfs_inode = ino;
			bp->lmfs_inode_offset = ino_off;
			bp->lmfs_needsetcache = 1;
		}
	}

	*bpp = bp;
	return OK;
  }

  /* We had the block in the cache but VM evicted it; invalidate it. */
  if (bp != NULL) {
	assert(bp->lmfs_flags & VMMC_EVICTED);
	ASSERT(bp->lmfs_count == 0);
	ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
	ASSERT(!(bp->lmfs_flags & VMMC_DIRTY));
	bp->lmfs_dev = NO_DEV;
	bp->lmfs_bytes = 0;
	bp->data = NULL;
  }

  /* Desired block is not on available chain. Find a free block to use. */
  if(bp) {
	ASSERT(bp->lmfs_flags & VMMC_EVICTED);
  } else {
	if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);
  }
  assert(bp);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->lmfs_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->lmfs_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->lmfs_hash != NULL)
		if (prev_ptr->lmfs_hash == bp) {
			prev_ptr->lmfs_hash = bp->lmfs_hash;	/* found it */
			break;
		} else {
			prev_ptr = prev_ptr->lmfs_hash;	/* keep looking */
		}
  }

  freeblock(bp);

  bp->lmfs_inode = ino;
  bp->lmfs_inode_offset = ino_off;

  bp->lmfs_flags = VMMC_BLOCK_LOCKED;
  bp->lmfs_needsetcache = 0;
  bp->lmfs_dev = dev;		/* fill in device number */
  bp->lmfs_blocknr = block;	/* fill in block number */
  ASSERT(bp->lmfs_count == 0);
  raisecount(bp);
  b = BUFHASH(bp->lmfs_blocknr);
  bp->lmfs_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  assert(dev != NO_DEV);

  /* The block is not found in our cache, but we do want it if it's in the VM
   * cache. The exception is NO_READ, purely for context switching performance
   * reasons. NO_READ is used for 1) newly allocated blocks, 2) blocks being
   * prefetched, and 3) blocks about to be fully overwritten. In the first two
   * cases, VM will not have the block in its cache anyway, and for the third
   * we save on one VM call only if the block is in the VM cache.
   */
  assert(!bp->data);
  assert(!bp->lmfs_bytes);
  if (how != NO_READ && vmcache) {
	if((bp->data = vm_map_cacheblock(dev, dev_off, ino, ino_off,
	    &bp->lmfs_flags, roundup(block_size, PAGE_SIZE))) != MAP_FAILED) {
		bp->lmfs_bytes = block_size;
		ASSERT(!bp->lmfs_needsetcache);
		*bpp = bp;
		return OK;
	}
  }
  bp->data = NULL;

  /* The block is not in the cache, and VM does not know about it. If we were
   * requested to search for the block only, we can now return failure to the
   * caller. Return the block to the pool without allocating data pages, since
   * these would be freed upon recycling the block anyway.
   */
  if (how == PEEK) {
	bp->lmfs_dev = NO_DEV;

	put_block(bp, ONE_SHOT);

	return ENOENT;
  }

  /* Not in the cache; reserve memory for its contents. */

  lmfs_alloc_block(bp, block_size);

  assert(bp->data);

  if (how == NORMAL) {
	/* Try to read the block. Return an error code on failure. */
	if ((r = read_block(bp, block_size)) != OK) {
		put_block(bp, 0);

		return r;
	}
  } else if(how == NO_READ) {
	/* This block will be overwritten by new contents. */
  } else
	panic("unexpected 'how' value: %d", how);

  assert(bp->data);

  *bpp = bp;			/* return the newly acquired block */
  return OK;
}
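
/* Caller-side sketch of the 'how' modes handled above (an illustrative
 * addition, not part of the original source): a file system that wants to
 * avoid disk I/O can first PEEK for a block, and fall back to a NORMAL
 * (reading) request only when the block is nowhere to be found:
 *
 *	if (lmfs_get_block_ino(&bp, dev, bno, PEEK, ino, off) == ENOENT)
 *		r = lmfs_get_block_ino(&bp, dev, bno, NORMAL, ino, off);
 *
 * NO_READ, in turn, is for blocks that are about to be overwritten in full,
 * so that no read I/O is spent on contents that would be discarded anyway.
 */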

/*===========================================================================*
 *				lmfs_get_block_ino			     *
 *===========================================================================*/
int lmfs_get_block_ino(struct buf **bpp, dev_t dev, block64_t block, int how,
	ino_t ino, u64_t ino_off)
{
  return get_block_ino(bpp, dev, block, how, ino, ino_off, fs_block_size);
}

/*===========================================================================*
 *				lmfs_get_partial_block			     *
 *===========================================================================*/
int lmfs_get_partial_block(struct buf **bpp, dev_t dev, block64_t block,
	int how, size_t block_size)
{
  return get_block_ino(bpp, dev, block, how, VMC_NO_INODE, 0, block_size);
}

/*===========================================================================*
 *				put_block				     *
 *===========================================================================*/
static void put_block(struct buf *bp, int put_flags)
{
/* Return a block to the list of available blocks.   Depending on 'put_flags'
 * it may be put on the front or rear of the LRU chain.  Blocks that are
 * expected to be needed again at some point go on the rear; blocks that are
 * unlikely to be needed again at all go on the front.
 */
  dev_t dev;
  uint64_t dev_off;
  int r, setflags;

  assert(bp != NULL);

  dev = bp->lmfs_dev;

  dev_off = bp->lmfs_blocknr * fs_block_size;

  lowercount(bp);
  if (bp->lmfs_count != 0) return;	/* block is still in use */

  /* Put this block back on the LRU chain.  */
  if (dev == NO_DEV || dev == DEV_RAM || (put_flags & ONE_SHOT)) {
	/* Block will not be needed again. Put it on front of chain.
	 * It will be the next block to be evicted from the cache.
	 */
	bp->lmfs_prev = NULL;
	bp->lmfs_next = front;
	if (front == NULL)
		rear = bp;	/* LRU chain was empty */
	else
		front->lmfs_prev = bp;
	front = bp;
  }
  else {
	/* Block may be needed again.  Put it on rear of chain.
	 * It will not be evicted from the cache for a long time.
	 */
	bp->lmfs_prev = rear;
	bp->lmfs_next = NULL;
	if (rear == NULL)
		front = bp;
	else
		rear->lmfs_next = bp;
	rear = bp;
  }

  assert(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
  bp->lmfs_flags &= ~VMMC_BLOCK_LOCKED;

  /* Block has sensible content; if necessary, identify it to VM. */
  if(vmcache && bp->lmfs_needsetcache && dev != NO_DEV) {
	assert(bp->data);

	setflags = (put_flags & ONE_SHOT) ? VMSF_ONCE : 0;

	if ((r = vm_set_cacheblock(bp->data, dev, dev_off, bp->lmfs_inode,
	    bp->lmfs_inode_offset, &bp->lmfs_flags,
	    roundup(bp->lmfs_bytes, PAGE_SIZE), setflags)) != OK) {
		if(r == ENOSYS) {
			printf("libminixfs: ENOSYS, disabling VM calls\n");
			vmcache = 0;
		} else if (r == ENOMEM) {
			/* Do not panic in this case. Running out of memory is
			 * bad, especially since it may lead to applications
			 * crashing when trying to access memory-mapped pages
			 * we haven't been able to pass off to the VM cache,
			 * but the entire file system crashing is always worse.
			 */
			printf("libminixfs: no memory for cache block!\n");
		} else {
			panic("libminixfs: setblock of %p dev 0x%llx off "
				"0x%llx failed\n", bp->data, dev, dev_off);
		}
	}
  }
  bp->lmfs_needsetcache = 0;

  /* Now that we (may) have given the block to VM, invalidate the block if it
   * is a one-shot block.  Otherwise, it may still be reobtained immediately
   * after, which could be a problem if VM already forgot the block and we are
   * expected to pass it to VM again, which then wouldn't happen.
   */
  if (put_flags & ONE_SHOT)
	bp->lmfs_dev = NO_DEV;
}

/*===========================================================================*
 *				lmfs_put_block				     *
 *===========================================================================*/
void lmfs_put_block(struct buf *bp)
{
/* User interface to put_block(). */

  if (bp == NULL) return;	/* for poorly written file systems */

  put_block(bp, 0);
}

/*===========================================================================*
 *				lmfs_free_block				     *
 *===========================================================================*/
void lmfs_free_block(dev_t dev, block64_t block)
{
/* The file system has just freed the given block. The block may previously
 * have been in use as data block for an inode. Therefore, we now need to tell
 * VM that the block is no longer associated with an inode. If we fail to do so
 * and the inode now has a hole at this location, mapping in the hole would
 * yield the old block contents rather than a zeroed page. In addition, if the
 * block is in the cache, it will be removed, even if it was dirty.
 */
  struct buf *bp;
  int r;

  /* Tell VM to forget about the block. The primary purpose of this call is to
   * break the inode association, but since the block is part of a mounted file
   * system, it is not expected to be accessed directly anyway. So, save some
   * cache memory by throwing it out of the VM cache altogether.
   */
  if (vmcache) {
	if ((r = vm_forget_cacheblock(dev, block * fs_block_size,
	    fs_block_size)) != OK)
		printf("libminixfs: vm_forget_cacheblock failed (%d)\n", r);
  }

  if ((bp = find_block(dev, block)) != NULL) {
	lmfs_markclean(bp);

	/* Invalidate the block. The block may or may not be in use right now,
	 * so don't be smart about freeing memory or repositioning in the LRU.
	 */
	bp->lmfs_dev = NO_DEV;
  }

  /* Note that this is *not* the right place to implement TRIM support. Even
   * though the block is freed, on the device it may still be part of a
   * previous checkpoint or snapshot of some sort. Only the file system can
   * be trusted to decide which blocks can be reused on the device!
   */
}

/*===========================================================================*
 *				lmfs_zero_block_ino			     *
 *===========================================================================*/
void lmfs_zero_block_ino(dev_t dev, ino_t ino, u64_t ino_off)
{
/* Files may have holes. From an application perspective, these are just file
 * regions filled with zeroes. From a file system perspective however, holes
 * may represent unallocated regions on disk. Thus, these holes do not have
 * corresponding blocks on the disk, and therefore also no block number.
 * Therefore, we cannot simply use lmfs_get_block_ino() for them. For reads,
 * this is not a problem, since the file system can just zero out the target
 * application buffer instead. For mapped pages however, this *is* a problem,
 * since the VM cache needs to be told about the corresponding block, and VM
 * does not accept blocks without a device offset. The role of this function is
 * therefore to tell VM about the hole using a fake device offset. The device
 * offsets are picked so that the VM cache will see a block memory-mapped for
 * the hole in the file, while the same block is not visible when
 * memory-mapping the block device.
 */
  struct buf *bp;
  static block64_t fake_block = 0;
  int r;

  if (!vmcache)
	return;

  assert(fs_block_size > 0);

  /* Pick a block number which is above the threshold of what can possibly be
   * mapped in by mmap'ing the device, since off_t is signed, and it is safe to
   * say that it will take a while before we have 8-exabyte devices. Pick a
   * different block number each time to avoid possible concurrency issues.
   * FIXME: it does not seem like VM actually verifies mmap offsets though..
   */
  if (fake_block == 0 || ++fake_block >= UINT64_MAX / fs_block_size)
	fake_block = ((uint64_t)INT64_MAX + 1) / fs_block_size;
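  /* With 4 KB blocks, for example, the first fake block number is
   * 2^63 / 4096 = 2^51, i.e., a device byte offset at the 8-exabyte mark
   * mentioned above.
   */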

  /* Obtain a block. */
  if ((r = lmfs_get_block_ino(&bp, dev, fake_block, NO_READ, ino,
      ino_off)) != OK)
	panic("libminixfs: getting a NO_READ block failed: %d", r);
  assert(bp != NULL);
  assert(bp->lmfs_dev != NO_DEV);

  /* The block is already zeroed, as it has just been allocated with mmap. File
   * systems do not rely on this assumption yet, so if VM ever gets changed to
   * not clear the blocks we allocate (e.g., by recycling pages in the VM cache
   * for the same process, which would be safe), we need to add a memset here.
   */

  /* Release the block. We don't expect it to be accessed ever again. Moreover,
   * if we keep the block around in the VM cache, it may erroneously be mapped
   * in beyond the file end later. Hence, use VMSF_ONCE when passing it to VM.
   * TODO: tell VM that it is an all-zeroes block, so that VM can deduplicate
   * all such pages in its cache.
   */
  put_block(bp, ONE_SHOT);
}

void lmfs_set_blockusage(fsblkcnt_t btotal, fsblkcnt_t bused)
{
  assert(bused <= btotal);
  fs_btotal = btotal;
  fs_bused = bused;

  /* If the cache isn't in use, we can consider resizing it now. */
  if (bufs_in_use == 0)
	cache_heuristic_check();
}

/*===========================================================================*
 *				read_block				     *
 *===========================================================================*/
static int read_block(struct buf *bp, size_t block_size)
{
/* Read a disk block of 'block_size' bytes.  The given size is always the FS
 * block size, except for the last block of a device.  If an I/O error occurs,
 * invalidate the block and return an error code.
 */
  ssize_t r;
  off_t pos;
  dev_t dev = bp->lmfs_dev;

  assert(dev != NO_DEV);

  ASSERT(bp->lmfs_bytes == block_size);
  ASSERT(fs_block_size > 0);

  pos = (off_t)bp->lmfs_blocknr * fs_block_size;
  if (block_size > PAGE_SIZE) {
#define MAXPAGES 20
	vir_bytes blockrem, vaddr = (vir_bytes) bp->data;
	int p = 0;
	static iovec_t iovec[MAXPAGES];
	blockrem = block_size;
	while(blockrem > 0) {
		vir_bytes chunk = blockrem >= PAGE_SIZE ? PAGE_SIZE : blockrem;
		assert(p < MAXPAGES);	/* do not overrun the I/O vector */
		iovec[p].iov_addr = vaddr;
		iovec[p].iov_size = chunk;
		vaddr += chunk;
		blockrem -= chunk;
		p++;
	}
	r = bdev_gather(dev, pos, iovec, p, BDEV_NOFLAGS);
  } else {
	r = bdev_read(dev, pos, bp->data, block_size, BDEV_NOFLAGS);
  }
  if (r != (ssize_t)block_size) {
	printf("fs cache: I/O error on device %d/%d, block %"PRIu64" (%zd)\n",
	    major(dev), minor(dev), bp->lmfs_blocknr, r);
	if (r >= 0)
		r = EIO; /* TODO: retry retrieving (just) the remaining part */

	bp->lmfs_dev = NO_DEV;	/* invalidate block */

	return r;
  }

  return OK;
}

/*===========================================================================*
 *				lmfs_invalidate				     *
 *===========================================================================*/
void lmfs_invalidate(
  dev_t device			/* device whose blocks are to be purged */
)
{
/* Remove all the blocks belonging to some device from the cache. */

  register struct buf *bp;

  assert(device != NO_DEV);

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
	if (bp->lmfs_dev == device) {
		assert(bp->data);
		assert(bp->lmfs_bytes > 0);
		munmap_t(bp->data, bp->lmfs_bytes);
		bp->lmfs_dev = NO_DEV;
		bp->lmfs_bytes = 0;
		bp->data = NULL;
	}
  }

  /* Clear the cache even if VM caching is disabled for the file system:
   * caching may be disabled as side effect of an error, leaving blocks behind
   * in the actual VM cache.
   */
  vm_clear_cache(device);
}

/*===========================================================================*
 *				sort_blocks				     *
 *===========================================================================*/
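/* Sort the given buffers by block number, using Shell sort with the gap
 * sequence 1, 4, 13, 40, ... (each gap is 3g+1). For a queue of 30 buffers,
 * for instance, the initial gap of 40 is cut back to 13, then 4, with a
 * final pass at gap 1.
 */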
static void sort_blocks(struct buf **bufq, unsigned int bufqsize)
{
  struct buf *bp;
  int i, j, gap;

  gap = 1;
  do
	gap = 3 * gap + 1;
  while ((unsigned int)gap <= bufqsize);

  while (gap != 1) {
	gap /= 3;
	for (j = gap; (unsigned int)j < bufqsize; j++) {
		for (i = j - gap; i >= 0 &&
		    bufq[i]->lmfs_blocknr > bufq[i + gap]->lmfs_blocknr;
		    i -= gap) {
			bp = bufq[i];
			bufq[i] = bufq[i + gap];
			bufq[i + gap] = bp;
		}
	}
  }
}

/*===========================================================================*
 *				rw_scattered				     *
 *===========================================================================*/
static void rw_scattered(
  dev_t dev,			/* major-minor device number */
  struct buf **bufq,		/* pointer to array of buffers */
  unsigned int bufqsize,	/* number of buffers */
  int rw_flag			/* READING or WRITING */
)
{
/* Read or write scattered data from a device. */

  register struct buf *bp;
  register iovec_t *iop;
  static iovec_t iovec[NR_IOREQS];
  off_t pos;
  unsigned int i, iov_per_block;
  unsigned int start_in_use = bufs_in_use, start_bufqsize = bufqsize;

  if(bufqsize == 0) return;

  /* For READING, check that all buffers on the list are obtained and held
   * (count > 0).
   */
  if (rw_flag == READING) {
	assert(bufqsize <= LMFS_MAX_PREFETCH);

	for(i = 0; i < bufqsize; i++) {
		assert(bufq[i] != NULL);
		assert(bufq[i]->lmfs_count > 0);
	}

	/* Therefore they are all 'in use' and must be at least this many. */
	assert(start_in_use >= start_bufqsize);
  }

  assert(dev != NO_DEV);
  assert(fs_block_size > 0);
  assert(howmany(fs_block_size, PAGE_SIZE) <= NR_IOREQS);

  /* For WRITING, (Shell) sort buffers on lmfs_blocknr.
   * For READING, the buffers are already sorted.
   */
  if (rw_flag == WRITING)
	sort_blocks(bufq, bufqsize);

  /* Set up I/O vector and do I/O.  The result of bdev I/O is OK if everything
   * went fine, otherwise the error code for the first failed transfer.
   */
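  /* Each block contributes one I/O vector element per memory page. With 4 KB
   * pages and 8 KB blocks, for example, every block occupies two elements,
   * so at most NR_IOREQS / 2 blocks fit in a single gather/scatter request.
   */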
  while (bufqsize > 0) {
	unsigned int p, nblocks = 0, niovecs = 0;
	int r;
	for (iop = iovec; nblocks < bufqsize; nblocks++) {
		vir_bytes vdata, blockrem;
		bp = bufq[nblocks];
		if (bp->lmfs_blocknr != bufq[0]->lmfs_blocknr + nblocks)
			break;
		blockrem = bp->lmfs_bytes;
		iov_per_block = howmany(blockrem, PAGE_SIZE);
		if (niovecs > NR_IOREQS - iov_per_block) break;
		vdata = (vir_bytes) bp->data;
		for(p = 0; p < iov_per_block; p++) {
			vir_bytes chunk =
			    blockrem < PAGE_SIZE ? blockrem : PAGE_SIZE;
			iop->iov_addr = vdata;
			iop->iov_size = chunk;
			vdata += PAGE_SIZE;
			blockrem -= chunk;
			iop++;
			niovecs++;
		}
		assert(p == iov_per_block);
		assert(blockrem == 0);
	}

	assert(nblocks > 0);
	assert(niovecs > 0 && niovecs <= NR_IOREQS);

	pos = (off_t)bufq[0]->lmfs_blocknr * fs_block_size;
	if (rw_flag == READING)
		r = bdev_gather(dev, pos, iovec, niovecs, BDEV_NOFLAGS);
	else
		r = bdev_scatter(dev, pos, iovec, niovecs, BDEV_NOFLAGS);

	/* Harvest the results.  The driver may have returned an error, or it
	 * may have done less than what we asked for.
	 */
	if (r < 0) {
		printf("fs cache: I/O error %d on device %d/%d, "
		    "block %"PRIu64"\n",
		    r, major(dev), minor(dev), bufq[0]->lmfs_blocknr);
	}
	for (i = 0; i < nblocks; i++) {
		bp = bufq[i];
		if (r < (ssize_t)bp->lmfs_bytes) {
			/* Transfer failed. */
			if (i == 0) {
				bp->lmfs_dev = NO_DEV;	/* invalidate block */
			}
			break;
		}
		if (rw_flag == READING) {
			lmfs_put_block(bp);
		} else {
			MARKCLEAN(bp);
		}
		r -= bp->lmfs_bytes;
	}

	bufq += i;
	bufqsize -= i;

	if (rw_flag == READING) {
		/* Don't bother reading more than the device is willing to
		 * give at this time.  Don't forget to release those extras.
		 */
		while (bufqsize > 0) {
			bp = *bufq++;
			bp->lmfs_dev = NO_DEV;	/* invalidate block */
			lmfs_put_block(bp);
			bufqsize--;
		}
	}
	if (rw_flag == WRITING && i == 0) {
		/* We're not making progress, this means we might keep
		 * looping. Buffers remain dirty if un-written. Buffers are
		 * lost if invalidate()d or LRU-removed while dirty. This
		 * is better than keeping unwritable blocks around forever..
		 */
		break;
	}
  }

  if(rw_flag == READING) {
	assert(start_in_use >= start_bufqsize);

	/* READING callers assume all bufs are released. */
	assert(start_in_use - start_bufqsize == bufs_in_use);
  }
}

/*===========================================================================*
 *				lmfs_readahead				     *
 *===========================================================================*/
void lmfs_readahead(dev_t dev, block64_t base_block, unsigned int nblocks,
	size_t last_size)
{
/* Read ahead 'nblocks' blocks starting from the block 'base_block' on device
 * 'dev'. The number of blocks must be between 1 and LMFS_MAX_PREFETCH,
 * inclusive. All blocks have the file system's block size, possibly except the
 * last block in the range, which is of size 'last_size'. The caller must
 * ensure that none of the blocks in the range are already in the cache.
 * However, the caller must also not rely on all or even any of the blocks to
 * be present in the cache afterwards--failures are (deliberately!) ignored.
 */
  static struct buf *bufq[LMFS_MAX_PREFETCH]; /* static because of size only */
  struct buf *bp;
  unsigned int count;
  int r;

  assert(nblocks >= 1 && nblocks <= LMFS_MAX_PREFETCH);

  for (count = 0; count < nblocks; count++) {
	if (count == nblocks - 1)
		r = lmfs_get_partial_block(&bp, dev, base_block + count,
		    NO_READ, last_size);
	else
		r = lmfs_get_block(&bp, dev, base_block + count, NO_READ);

	if (r != OK)
		break;

	/* We could add a flag that makes the get_block() calls fail if the
	 * block is already in the cache, but it is not a major concern if it
	 * is: we just perform a useless read in that case. However, if the
	 * block is cached *and* dirty, we are about to lose its new contents.
	 */
	assert(lmfs_isclean(bp));

	bufq[count] = bp;
  }

  rw_scattered(dev, bufq, count, READING);
}

/*===========================================================================*
 *				lmfs_readahead_limit			     *
 *===========================================================================*/
unsigned int lmfs_readahead_limit(void)
{
/* Return the maximum number of blocks that should be read ahead at once. The
 * return value is guaranteed to be between 1 and LMFS_MAX_PREFETCH, inclusive.
 */
  unsigned int max_transfer, max_bufs;

  /* The returned value is the minimum of two factors: the maximum number of
   * blocks that can be transferred in a single I/O gather request (see how
   * rw_scattered() generates I/O requests), and a policy limit on the number
   * of buffers that any read-ahead operation may use (that is, thrash).
   */
  max_transfer = NR_IOREQS / MAX(fs_block_size / PAGE_SIZE, 1);
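  /* For example, with 4 KB pages and 16 KB blocks, each block needs four I/O
   * vector elements, so at most NR_IOREQS / 4 blocks fit in one request.
   */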

  /* The constants have been imported from MFS as is, and may need tuning. */
  if (nr_bufs < 50)
	max_bufs = 18;
  else
	max_bufs = nr_bufs - 4;

  return MIN(max_transfer, max_bufs);
}

/*===========================================================================*
 *				lmfs_prefetch				     *
 *===========================================================================*/
void lmfs_prefetch(dev_t dev, const block64_t *blockset, unsigned int nblocks)
{
/* The given set of blocks is expected to be needed soon, so prefetch a
 * convenient subset. The blocks are expected to be sorted by likelihood of
 * being accessed soon, making the first block of the set the most important
 * block to prefetch right now. The caller must have made sure that the blocks
 * are not in the cache already. The array may have duplicate block numbers.
 */
  bitchunk_t blocks_before[BITMAP_CHUNKS(LMFS_MAX_PREFETCH)];
  bitchunk_t blocks_after[BITMAP_CHUNKS(LMFS_MAX_PREFETCH)];
  block64_t block, base_block;
  unsigned int i, bit, nr_before, nr_after, span, limit, nr_blocks;

  if (nblocks == 0)
	return;

  /* Here is the deal. We are going to prefetch one range only, because seeking
   * is too expensive for just prefetching. The range we select should at least
   * include the first ("base") block of the given set, since that is the block
   * the caller is primarily interested in. Thus, the rest of the range is
   * going to have to be directly around this base block. We first check which
   * blocks from the set fall just before and after the base block, which then
   * allows us to construct a contiguous range of desired blocks directly
   * around the base block, in O(n) time. As a natural part of this, we ignore
   * duplicate blocks in the given set. We then read from the beginning of this
   * range, in order to maximize the chance that a next prefetch request will
   * continue from the last disk position without requiring a seek. However, we
   * do correct for the maximum number of blocks we can (or should) read in at
   * once, such that we will still end up reading the base block.
   */
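  /* Worked example: for blockset { 100, 98, 103, 99 }, the bitmaps record
   * blocks 98 and 99 before base block 100, and block 103 after it. Block
   * 103 is not contiguous with the base block, so nr_before = 2 and
   * nr_after = 0, yielding the range 98..100; assuming the read-ahead limit
   * allows at least three blocks, reading then starts at block 98.
   */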
  base_block = blockset[0];

  memset(blocks_before, 0, sizeof(blocks_before));
  memset(blocks_after, 0, sizeof(blocks_after));

  for (i = 1; i < nblocks; i++) {
	block = blockset[i];

	if (block < base_block && block + LMFS_MAX_PREFETCH >= base_block) {
		bit = base_block - block - 1;
		assert(bit < LMFS_MAX_PREFETCH);
		SET_BIT(blocks_before, bit);
	} else if (block > base_block &&
	    block - LMFS_MAX_PREFETCH <= base_block) {
		bit = block - base_block - 1;
		assert(bit < LMFS_MAX_PREFETCH);
		SET_BIT(blocks_after, bit);
	}
  }

  for (nr_before = 0; nr_before < LMFS_MAX_PREFETCH; nr_before++)
	if (!GET_BIT(blocks_before, nr_before))
		break;

  for (nr_after = 0; nr_after < LMFS_MAX_PREFETCH; nr_after++)
	if (!GET_BIT(blocks_after, nr_after))
		break;

  /* The number of blocks to prefetch is the minimum of two factors: the number
   * of blocks in the range around the base block, and the maximum number of
   * blocks that should be read ahead at once at all.
   */
  span = nr_before + 1 + nr_after;
  limit = lmfs_readahead_limit();

  nr_blocks = MIN(span, limit);
  assert(nr_blocks >= 1 && nr_blocks <= LMFS_MAX_PREFETCH);

  /* Start prefetching from the lowest block within the contiguous range, but
   * make sure that we read at least the original base block itself, too.
   */
  base_block -= MIN(nr_before, nr_blocks - 1);

  lmfs_readahead(dev, base_block, nr_blocks, fs_block_size);
}

/*===========================================================================*
 *				lmfs_flushdev				     *
 *===========================================================================*/
void lmfs_flushdev(dev_t dev)
{
/* Flush all dirty blocks for one device. */

  register struct buf *bp;
  static struct buf **dirty;
  static unsigned int dirtylistsize = 0;
  unsigned int ndirty;

  if(dirtylistsize != nr_bufs) {
	if(dirtylistsize > 0) {
		assert(dirty != NULL);
		free(dirty);
	}
	if(!(dirty = malloc(sizeof(dirty[0])*nr_bufs)))
		panic("couldn't allocate dirty buf list");
	dirtylistsize = nr_bufs;
  }

  for (bp = &buf[0], ndirty = 0; bp < &buf[nr_bufs]; bp++) {
	/* Do not flush dirty blocks that are in use (lmfs_count>0): the file
	 * system may mark the block as dirty before changing its contents, in
	 * which case the new contents could end up being lost.
	 */
	if (!lmfs_isclean(bp) && bp->lmfs_dev == dev && bp->lmfs_count == 0) {
		dirty[ndirty++] = bp;
	}
  }

  rw_scattered(dev, dirty, ndirty, WRITING);
}

/*===========================================================================*
 *				rm_lru					     *
 *===========================================================================*/
static void rm_lru(struct buf *bp)
{
/* Remove a block from its LRU chain. */
  struct buf *next_ptr, *prev_ptr;

  next_ptr = bp->lmfs_next;	/* successor on LRU chain */
  prev_ptr = bp->lmfs_prev;	/* predecessor on LRU chain */
  if (prev_ptr != NULL)
	prev_ptr->lmfs_next = next_ptr;
  else
	front = next_ptr;	/* this block was at front of chain */

  if (next_ptr != NULL)
	next_ptr->lmfs_prev = prev_ptr;
  else
	rear = prev_ptr;	/* this block was at rear of chain */
}

/*===========================================================================*
 *				cache_resize				     *
 *===========================================================================*/
static void cache_resize(size_t blocksize, unsigned int bufs)
{
  struct buf *bp;

  assert(blocksize > 0);
  assert(bufs >= MINBUFS);

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
	if(bp->lmfs_count != 0) panic("change blocksize with buffer in use");

  lmfs_buf_pool(bufs);

  fs_block_size = blocksize;
}

static void cache_heuristic_check(void)
{
  int bufs, d;

  bufs = fs_bufs_heuristic(MINBUFS, fs_btotal, fs_bused, fs_block_size);

  /* Set the cache to the new heuristic size if the new one
   * is more than 10% off from the current one.
   */
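  /* For example, growing from 1000 to 1080 buffers is only an 8% change and
   * would leave the cache size as is.
   */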
  d = bufs-nr_bufs;
  if(d < 0) d = -d;
  if(d*100/nr_bufs > 10) {
	cache_resize(fs_block_size, bufs);
  }
}

/*===========================================================================*
 *			lmfs_set_blocksize				     *
 *===========================================================================*/
void lmfs_set_blocksize(size_t new_block_size)
{
  cache_resize(new_block_size, MINBUFS);
  cache_heuristic_check();

  /* Decide whether to use the secondary (VM) cache or not.
   * Only do this if the block size is a multiple of the page size, and using
   * the VM cache has been enabled for this FS.
   */

  vmcache = 0;

  if(may_use_vmcache && !(new_block_size % PAGE_SIZE))
	vmcache = 1;
}

/*===========================================================================*
 *                              lmfs_buf_pool                                *
 *===========================================================================*/
void lmfs_buf_pool(int new_nr_bufs)
{
/* Initialize the buffer pool. */
  register struct buf *bp;

  assert(new_nr_bufs >= MINBUFS);

  if(nr_bufs > 0) {
	assert(buf);
	lmfs_flushall();
	for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
		if(bp->data) {
			assert(bp->lmfs_bytes > 0);
			munmap_t(bp->data, bp->lmfs_bytes);
		}
	}
  }

  if(buf)
	free(buf);

  if(!(buf = calloc(sizeof(buf[0]), new_nr_bufs)))
	panic("couldn't allocate buf list (%d)", new_nr_bufs);

  if(buf_hash)
	free(buf_hash);
  if(!(buf_hash = calloc(sizeof(buf_hash[0]), new_nr_bufs)))
	panic("couldn't allocate buf hash list (%d)", new_nr_bufs);

  nr_bufs = new_nr_bufs;

  bufs_in_use = 0;
  front = &buf[0];
  rear = &buf[nr_bufs - 1];

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
        bp->lmfs_blocknr = NO_BLOCK;
        bp->lmfs_dev = NO_DEV;
        bp->lmfs_next = bp + 1;
        bp->lmfs_prev = bp - 1;
        bp->data = NULL;
        bp->lmfs_bytes = 0;
  }
  front->lmfs_prev = NULL;
  rear->lmfs_next = NULL;

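  /* Initially, chain all buffers onto hash bucket 0; the hash links simply
   * reuse the LRU ordering established above.
   */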
  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) bp->lmfs_hash = bp->lmfs_next;
  buf_hash[0] = front;
}

void lmfs_flushall(void)
{
	struct buf *bp;
	for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
		if(bp->lmfs_dev != NO_DEV && !lmfs_isclean(bp))
			lmfs_flushdev(bp->lmfs_dev);

	/* This is the moment where it is least likely (although certainly not
	 * impossible!) that there are buffers in use, since buffers should not
	 * be held across file system syncs. See if we already intended to
	 * resize the buffer cache, but couldn't. Be aware that we may be
	 * called indirectly from within lmfs_change_blockusage(), so care must
	 * be taken not to recurse infinitely. TODO: see if it is better to
	 * resize the cache from here *only*, thus guaranteeing a clean cache.
	 */
	lmfs_change_blockusage(0);
}

size_t lmfs_fs_block_size(void)
{
	return fs_block_size;
}

void lmfs_may_use_vmcache(int ok)
{
	may_use_vmcache = ok;
}