xref: /minix3/minix/lib/libminixfs/cache.c (revision 594df55e53732746ac76b15ad87a3eac02ec1619)

#define _SYSTEM

#include <assert.h>
#include <string.h>
#include <errno.h>
#include <math.h>
#include <stdlib.h>

#include <machine/vmparam.h>

#include <sys/param.h>
#include <sys/mman.h>

#include <minix/dmap.h>
#include <minix/libminixfs.h>
#include <minix/syslib.h>
#include <minix/sysutil.h>
#include <minix/u64.h>
#include <minix/bdev.h>
#include <minix/bitmap.h>

#include "inc.h"

/* Buffer (block) cache.  To acquire a block, a routine calls lmfs_get_block(),
 * telling which block it wants.  The block is then regarded as "in use" and
 * has its reference count incremented.  All the blocks that are not in use are
 * chained together in an LRU list, with 'front' pointing to the least recently
 * used block, and 'rear' to the most recently used block.  A reverse chain is
 * also maintained.  Usage for LRU purposes is measured by the time at which
 * put_block() is called.  The second parameter to put_block() can violate the
 * LRU order and put a block at the front of the list, if it will probably not
 * be needed again.  This is used internally only; the lmfs_put_block() API
 * call has no second parameter.  If a block is modified, the modifying routine
 * must mark the block as dirty, so the block will eventually be rewritten to
 * the disk.
 */
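
/* A minimal usage sketch (illustrative only; error handling and buffer
 * content handling are simplified). A typical file system modifies a block
 * roughly as follows:
 *
 *	struct buf *bp;
 *	int r;
 *
 *	if ((r = lmfs_get_block(&bp, dev, block, NORMAL)) != OK)
 *		return r;		// block is now "in use"
 *	... modify bp->data ...
 *	lmfs_markdirty(bp);		// ensure eventual write-back
 *	lmfs_put_block(bp);		// back on the LRU chain
 *
 * The dirty block remains cached until lmfs_flushdev() or eviction writes it
 * back to the device.
 */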

/* Flags to put_block(). */
#define ONE_SHOT      0x1	/* set if block will not be needed again */

#define BUFHASH(b) ((unsigned int)((b) % nr_bufs))
#define MARKCLEAN  lmfs_markclean

#define MINBUFS 6 	/* minimum number of bufs for sanity check */

static struct buf *front;       /* points to least recently used free block */
static struct buf *rear;        /* points to most recently used free block */
static unsigned int bufs_in_use;/* # bufs currently in use (not on free list)*/

static void rm_lru(struct buf *bp);
static int read_block(struct buf *bp, size_t size);
static void freeblock(struct buf *bp);
static void cache_heuristic_check(void);
static void put_block(struct buf *bp, int put_flags);

static int vmcache = 0; /* are we using vm's secondary cache? (initially not) */

static struct buf *buf;
static struct buf **buf_hash;   /* the buffer hash table */
static unsigned int nr_bufs;
static int may_use_vmcache;

static size_t fs_block_size = PAGE_SIZE;	/* raw i/o block size */

static fsblkcnt_t fs_btotal = 0, fs_bused = 0;

static int quiet = 0;

typedef struct buf *noxfer_buf_ptr_t; /* annotation for temporary buf ptrs */

void lmfs_setquiet(int q) { quiet = q; }

static int fs_bufs_heuristic(int minbufs, fsblkcnt_t btotal,
	fsblkcnt_t bused, int blocksize)
{
  struct vm_stats_info vsi;
  int bufs;
  u32_t kbytes_used_fs, kbytes_total_fs, kbcache, kb_fsmax;
  u32_t kbytes_remain_mem;

  /* Set a reasonable cache size: cache at most a certain portion of the used
   * FS, and at most a certain percentage of remaining memory.
   */
  if(vm_info_stats(&vsi) != OK) {
	bufs = 1024;
	if(!quiet)
	  printf("fslib: heuristic info fail: default to %d bufs\n", bufs);
	return bufs;
  }

  /* Remaining free memory is unused memory plus memory in use for the cache,
   * as the cache can be evicted.
   */
  kbytes_remain_mem = (u64_t)(vsi.vsi_free + vsi.vsi_cached) *
	vsi.vsi_pagesize / 1024;

  /* Check fs usage. */
  kbytes_used_fs  = (unsigned long)(((u64_t)bused * blocksize) / 1024);
  kbytes_total_fs = (unsigned long)(((u64_t)btotal * blocksize) / 1024);

  /* Heuristic for a desired cache size based on FS usage,
   * but never bigger than half of the total filesystem.
   */
  kb_fsmax = sqrt_approx(kbytes_used_fs)*40;
  kb_fsmax = MIN(kb_fsmax, kbytes_total_fs/2);

  /* Heuristic for a maximum usage - 10% of remaining memory. */
  kbcache = MIN(kbytes_remain_mem/10, kb_fsmax);
  bufs = kbcache * 1024 / blocksize;

  /* But we simply need MINBUFS no matter what. */
  if(bufs < minbufs)
	bufs = minbufs;

  return bufs;
}
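
/* Worked example with illustrative numbers (not measured on any system):
 * with 400 MB of the file system in use, kbytes_used_fs = 409600, so the
 * desired size is sqrt_approx(409600)*40 = 640*40 = 25600 kB, well under half
 * of a 1 GB file system (524288 kB). With 256 MB of reclaimable memory
 * (262144 kB), the 10% cap is 26214 kB, so kbcache = 25600 kB, which at a
 * 4 kB block size yields 25600*1024/4096 = 6400 buffers.
 */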

void lmfs_change_blockusage(int delta)
{
        /* Change the number of allocated blocks by 'delta.'
         * Also accumulate the delta since the last cache re-evaluation.
         * If it is outside a certain band, ask the cache library to
         * re-evaluate the cache size.
         */
        static int bitdelta = 0, warn_low = TRUE, warn_high = TRUE;

	/* Adjust the file system block usage counter accordingly. Do bounds
	 * checking, and report file system misbehavior.
	 */
	if (delta > 0 && (fsblkcnt_t)delta > fs_btotal - fs_bused) {
		if (warn_high) {
			printf("libminixfs: block usage overflow\n");
			warn_high = FALSE;
		}
		delta = (int)(fs_btotal - fs_bused);
	} else if (delta < 0 && (fsblkcnt_t)-delta > fs_bused) {
		if (warn_low) {
			printf("libminixfs: block usage underflow\n");
			warn_low = FALSE;
		}
		delta = -(int)fs_bused;
	}
	fs_bused += delta;

	bitdelta += delta;

#define BAND_KB (10*1024)	/* recheck cache every 10MB change */

	/* If the accumulated delta exceeds the configured threshold, resize
	 * the cache, but only if the cache isn't in use any more. In order to
	 * avoid that the latter case blocks a resize forever, we also call
	 * this function from lmfs_flushall(). Since lmfs_buf_pool() may call
	 * lmfs_flushall(), reset 'bitdelta' before doing the heuristics check.
	 */
	if (bufs_in_use == 0 &&
	    (bitdelta*(int)fs_block_size/1024 > BAND_KB ||
	    bitdelta*(int)fs_block_size/1024 < -BAND_KB)) {
		bitdelta = 0;
		cache_heuristic_check();
	}
}
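
/* Illustrative example: with a 4 kB block size, the resize check above fires
 * once the net accumulated 'bitdelta' exceeds BAND_KB/4 = 2560 blocks, i.e.
 * after roughly 10 MB worth of allocations or frees since the last check.
 */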

void lmfs_markdirty(struct buf *bp)
{
	bp->lmfs_flags |= VMMC_DIRTY;
}

void lmfs_markclean(struct buf *bp)
{
	bp->lmfs_flags &= ~VMMC_DIRTY;
}

int lmfs_isclean(struct buf *bp)
{
	return !(bp->lmfs_flags & VMMC_DIRTY);
}

static void free_unused_blocks(void)
{
	struct buf *bp;

	int freed = 0, bytes = 0;
	printf("libminixfs: freeing; %u blocks in use\n", bufs_in_use);
	for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
		if(bp->lmfs_bytes > 0 && bp->lmfs_count == 0) {
			freed++;
			bytes += bp->lmfs_bytes;
			freeblock(bp);
		}
	}
	printf("libminixfs: freeing; %d blocks, %d bytes\n", freed, bytes);
}

static void lmfs_alloc_block(struct buf *bp, size_t block_size)
{
  size_t len;
  ASSERT(!bp->data);
  ASSERT(bp->lmfs_bytes == 0);

  /* Allocate whole pages; mmap operates at page granularity anyway. */
  len = roundup(block_size, PAGE_SIZE);

  if((bp->data = mmap(0, len, PROT_READ|PROT_WRITE,
      MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
	/* Out of memory: evict all unused cache blocks and retry once. */
	free_unused_blocks();
	if((bp->data = mmap(0, len, PROT_READ|PROT_WRITE,
		MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
		panic("libminixfs: could not allocate block");
	}
  }
  assert(bp->data);
  bp->lmfs_bytes = block_size;
  bp->lmfs_needsetcache = 1;
}

/*===========================================================================*
 *				lmfs_get_block				     *
 *===========================================================================*/
int lmfs_get_block(struct buf **bpp, dev_t dev, block64_t block, int how)
{
	return lmfs_get_block_ino(bpp, dev, block, how, VMC_NO_INODE, 0);
}

static void munmap_t(void *a, int len)
{
	vir_bytes av = (vir_bytes) a;
	assert(a);
	assert(a != MAP_FAILED);
	assert(len > 0);
	assert(!(av % PAGE_SIZE));

	len = roundup(len, PAGE_SIZE);

	assert(!(len % PAGE_SIZE));

	if(munmap(a, len) < 0)
		panic("libminixfs cache: munmap failed");
}

static void raisecount(struct buf *bp)
{
  assert(bufs_in_use >= 0);
  ASSERT(bp->lmfs_count >= 0);
  bp->lmfs_count++;
  if(bp->lmfs_count == 1) bufs_in_use++;
  assert(bufs_in_use > 0);
}

static void lowercount(struct buf *bp)
{
  assert(bufs_in_use > 0);
  ASSERT(bp->lmfs_count > 0);
  bp->lmfs_count--;
  if(bp->lmfs_count == 0) bufs_in_use--;
  assert(bufs_in_use >= 0);
}

static void freeblock(struct buf *bp)
{
  ASSERT(bp->lmfs_count == 0);
  /* If the block taken is dirty, make it clean by writing it to the disk.
   * Avoid hysteresis by flushing all other dirty blocks for the same device.
   */
  if (bp->lmfs_dev != NO_DEV) {
	if (!lmfs_isclean(bp)) lmfs_flushdev(bp->lmfs_dev);
	assert(bp->lmfs_bytes > 0);
	bp->lmfs_dev = NO_DEV;
  }

  /* Fill in block's parameters and add it to the hash chain where it goes. */
  MARKCLEAN(bp);		/* NO_DEV blocks may be marked dirty */
  if(bp->lmfs_bytes > 0) {
	assert(bp->data);
	munmap_t(bp->data, bp->lmfs_bytes);
	bp->lmfs_bytes = 0;
	bp->data = NULL;
  } else assert(!bp->data);
}

/*===========================================================================*
 *				find_block				     *
 *===========================================================================*/
static struct buf *find_block(dev_t dev, block64_t block)
{
/* Search the hash chain for (dev, block). Return the buffer structure if
 * found, or NULL otherwise.
 */
  struct buf *bp;
  int b;

  assert(dev != NO_DEV);

  b = BUFHASH(block);
  for (bp = buf_hash[b]; bp != NULL; bp = bp->lmfs_hash)
	if (bp->lmfs_blocknr == block && bp->lmfs_dev == dev)
		return bp;

  return NULL;
}

/*===========================================================================*
 *				get_block_ino				     *
 *===========================================================================*/
static int get_block_ino(struct buf **bpp, dev_t dev, block64_t block, int how,
	ino_t ino, u64_t ino_off, size_t block_size)
{
/* Check to see if the requested block is in the block cache.  The requested
 * block is identified by the block number in 'block' on device 'dev', counted
 * in the file system block size.  The amount of data requested for this block
 * is given in 'block_size', which may be less than the file system block size
 * iff the requested block is the last (partial) block on a device.  Note that
 * the given block size does *not* affect the conversion of 'block' to a byte
 * offset!  If the block could be obtained, whether from the cache or by
 * reading from the device, OK is returned, with a pointer to the buffer
 * structure stored in 'bpp'.  If not, a negative error code is returned (and
 * no buffer).  If necessary, evict some other block and fetch the contents
 * from disk (if 'how' is NORMAL).  If 'how' is NO_READ, the caller intends to
 * overwrite the requested block in its entirety, so it is only necessary to
 * see if it is in the cache; if it is not, any free buffer will do.  If 'how'
 * is PEEK, the function returns the block if it is in the cache or the VM
 * cache, and an ENOENT error code otherwise.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */
  int b, r;
  static struct buf *bp;
  uint64_t dev_off;
  struct buf *prev_ptr;

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  assert(dev != NO_DEV);

  assert(block <= UINT64_MAX / fs_block_size);

  dev_off = block * fs_block_size;

  if((ino_off % fs_block_size)) {

	printf("cache: unaligned lmfs_get_block_ino ino_off %llu\n",
		ino_off);
	util_stacktrace();
  }

  /* See if the block is in the cache. If so, we can return it right away. */
  bp = find_block(dev, block);
  if (bp != NULL && !(bp->lmfs_flags & VMMC_EVICTED)) {
	ASSERT(bp->lmfs_dev == dev);
	ASSERT(bp->lmfs_dev != NO_DEV);

	/* The block must have exactly the requested number of bytes. */
	if (bp->lmfs_bytes != block_size)
		return EIO;

	/* Block needed has been found. */
	if (bp->lmfs_count == 0) {
		rm_lru(bp);
		ASSERT(bp->lmfs_needsetcache == 0);
		ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
		/* FIXME: race condition against the VMMC_EVICTED check */
		bp->lmfs_flags |= VMMC_BLOCK_LOCKED;
	}
	raisecount(bp);
	ASSERT(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
	ASSERT(bp->data);

	if(ino != VMC_NO_INODE) {
		if(bp->lmfs_inode == VMC_NO_INODE
		|| bp->lmfs_inode != ino
		|| bp->lmfs_inode_offset != ino_off) {
			bp->lmfs_inode = ino;
			bp->lmfs_inode_offset = ino_off;
			bp->lmfs_needsetcache = 1;
		}
	}

	*bpp = bp;
	return OK;
  }

  /* We had the block in the cache but VM evicted it; invalidate it. */
  if (bp != NULL) {
	assert(bp->lmfs_flags & VMMC_EVICTED);
	ASSERT(bp->lmfs_count == 0);
	ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
	ASSERT(!(bp->lmfs_flags & VMMC_DIRTY));
	bp->lmfs_dev = NO_DEV;
	bp->lmfs_bytes = 0;
	bp->data = NULL;
  }

  /* Desired block is not on available chain. Find a free block to use. */
  if(bp) {
	ASSERT(bp->lmfs_flags & VMMC_EVICTED);
  } else {
	if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);
  }
  assert(bp);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->lmfs_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->lmfs_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->lmfs_hash != NULL)
		if (prev_ptr->lmfs_hash == bp) {
			prev_ptr->lmfs_hash = bp->lmfs_hash;	/* found it */
			break;
		} else {
			prev_ptr = prev_ptr->lmfs_hash;	/* keep looking */
		}
  }

  freeblock(bp);

  bp->lmfs_inode = ino;
  bp->lmfs_inode_offset = ino_off;

  bp->lmfs_flags = VMMC_BLOCK_LOCKED;
  bp->lmfs_needsetcache = 0;
  bp->lmfs_dev = dev;		/* fill in device number */
  bp->lmfs_blocknr = block;	/* fill in block number */
  ASSERT(bp->lmfs_count == 0);
  raisecount(bp);
  b = BUFHASH(bp->lmfs_blocknr);
  bp->lmfs_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  assert(dev != NO_DEV);

  /* The block is not found in our cache, but we do want it if it's in the VM
   * cache. The exception is NO_READ, purely for context switching performance
   * reasons. NO_READ is used for 1) newly allocated blocks, 2) blocks being
   * prefetched, and 3) blocks about to be fully overwritten. In the first two
   * cases, VM will not have the block in its cache anyway, and for the third
   * we save on one VM call only if the block is in the VM cache.
   */
  assert(!bp->data);
  assert(!bp->lmfs_bytes);
  if (how != NO_READ && vmcache) {
	if((bp->data = vm_map_cacheblock(dev, dev_off, ino, ino_off,
	    &bp->lmfs_flags, roundup(block_size, PAGE_SIZE))) != MAP_FAILED) {
		bp->lmfs_bytes = block_size;
		ASSERT(!bp->lmfs_needsetcache);
		*bpp = bp;
		return OK;
	}
  }
  bp->data = NULL;

  /* The block is not in the cache, and VM does not know about it. If we were
   * requested to search for the block only, we can now return failure to the
   * caller. Return the block to the pool without allocating data pages, since
   * these would be freed upon recycling the block anyway.
   */
  if (how == PEEK) {
	bp->lmfs_dev = NO_DEV;

	put_block(bp, ONE_SHOT);

	return ENOENT;
  }

  /* Not in the cache; reserve memory for its contents. */

  lmfs_alloc_block(bp, block_size);

  assert(bp->data);

  if (how == NORMAL) {
	/* Try to read the block. Return an error code on failure. */
	if ((r = read_block(bp, block_size)) != OK) {
		put_block(bp, 0);

		return r;
	}
  } else if(how == NO_READ) {
	/* This block will be overwritten by new contents. */
  } else
	panic("unexpected 'how' value: %d", how);

  assert(bp->data);

  *bpp = bp;			/* return the newly acquired block */
  return OK;
}

/*===========================================================================*
 *				lmfs_get_block_ino			     *
 *===========================================================================*/
int lmfs_get_block_ino(struct buf **bpp, dev_t dev, block64_t block, int how,
	ino_t ino, u64_t ino_off)
{
  return get_block_ino(bpp, dev, block, how, ino, ino_off, fs_block_size);
}

/*===========================================================================*
 *				lmfs_get_partial_block			     *
 *===========================================================================*/
int lmfs_get_partial_block(struct buf **bpp, dev_t dev, block64_t block,
	int how, size_t block_size)
{
  return get_block_ino(bpp, dev, block, how, VMC_NO_INODE, 0, block_size);
}
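
/* Illustrative use of lmfs_get_partial_block() (hypothetical sizes): on a
 * 10000-byte device with a 4096-byte block size, block 2 covers only the
 * last 10000 - 2*4096 = 1808 bytes, so it would be requested as:
 *
 *	r = lmfs_get_partial_block(&bp, dev, 2, NORMAL, 1808);
 *
 * Note that block 2 still translates to byte offset 2*4096 on the device.
 */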

/*===========================================================================*
 *				put_block				     *
 *===========================================================================*/
static void put_block(struct buf *bp, int put_flags)
{
/* Return a block to the list of available blocks.  Depending on 'put_flags'
 * it may be put on the front or rear of the LRU chain.  Blocks that are
 * expected to be needed again at some point go on the rear; blocks that are
 * unlikely to be needed again at all go on the front.
 */
  dev_t dev;
  uint64_t dev_off;
  int r, setflags;

  assert(bp != NULL);

  dev = bp->lmfs_dev;

  dev_off = bp->lmfs_blocknr * fs_block_size;

  lowercount(bp);
  if (bp->lmfs_count != 0) return;	/* block is still in use */

  /* Put this block back on the LRU chain.  */
  if (dev == NO_DEV || dev == DEV_RAM || (put_flags & ONE_SHOT)) {
	/* Block will not be needed again. Put it on front of chain.
	 * It will be the next block to be evicted from the cache.
	 */
	bp->lmfs_prev = NULL;
	bp->lmfs_next = front;
	if (front == NULL)
		rear = bp;	/* LRU chain was empty */
	else
		front->lmfs_prev = bp;
	front = bp;
  }
  else {
	/* Block may be needed again.  Put it on rear of chain.
	 * It will not be evicted from the cache for a long time.
	 */
	bp->lmfs_prev = rear;
	bp->lmfs_next = NULL;
	if (rear == NULL)
		front = bp;
	else
		rear->lmfs_next = bp;
	rear = bp;
  }

  assert(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
  bp->lmfs_flags &= ~VMMC_BLOCK_LOCKED;

  /* block has sensible content - if necessary, identify it to VM */
  if(vmcache && bp->lmfs_needsetcache && dev != NO_DEV) {
	assert(bp->data);

	setflags = (put_flags & ONE_SHOT) ? VMSF_ONCE : 0;

	if ((r = vm_set_cacheblock(bp->data, dev, dev_off, bp->lmfs_inode,
	    bp->lmfs_inode_offset, &bp->lmfs_flags,
	    roundup(bp->lmfs_bytes, PAGE_SIZE), setflags)) != OK) {
		if(r == ENOSYS) {
			printf("libminixfs: ENOSYS, disabling VM calls\n");
			vmcache = 0;
		} else if (r == ENOMEM) {
			/* Do not panic in this case. Running out of memory is
			 * bad, especially since it may lead to applications
			 * crashing when trying to access memory-mapped pages
			 * we haven't been able to pass off to the VM cache,
			 * but the entire file system crashing is always worse.
			 */
			printf("libminixfs: no memory for cache block!\n");
		} else {
			panic("libminixfs: setblock of %p dev 0x%llx off "
				"0x%llx failed\n", bp->data, dev, dev_off);
		}
	}
  }
  bp->lmfs_needsetcache = 0;

  /* Now that we (may) have given the block to VM, invalidate the block if it
   * is a one-shot block.  Otherwise, it may still be reobtained immediately
   * after, which could be a problem if VM already forgot the block and we are
   * expected to pass it to VM again, which then wouldn't happen.
   */
  if (put_flags & ONE_SHOT)
	bp->lmfs_dev = NO_DEV;
}

/*===========================================================================*
 *				lmfs_put_block				     *
 *===========================================================================*/
void lmfs_put_block(struct buf *bp)
{
/* User interface to put_block(). */

  if (bp == NULL) return;	/* for poorly written file systems */

  put_block(bp, 0);
}

/*===========================================================================*
 *				lmfs_free_block				     *
 *===========================================================================*/
void lmfs_free_block(dev_t dev, block64_t block)
{
/* The file system has just freed the given block. The block may previously
 * have been in use as data block for an inode. Therefore, we now need to tell
 * VM that the block is no longer associated with an inode. If we fail to do so
 * and the inode now has a hole at this location, mapping in the hole would
 * yield the old block contents rather than a zeroed page. In addition, if the
 * block is in the cache, it will be removed, even if it was dirty.
 */
  struct buf *bp;
  int r;

  /* Tell VM to forget about the block. The primary purpose of this call is to
   * break the inode association, but since the block is part of a mounted file
   * system, it is not expected to be accessed directly anyway. So, save some
   * cache memory by throwing it out of the VM cache altogether.
   */
  if (vmcache) {
	if ((r = vm_forget_cacheblock(dev, block * fs_block_size,
	    fs_block_size)) != OK)
		printf("libminixfs: vm_forget_cacheblock failed (%d)\n", r);
  }

  if ((bp = find_block(dev, block)) != NULL) {
	lmfs_markclean(bp);

	/* Invalidate the block. The block may or may not be in use right now,
	 * so don't be smart about freeing memory or repositioning in the LRU.
	 */
	bp->lmfs_dev = NO_DEV;
  }

  /* Note that this is *not* the right place to implement TRIM support. Even
   * though the block is freed, on the device it may still be part of a
   * previous checkpoint or snapshot of some sort. Only the file system can
   * be trusted to decide which blocks can be reused on the device!
   */
}

/*===========================================================================*
 *				lmfs_zero_block_ino			     *
 *===========================================================================*/
void lmfs_zero_block_ino(dev_t dev, ino_t ino, u64_t ino_off)
{
/* Files may have holes. From an application perspective, these are just file
 * regions filled with zeroes. From a file system perspective however, holes
 * may represent unallocated regions on disk. Thus, these holes do not have
 * corresponding blocks on the disk, and therefore also no block number.
 * Therefore, we cannot simply use lmfs_get_block_ino() for them. For reads,
 * this is not a problem, since the file system can just zero out the target
 * application buffer instead. For mapped pages however, this *is* a problem,
 * since the VM cache needs to be told about the corresponding block, and VM
 * does not accept blocks without a device offset. The role of this function is
 * therefore to tell VM about the hole using a fake device offset. The device
 * offsets are picked so that the VM cache will see a block memory-mapped for
 * the hole in the file, while the same block is not visible when
 * memory-mapping the block device.
 */
  struct buf *bp;
  static block64_t fake_block = 0;
  int r;

  if (!vmcache)
	return;

  assert(fs_block_size > 0);

  /* Pick a block number which is above the threshold of what can possibly be
   * mapped in by mmap'ing the device, since off_t is signed, and it is safe to
   * say that it will take a while before we have 8-exabyte devices. Pick a
   * different block number each time to avoid possible concurrency issues.
   * FIXME: it does not seem like VM actually verifies mmap offsets though..
   */
  if (fake_block == 0 || ++fake_block >= UINT64_MAX / fs_block_size)
	fake_block = ((uint64_t)INT64_MAX + 1) / fs_block_size;

  /* Obtain a block. */
  if ((r = lmfs_get_block_ino(&bp, dev, fake_block, NO_READ, ino,
      ino_off)) != OK)
	panic("libminixfs: getting a NO_READ block failed: %d", r);
  assert(bp != NULL);
  assert(bp->lmfs_dev != NO_DEV);

  /* The block is already zeroed, as it has just been allocated with mmap. File
   * systems do not rely on this assumption yet, so if VM ever gets changed to
   * not clear the blocks we allocate (e.g., by recycling pages in the VM cache
   * for the same process, which would be safe), we need to add a memset here.
   */

  /* Release the block. We don't expect it to be accessed ever again. Moreover,
   * if we keep the block around in the VM cache, it may erroneously be mapped
   * in beyond the file end later. Hence, use VMSF_ONCE when passing it to VM.
   * TODO: tell VM that it is an all-zeroes block, so that VM can deduplicate
   * all such pages in its cache.
   */
  put_block(bp, ONE_SHOT);
}
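
/* Illustrative arithmetic: with a 4 kB block size, the first fake block
 * number is ((uint64_t)INT64_MAX + 1) / 4096 = 2^63 / 2^12 = 2^51, which
 * corresponds to device byte offset 2^63, just past the largest offset
 * representable in a signed 64-bit off_t.
 */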

void lmfs_set_blockusage(fsblkcnt_t btotal, fsblkcnt_t bused)
{

  assert(bused <= btotal);
  fs_btotal = btotal;
  fs_bused = bused;

  /* if the cache isn't in use, we could resize it. */
  if (bufs_in_use == 0)
	cache_heuristic_check();
}

/*===========================================================================*
 *				read_block				     *
 *===========================================================================*/
static int read_block(struct buf *bp, size_t block_size)
{
/* Read a disk block of 'block_size' bytes.  The given size is always the FS
 * block size, except for the last block of a device.  If an I/O error occurs,
 * invalidate the block and return an error code.
 */
  ssize_t r;
  off_t pos;
  dev_t dev = bp->lmfs_dev;

  assert(dev != NO_DEV);

  ASSERT(bp->lmfs_bytes == block_size);
  ASSERT(fs_block_size > 0);

  pos = (off_t)bp->lmfs_blocknr * fs_block_size;
  if (block_size > PAGE_SIZE) {
#define MAXPAGES 20
	vir_bytes blockrem, vaddr = (vir_bytes) bp->data;
	int p = 0;
	static iovec_t iovec[MAXPAGES];
	blockrem = block_size;
	while(blockrem > 0) {
		vir_bytes chunk = blockrem >= PAGE_SIZE ? PAGE_SIZE : blockrem;
		assert(p < MAXPAGES);	/* do not overrun the iovec array */
		iovec[p].iov_addr = vaddr;
		iovec[p].iov_size = chunk;
		vaddr += chunk;
		blockrem -= chunk;
		p++;
	}
	r = bdev_gather(dev, pos, iovec, p, BDEV_NOFLAGS);
  } else {
	r = bdev_read(dev, pos, bp->data, block_size, BDEV_NOFLAGS);
  }
  if (r != (ssize_t)block_size) {
	printf("fs cache: I/O error on device %d/%d, block %"PRIu64" (%zd)\n",
	    major(dev), minor(dev), bp->lmfs_blocknr, r);
	if (r >= 0)
		r = EIO; /* TODO: retry retrieving (just) the remaining part */

	bp->lmfs_dev = NO_DEV;	/* invalidate block */

	return r;
  }

  return OK;
}
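
/* Illustrative example: with 4 kB pages, a 16 kB block is read through
 * bdev_gather() as four page-sized iovec entries, while a 6 kB partial last
 * block becomes one 4 kB entry plus one 2 kB entry.
 */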

/*===========================================================================*
 *				lmfs_invalidate				     *
 *===========================================================================*/
void lmfs_invalidate(
  dev_t device			/* device whose blocks are to be purged */
)
{
/* Remove all the blocks belonging to some device from the cache. */

  register struct buf *bp;

  assert(device != NO_DEV);

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
	if (bp->lmfs_dev == device) {
		assert(bp->data);
		assert(bp->lmfs_bytes > 0);
		munmap_t(bp->data, bp->lmfs_bytes);
		bp->lmfs_dev = NO_DEV;
		bp->lmfs_bytes = 0;
		bp->data = NULL;
	}
  }

  /* Clear the cache even if VM caching is disabled for the file system:
   * caching may be disabled as side effect of an error, leaving blocks behind
   * in the actual VM cache.
   */
  vm_clear_cache(device);
}

/*===========================================================================*
 *				sort_blocks				     *
 *===========================================================================*/
static void sort_blocks(struct buf **bufq, unsigned int bufqsize)
{
  struct buf *bp;
  int i, j, gap;

  gap = 1;
  do
	gap = 3 * gap + 1;
  while ((unsigned int)gap <= bufqsize);

  while (gap != 1) {
	gap /= 3;
	for (j = gap; (unsigned int)j < bufqsize; j++) {
		for (i = j - gap; i >= 0 &&
		    bufq[i]->lmfs_blocknr > bufq[i + gap]->lmfs_blocknr;
		    i -= gap) {
			bp = bufq[i];
			bufq[i] = bufq[i + gap];
			bufq[i + gap] = bp;
		}
	}
  }
}
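
/* This is a Shell sort using the gap sequence g = 3*g + 1 (1, 4, 13, 40,
 * ...), commonly attributed to Knuth. Blocks are ordered by ascending
 * lmfs_blocknr so that adjacent blocks can be merged into contiguous device
 * transfers by rw_scattered() below.
 */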

/*===========================================================================*
 *				rw_scattered				     *
 *===========================================================================*/
static void rw_scattered(
  dev_t dev,			/* major-minor device number */
  struct buf **bufq,		/* pointer to array of buffers */
  unsigned int bufqsize,	/* number of buffers */
  int rw_flag			/* READING or WRITING */
)
{
/* Read or write scattered data from or to a device. */

  register struct buf *bp;
  register iovec_t *iop;
  static iovec_t iovec[NR_IOREQS];
  off_t pos;
  unsigned int i, iov_per_block;
  unsigned int start_in_use = bufs_in_use, start_bufqsize = bufqsize;

  if(bufqsize == 0) return;

  /* For READING, check that all buffers on the list are obtained and held
   * (count > 0).
   */
  if (rw_flag == READING) {
	assert(bufqsize <= LMFS_MAX_PREFETCH);

	for(i = 0; i < bufqsize; i++) {
		assert(bufq[i] != NULL);
		assert(bufq[i]->lmfs_count > 0);
	}

	/* therefore they are all 'in use' and must be at least this many */
	assert(start_in_use >= start_bufqsize);
  }

  assert(dev != NO_DEV);
  assert(fs_block_size > 0);
  assert(howmany(fs_block_size, PAGE_SIZE) <= NR_IOREQS);

  /* For WRITING, (Shell) sort buffers on lmfs_blocknr.
   * For READING, the buffers are already sorted.
   */
  if (rw_flag == WRITING)
	sort_blocks(bufq, bufqsize);

  /* Set up I/O vector and do I/O.  The result of bdev I/O is OK if everything
   * went fine, otherwise the error code for the first failed transfer.
   */
  while (bufqsize > 0) {
	unsigned int p, nblocks = 0, niovecs = 0;
	int r;
	for (iop = iovec; nblocks < bufqsize; nblocks++) {
		vir_bytes vdata, blockrem;
		bp = bufq[nblocks];
		if (bp->lmfs_blocknr != bufq[0]->lmfs_blocknr + nblocks)
			break;
		blockrem = bp->lmfs_bytes;
		iov_per_block = howmany(blockrem, PAGE_SIZE);
		if (niovecs > NR_IOREQS - iov_per_block) break;
		vdata = (vir_bytes) bp->data;
		for(p = 0; p < iov_per_block; p++) {
			vir_bytes chunk =
			    blockrem < PAGE_SIZE ? blockrem : PAGE_SIZE;
			iop->iov_addr = vdata;
			iop->iov_size = chunk;
			vdata += PAGE_SIZE;
			blockrem -= chunk;
			iop++;
			niovecs++;
		}
		assert(p == iov_per_block);
		assert(blockrem == 0);
	}

	assert(nblocks > 0);
	assert(niovecs > 0 && niovecs <= NR_IOREQS);

	pos = (off_t)bufq[0]->lmfs_blocknr * fs_block_size;
	if (rw_flag == READING)
		r = bdev_gather(dev, pos, iovec, niovecs, BDEV_NOFLAGS);
	else
		r = bdev_scatter(dev, pos, iovec, niovecs, BDEV_NOFLAGS);

	/* Harvest the results.  The driver may have returned an error, or it
	 * may have done less than what we asked for.
	 */
	if (r < 0) {
		printf("fs cache: I/O error %d on device %d/%d, "
		    "block %"PRIu64"\n",
		    r, major(dev), minor(dev), bufq[0]->lmfs_blocknr);
	}
	for (i = 0; i < nblocks; i++) {
		bp = bufq[i];
		if (r < (ssize_t)bp->lmfs_bytes) {
			/* Transfer failed. */
			if (i == 0) {
				bp->lmfs_dev = NO_DEV;	/* Invalidate block */
			}
			break;
		}
		if (rw_flag == READING) {
			lmfs_put_block(bp);
		} else {
			MARKCLEAN(bp);
		}
		r -= bp->lmfs_bytes;
	}

	bufq += i;
	bufqsize -= i;

	if (rw_flag == READING) {
		/* Don't bother reading more than the device is willing to
		 * give at this time.  Don't forget to release those extras.
		 */
		while (bufqsize > 0) {
			bp = *bufq++;
			bp->lmfs_dev = NO_DEV;	/* invalidate block */
			lmfs_put_block(bp);
			bufqsize--;
		}
	}
	if (rw_flag == WRITING && i == 0) {
		/* We're not making progress, this means we might keep
		 * looping. Buffers remain dirty if un-written. Buffers are
		 * lost if invalidate()d or LRU-removed while dirty. This
		 * is better than keeping unwritable blocks around forever..
		 */
		break;
	}
  }

  if(rw_flag == READING) {
	assert(start_in_use >= start_bufqsize);

	/* READING callers assume all bufs are released. */
	assert(start_in_use - start_bufqsize == bufs_in_use);
  }
}

/*===========================================================================*
 *				lmfs_readahead				     *
 *===========================================================================*/
void lmfs_readahead(dev_t dev, block64_t base_block, unsigned int nblocks,
	size_t last_size)
{
/* Read ahead 'nblocks' blocks starting from the block 'base_block' on device
 * 'dev'. The number of blocks must be between 1 and LMFS_MAX_PREFETCH,
 * inclusive. All blocks have the file system's block size, except possibly the
 * last block in the range, which is of size 'last_size'. The caller must
 * ensure that none of the blocks in the range are already in the cache.
 * However, the caller must also not rely on all or even any of the blocks to
 * be present in the cache afterwards--failures are (deliberately!) ignored.
 */
  static noxfer_buf_ptr_t bufq[LMFS_MAX_PREFETCH]; /* static for size only */
  struct buf *bp;
  unsigned int count;
  int r;

  assert(nblocks >= 1 && nblocks <= LMFS_MAX_PREFETCH);

  for (count = 0; count < nblocks; count++) {
	if (count == nblocks - 1)
		r = lmfs_get_partial_block(&bp, dev, base_block + count,
		    NO_READ, last_size);
	else
		r = lmfs_get_block(&bp, dev, base_block + count, NO_READ);

	if (r != OK)
		break;

	/* We could add a flag that makes the get_block() calls fail if the
	 * block is already in the cache, but it is not a major concern if it
	 * is: we just perform a useless read in that case. However, if the
	 * block is cached *and* dirty, we are about to lose its new contents.
	 */
	assert(lmfs_isclean(bp));

	bufq[count] = bp;
  }

  rw_scattered(dev, bufq, count, READING);
}

/*===========================================================================*
 *				lmfs_readahead_limit			     *
 *===========================================================================*/
unsigned int lmfs_readahead_limit(void)
{
/* Return the maximum number of blocks that should be read ahead at once. The
 * return value is guaranteed to be between 1 and LMFS_MAX_PREFETCH, inclusive.
 */
  unsigned int max_transfer, max_bufs;

  /* The returned value is the minimum of two factors: the maximum number of
   * blocks that can be transferred in a single I/O gather request (see how
   * rw_scattered() generates I/O requests), and a policy limit on the number
   * of buffers that any read-ahead operation may use (that is, thrash).
   */
  max_transfer = NR_IOREQS / MAX(fs_block_size / PAGE_SIZE, 1);

  /* The constants have been imported from MFS as is, and may need tuning. */
  if (nr_bufs < 50)
	max_bufs = 18;
  else
	max_bufs = nr_bufs - 4;

  return MIN(max_transfer, max_bufs);
}
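
/* Worked example (illustrative values): with NR_IOREQS = 64 and a block size
 * equal to the 4 kB page size, max_transfer = 64/1 = 64 blocks per gather
 * request. With nr_bufs = 1024, max_bufs = 1020, so the read-ahead limit is
 * MIN(64, 1020) = 64 blocks.
 */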

/*===========================================================================*
 *				lmfs_prefetch				     *
 *===========================================================================*/
void lmfs_prefetch(dev_t dev, const block64_t *blockset, unsigned int nblocks)
{
/* The given set of blocks is expected to be needed soon, so prefetch a
 * convenient subset. The blocks are expected to be sorted by likelihood of
 * being accessed soon, making the first block of the set the most important
 * block to prefetch right now. The caller must have made sure that the blocks
 * are not in the cache already. The array may have duplicate block numbers.
 */
  bitchunk_t blocks_before[BITMAP_CHUNKS(LMFS_MAX_PREFETCH)];
  bitchunk_t blocks_after[BITMAP_CHUNKS(LMFS_MAX_PREFETCH)];
  block64_t block, base_block;
  unsigned int i, bit, nr_before, nr_after, span, limit, nr_blocks;

  if (nblocks == 0)
	return;

  /* Here is the deal. We are going to prefetch one range only, because seeking
   * is too expensive for just prefetching. The range we select should at least
   * include the first ("base") block of the given set, since that is the block
   * the caller is primarily interested in. Thus, the rest of the range is
   * going to have to be directly around this base block. We first check which
   * blocks from the set fall just before and after the base block, which then
   * allows us to construct a contiguous range of desired blocks directly
   * around the base block, in O(n) time. As a natural part of this, we ignore
   * duplicate blocks in the given set. We then read from the beginning of this
   * range, in order to maximize the chance that a next prefetch request will
   * continue from the last disk position without requiring a seek. However, we
   * do correct for the maximum number of blocks we can (or should) read in at
   * once, such that we will still end up reading the base block.
   */
  base_block = blockset[0];

  memset(blocks_before, 0, sizeof(blocks_before));
  memset(blocks_after, 0, sizeof(blocks_after));

  for (i = 1; i < nblocks; i++) {
	block = blockset[i];

	if (block < base_block && block + LMFS_MAX_PREFETCH >= base_block) {
		bit = base_block - block - 1;
		assert(bit < LMFS_MAX_PREFETCH);
		SET_BIT(blocks_before, bit);
	} else if (block > base_block &&
	    block - LMFS_MAX_PREFETCH <= base_block) {
		bit = block - base_block - 1;
		assert(bit < LMFS_MAX_PREFETCH);
		SET_BIT(blocks_after, bit);
	}
  }

  for (nr_before = 0; nr_before < LMFS_MAX_PREFETCH; nr_before++)
	if (!GET_BIT(blocks_before, nr_before))
		break;

  for (nr_after = 0; nr_after < LMFS_MAX_PREFETCH; nr_after++)
	if (!GET_BIT(blocks_after, nr_after))
		break;

  /* The number of blocks to prefetch is the minimum of two factors: the number
   * of blocks in the range around the base block, and the maximum number of
   * blocks that should be read ahead at once at all.
   */
  span = nr_before + 1 + nr_after;
  limit = lmfs_readahead_limit();

  nr_blocks = MIN(span, limit);
  assert(nr_blocks >= 1 && nr_blocks <= LMFS_MAX_PREFETCH);

  /* Start prefetching from the lowest block within the contiguous range, but
   * make sure that we read at least the original base block itself, too.
   */
  base_block -= MIN(nr_before, nr_blocks - 1);

  lmfs_readahead(dev, base_block, nr_blocks, fs_block_size);
}
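
/* Worked example (illustrative): for blockset {100, 98, 103, 99, 101}, the
 * base block is 100. Blocks 98 and 99 set bits 1 and 0 in blocks_before;
 * blocks 101 and 103 set bits 0 and 2 in blocks_after. The contiguous runs
 * give nr_before = 2 and nr_after = 1 (block 103 leaves a gap at 102), so
 * span = 2 + 1 + 1 = 4. With a read-ahead limit of at least 4, prefetching
 * starts at block 98 and covers blocks 98 through 101.
 */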

/*===========================================================================*
 *				lmfs_flushdev				     *
 *===========================================================================*/
void lmfs_flushdev(dev_t dev)
{
/* Flush all dirty blocks for one device. */

  register struct buf *bp;
  static noxfer_buf_ptr_t *dirty;
  static unsigned int dirtylistsize = 0;
  unsigned int ndirty;

  if(dirtylistsize != nr_bufs) {
	if(dirtylistsize > 0) {
		assert(dirty != NULL);
		free(dirty);
	}
	if(!(dirty = malloc(sizeof(dirty[0])*nr_bufs)))
		panic("couldn't allocate dirty buf list");
	dirtylistsize = nr_bufs;
  }

  for (bp = &buf[0], ndirty = 0; bp < &buf[nr_bufs]; bp++) {
	/* Do not flush dirty blocks that are in use (lmfs_count>0): the file
	 * system may mark the block as dirty before changing its contents, in
	 * which case the new contents could end up being lost.
	 */
	if (!lmfs_isclean(bp) && bp->lmfs_dev == dev && bp->lmfs_count == 0) {
		dirty[ndirty++] = bp;
	}
  }

  rw_scattered(dev, dirty, ndirty, WRITING);
}

/*===========================================================================*
 *				rm_lru					     *
 *===========================================================================*/
static void rm_lru(struct buf *bp)
{
/* Remove a block from its LRU chain. */
  struct buf *next_ptr, *prev_ptr;

  next_ptr = bp->lmfs_next;	/* successor on LRU chain */
  prev_ptr = bp->lmfs_prev;	/* predecessor on LRU chain */
  if (prev_ptr != NULL)
	prev_ptr->lmfs_next = next_ptr;
  else
	front = next_ptr;	/* this block was at front of chain */

  if (next_ptr != NULL)
	next_ptr->lmfs_prev = prev_ptr;
  else
	rear = prev_ptr;	/* this block was at rear of chain */
}

/*===========================================================================*
 *				cache_resize				     *
 *===========================================================================*/
static void cache_resize(size_t blocksize, unsigned int bufs)
{
  struct buf *bp;

  assert(blocksize > 0);
  assert(bufs >= MINBUFS);

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
	if(bp->lmfs_count != 0) panic("change blocksize with buffer in use");

  lmfs_buf_pool(bufs);

  fs_block_size = blocksize;
}

static void cache_heuristic_check(void)
{
  int bufs, d;

  bufs = fs_bufs_heuristic(MINBUFS, fs_btotal, fs_bused, fs_block_size);

  /* Set the cache to the new heuristic size if the new one
   * is more than 10% off from the current one.
   */
  d = bufs-nr_bufs;
  if(d < 0) d = -d;
  if(d*100/nr_bufs > 10) {
	cache_resize(fs_block_size, bufs);
  }
}

/*===========================================================================*
 *			lmfs_set_blocksize				     *
 *===========================================================================*/
void lmfs_set_blocksize(size_t new_block_size)
{
  cache_resize(new_block_size, MINBUFS);
  cache_heuristic_check();

  /* Decide whether to use the secondary (VM) cache or not.
   * Only do this if the block size is a multiple of the page size, and using
   * the VM cache has been enabled for this FS.
   */

  vmcache = 0;

  if(may_use_vmcache && !(new_block_size % PAGE_SIZE))
	vmcache = 1;
}

/*===========================================================================*
 *                              lmfs_buf_pool                                *
 *===========================================================================*/
void lmfs_buf_pool(int new_nr_bufs)
{
/* Initialize the buffer pool. */
  register struct buf *bp;

  assert(new_nr_bufs >= MINBUFS);

  if(nr_bufs > 0) {
	assert(buf);
	lmfs_flushall();
	for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
		if(bp->data) {
			assert(bp->lmfs_bytes > 0);
			munmap_t(bp->data, bp->lmfs_bytes);
		}
	}
  }

  if(buf)
	free(buf);

  if(!(buf = calloc(sizeof(buf[0]), new_nr_bufs)))
	panic("couldn't allocate buf list (%d)", new_nr_bufs);

  if(buf_hash)
	free(buf_hash);
  if(!(buf_hash = calloc(sizeof(buf_hash[0]), new_nr_bufs)))
	panic("couldn't allocate buf hash list (%d)", new_nr_bufs);

  nr_bufs = new_nr_bufs;

  bufs_in_use = 0;
  front = &buf[0];
  rear = &buf[nr_bufs - 1];

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
        bp->lmfs_blocknr = NO_BLOCK;
        bp->lmfs_dev = NO_DEV;
        bp->lmfs_next = bp + 1;
        bp->lmfs_prev = bp - 1;
        bp->data = NULL;
        bp->lmfs_bytes = 0;
  }
  front->lmfs_prev = NULL;
  rear->lmfs_next = NULL;

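  /* Initially, chain all buffers into a single hash chain by reusing the LRU
   * 'next' pointers, and hang that chain off hash slot 0. Buffers are moved
   * to their proper hash slots as blocks are brought in.
   */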
  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) bp->lmfs_hash = bp->lmfs_next;
  buf_hash[0] = front;
}

void lmfs_flushall(void)
{
	struct buf *bp;
	for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
		if(bp->lmfs_dev != NO_DEV && !lmfs_isclean(bp))
			lmfs_flushdev(bp->lmfs_dev);

	/* This is the moment where it is least likely (although certainly not
	 * impossible!) that there are buffers in use, since buffers should not
	 * be held across file system syncs. See if we already intended to
	 * resize the buffer cache, but couldn't. Be aware that we may be
	 * called indirectly from within lmfs_change_blockusage(), so care must
	 * be taken not to recurse infinitely. TODO: see if it is better to
	 * resize the cache from here *only*, thus guaranteeing a clean cache.
	 */
	lmfs_change_blockusage(0);
}

size_t lmfs_fs_block_size(void)
{
	return fs_block_size;
}

void lmfs_may_use_vmcache(int ok)
{
	may_use_vmcache = ok;
}
1317