#define _SYSTEM

#include <assert.h>
#include <errno.h>
#include <math.h>
#include <stdlib.h>

#include <machine/vmparam.h>

#include <sys/param.h>
#include <sys/mman.h>

#include <minix/dmap.h>
#include <minix/libminixfs.h>
#include <minix/syslib.h>
#include <minix/sysutil.h>
#include <minix/u64.h>
#include <minix/bdev.h>

/* Buffer (block) cache. To acquire a block, a routine calls lmfs_get_block(),
 * telling which block it wants. The block is then regarded as "in use" and
 * has its reference count incremented. All the blocks that are not in use are
 * chained together in an LRU list, with 'front' pointing to the least
 * recently used block, and 'rear' to the most recently used block. A reverse
 * chain is also maintained. Usage for LRU is measured by the time at which
 * put_block() is called. The second parameter to put_block() can violate the
 * LRU order and put a block on the front of the list, if it will probably not
 * be needed again. This is used internally only; the lmfs_put_block() API
 * call has no second parameter. If a block is modified, the modifying routine
 * must mark the block as dirty, so the block will eventually be rewritten to
 * the disk.
 */
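/* Typical caller usage, as a sketch ('dev' and 'block' stand for a real
 * device and block number supplied by the file system):
 *
 *	struct buf *bp;
 *
 *	bp = lmfs_get_block(dev, block, NORMAL);    acquire, read if needed
 *	memset(bp->data, 0, lmfs_fs_block_size());  modify the contents
 *	lmfs_markdirty(bp);                         schedule it for writeback
 *	lmfs_put_block(bp);                         release it to the LRU
 */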
/* Flags to put_block(). */
#define ONE_SHOT 0x1	/* set if block will not be needed again */

#define BUFHASH(b) ((unsigned int)((b) % nr_bufs))
#define MARKCLEAN lmfs_markclean

#define MINBUFS 6	/* minimal number of bufs for sanity check */

static struct buf *front;	/* points to least recently used free block */
static struct buf *rear;	/* points to most recently used free block */
static unsigned int bufs_in_use; /* # bufs currently in use (not on free list) */

static void rm_lru(struct buf *bp);
static void read_block(struct buf *);
static void freeblock(struct buf *bp);
static void cache_heuristic_check(void);
static void put_block(struct buf *bp, int put_flags);

static int vmcache = 0;	/* are we using vm's secondary cache? (initially not) */

static struct buf *buf;
static struct buf **buf_hash;	/* the buffer hash table */
static unsigned int nr_bufs;
static int may_use_vmcache;

static size_t fs_block_size = PAGE_SIZE;	/* raw i/o block size */

static fsblkcnt_t fs_btotal = 0, fs_bused = 0;

static int rdwt_err;

static int quiet = 0;

void lmfs_setquiet(int q) { quiet = q; }

static int fs_bufs_heuristic(int minbufs, fsblkcnt_t btotal,
  fsblkcnt_t bused, int blocksize)
{
  struct vm_stats_info vsi;
  int bufs;
  u32_t kbytes_used_fs, kbytes_total_fs, kbcache, kb_fsmax;
  u32_t kbytes_remain_mem;

  /* Set a reasonable cache size: cache at most a certain portion of the used
   * file system, and at most a certain percentage of the remaining memory.
   */
  if(vm_info_stats(&vsi) != OK) {
    bufs = 1024;
    if(!quiet)
      printf("fslib: heuristic info fail: default to %d bufs\n", bufs);
    return bufs;
  }

  /* Remaining free memory is unused memory plus memory in use for the cache,
   * as the cache can be evicted.
   */
  kbytes_remain_mem = (u64_t)(vsi.vsi_free + vsi.vsi_cached) *
    vsi.vsi_pagesize / 1024;

  /* Check fs usage. */
  kbytes_used_fs = (unsigned long)(((u64_t)bused * blocksize) / 1024);
  kbytes_total_fs = (unsigned long)(((u64_t)btotal * blocksize) / 1024);

  /* Heuristic for a desired cache size based on FS usage;
   * but never bigger than half of the total file system.
   */
  kb_fsmax = sqrt_approx(kbytes_used_fs)*40;
  kb_fsmax = MIN(kb_fsmax, kbytes_total_fs/2);

  /* Heuristic for a maximum usage: 10% of remaining memory. */
  kbcache = MIN(kbytes_remain_mem/10, kb_fsmax);
  bufs = kbcache * 1024 / blocksize;

  /* But we simply need MINBUFS no matter what. */
  if(bufs < minbufs)
    bufs = minbufs;

  return bufs;
}
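/* Worked example of the heuristic above (illustrative numbers, and assuming
 * sqrt_approx() returns roughly the integer square root): with 1 GB of
 * reclaimable memory (kbytes_remain_mem = 1048576), a 4 GB file system of
 * which 1 GB is in use, and a 4096-byte block size: kb_fsmax =
 * sqrt(1048576)*40 = 40960 KB, well under half of the 4 GB total; kbcache =
 * MIN(1048576/10, 40960) = 40960 KB; bufs = 40960*1024/4096 = 10240 buffers.
 */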
void lmfs_change_blockusage(int delta)
{
  /* Change the number of allocated blocks by 'delta.'
   * Also accumulate the delta since the last cache re-evaluation.
   * If it is outside a certain band, ask the cache library to
   * re-evaluate the cache size.
   */
  static int bitdelta = 0, warn_low = TRUE, warn_high = TRUE;

  /* Adjust the file system block usage counter accordingly. Do bounds
   * checking, and report file system misbehavior.
   */
  if (delta > 0 && (fsblkcnt_t)delta > fs_btotal - fs_bused) {
    if (warn_high) {
      printf("libminixfs: block usage overflow\n");
      warn_high = FALSE;
    }
    delta = (int)(fs_btotal - fs_bused);
  } else if (delta < 0 && (fsblkcnt_t)-delta > fs_bused) {
    if (warn_low) {
      printf("libminixfs: block usage underflow\n");
      warn_low = FALSE;
    }
    delta = -(int)fs_bused;
  }
  fs_bused += delta;

  bitdelta += delta;

#define BAND_KB (10*1024)	/* recheck cache every 10MB change */

  /* If the accumulated delta exceeds the configured threshold, resize
   * the cache, but only if the cache isn't in use any more. In order to
   * avoid that the latter case blocks a resize forever, we also call
   * this function from lmfs_flushall(). Since lmfs_buf_pool() may call
   * lmfs_flushall(), reset 'bitdelta' before doing the heuristics check.
   */
  if (bufs_in_use == 0 &&
      (bitdelta*(int)fs_block_size/1024 > BAND_KB ||
       bitdelta*(int)fs_block_size/1024 < -BAND_KB)) {
    bitdelta = 0;
    cache_heuristic_check();
  }
}
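/* Example of the band check above: with a 4096-byte block size, BAND_KB
 * (10240 KB) corresponds to 2560 blocks, so only a net allocation or release
 * of more than 2560 blocks since the last re-evaluation triggers a resize
 * attempt (and then only while no buffers are held).
 */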
void lmfs_markdirty(struct buf *bp)
{
  bp->lmfs_flags |= VMMC_DIRTY;
}

void lmfs_markclean(struct buf *bp)
{
  bp->lmfs_flags &= ~VMMC_DIRTY;
}

int lmfs_isclean(struct buf *bp)
{
  return !(bp->lmfs_flags & VMMC_DIRTY);
}

dev_t lmfs_dev(struct buf *bp)
{
  return bp->lmfs_dev;
}

static void free_unused_blocks(void)
{
  struct buf *bp;

  int freed = 0, bytes = 0;
  printf("libminixfs: freeing; %d blocks in use\n", bufs_in_use);
  for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
    if(bp->lmfs_bytes > 0 && bp->lmfs_count == 0) {
      freed++;
      bytes += bp->lmfs_bytes;
      freeblock(bp);
    }
  }
  printf("libminixfs: freeing; %d blocks, %d bytes\n", freed, bytes);
}

static void lmfs_alloc_block(struct buf *bp)
{
  int len;
  ASSERT(!bp->data);
  ASSERT(bp->lmfs_bytes == 0);

  len = roundup(fs_block_size, PAGE_SIZE);

  if((bp->data = mmap(0, fs_block_size,
      PROT_READ|PROT_WRITE, MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
    free_unused_blocks();
    if((bp->data = mmap(0, fs_block_size, PROT_READ|PROT_WRITE,
        MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
      panic("libminixfs: could not allocate block");
    }
  }
  assert(bp->data);
  bp->lmfs_bytes = fs_block_size;
  bp->lmfs_needsetcache = 1;
}

/*===========================================================================*
 *				lmfs_get_block				     *
 *===========================================================================*/
struct buf *lmfs_get_block(dev_t dev, block64_t block, int how)
{
  return lmfs_get_block_ino(dev, block, how, VMC_NO_INODE, 0);
}
static void munmap_t(void *a, int len)
{
  vir_bytes av = (vir_bytes) a;
  assert(a);
  assert(a != MAP_FAILED);
  assert(len > 0);
  assert(!(av % PAGE_SIZE));

  len = roundup(len, PAGE_SIZE);

  assert(!(len % PAGE_SIZE));

  if(munmap(a, len) < 0)
    panic("libminixfs cache: munmap failed");
}

static void raisecount(struct buf *bp)
{
  assert(bufs_in_use >= 0);
  ASSERT(bp->lmfs_count >= 0);
  bp->lmfs_count++;
  if(bp->lmfs_count == 1) bufs_in_use++;
  assert(bufs_in_use > 0);
}

static void lowercount(struct buf *bp)
{
  assert(bufs_in_use > 0);
  ASSERT(bp->lmfs_count > 0);
  bp->lmfs_count--;
  if(bp->lmfs_count == 0) bufs_in_use--;
  assert(bufs_in_use >= 0);
}

static void freeblock(struct buf *bp)
{
  ASSERT(bp->lmfs_count == 0);
  /* If the block taken is dirty, make it clean by writing it to the disk.
   * Avoid hysteresis by flushing all other dirty blocks for the same device.
   */
  if (bp->lmfs_dev != NO_DEV) {
    if (!lmfs_isclean(bp)) lmfs_flushdev(bp->lmfs_dev);
    assert(bp->lmfs_bytes == fs_block_size);
    bp->lmfs_dev = NO_DEV;
  }

  /* Fill in block's parameters and add it to the hash chain where it goes. */
  MARKCLEAN(bp);	/* NO_DEV blocks may be marked dirty */
  if(bp->lmfs_bytes > 0) {
    assert(bp->data);
    munmap_t(bp->data, bp->lmfs_bytes);
    bp->lmfs_bytes = 0;
    bp->data = NULL;
  } else assert(!bp->data);
}
/*===========================================================================*
 *				find_block				     *
 *===========================================================================*/
static struct buf *find_block(dev_t dev, block64_t block)
{
/* Search the hash chain for (dev, block). Return the buffer structure if
 * found, or NULL otherwise.
 */
  struct buf *bp;
  int b;

  assert(dev != NO_DEV);

  b = BUFHASH(block);
  for (bp = buf_hash[b]; bp != NULL; bp = bp->lmfs_hash)
    if (bp->lmfs_blocknr == block && bp->lmfs_dev == dev)
      return bp;

  return NULL;
}
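/* Hash example: with nr_bufs = 1024, BUFHASH maps block numbers 5, 1029 and
 * 2053 all onto chain 5 (their remainder modulo 1024), so a lookup only walks
 * the few resident blocks whose numbers share that residue.
 */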
/*===========================================================================*
 *				lmfs_get_block_ino			     *
 *===========================================================================*/
struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int how, ino_t ino,
  u64_t ino_off)
{
/* Check to see if the requested block is in the block cache. If so, return
 * a pointer to it. If not, evict some other block and fetch it (unless 'how'
 * is NO_READ). All the blocks in the cache that are not in use are linked
 * together in a chain, with 'front' pointing to the least recently used block
 * and 'rear' to the most recently used block. If 'how' is NO_READ, the block
 * being requested will be overwritten in its entirety, so it is only
 * necessary to see if it is in the cache; if it is not, any free buffer will
 * do. It is not necessary to actually read the block in from disk. If 'how'
 * is PREFETCH, the block need not be read from the disk, and the device is
 * not to be marked on the block (i.e., it is left set to NO_DEV), so callers
 * can tell if the block returned is valid. If 'how' is PEEK, the function
 * returns the block if it is in the cache or could be obtained from VM, and
 * NULL otherwise. In addition to the LRU chain, there is also a hash chain
 * to link together blocks whose block numbers end with the same bit strings,
 * for fast lookup.
 */
  int b;
  static struct buf *bp;
  uint64_t dev_off;
  struct buf *prev_ptr;

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  assert(dev != NO_DEV);

  assert(block <= UINT64_MAX / fs_block_size);

  dev_off = block * fs_block_size;

  if((ino_off % fs_block_size)) {
    printf("cache: unaligned lmfs_get_block_ino ino_off %llu\n", ino_off);
    util_stacktrace();
  }

  /* See if the block is in the cache. If so, we can return it right away. */
  bp = find_block(dev, block);
  if (bp != NULL && !(bp->lmfs_flags & VMMC_EVICTED)) {
    /* Block needed has been found. */
    if (bp->lmfs_count == 0) {
      rm_lru(bp);
      ASSERT(bp->lmfs_needsetcache == 0);
      ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
      /* FIXME: race condition against the VMMC_EVICTED check */
      bp->lmfs_flags |= VMMC_BLOCK_LOCKED;
    }
    raisecount(bp);
    ASSERT(bp->lmfs_bytes == fs_block_size);
    ASSERT(bp->lmfs_dev == dev);
    ASSERT(bp->lmfs_dev != NO_DEV);
    ASSERT(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
    ASSERT(bp->data);

    if(ino != VMC_NO_INODE) {
      if(bp->lmfs_inode == VMC_NO_INODE
          || bp->lmfs_inode != ino
          || bp->lmfs_inode_offset != ino_off) {
        bp->lmfs_inode = ino;
        bp->lmfs_inode_offset = ino_off;
        bp->lmfs_needsetcache = 1;
      }
    }

    return(bp);
  }

  /* We had the block in the cache but VM evicted it; invalidate it. */
  if (bp != NULL) {
    assert(bp->lmfs_flags & VMMC_EVICTED);
    ASSERT(bp->lmfs_count == 0);
    ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
    ASSERT(!(bp->lmfs_flags & VMMC_DIRTY));
    bp->lmfs_dev = NO_DEV;
    bp->lmfs_bytes = 0;
    bp->data = NULL;
  }
  /* Desired block is not on available chain. Find a free block to use. */
  if(bp) {
    ASSERT(bp->lmfs_flags & VMMC_EVICTED);
  } else {
    if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);
  }
  assert(bp);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->lmfs_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
    buf_hash[b] = bp->lmfs_hash;
  } else {
    /* The block just taken is not on the front of its hash chain. */
    while (prev_ptr->lmfs_hash != NULL)
      if (prev_ptr->lmfs_hash == bp) {
        prev_ptr->lmfs_hash = bp->lmfs_hash;	/* found it */
        break;
      } else {
        prev_ptr = prev_ptr->lmfs_hash;	/* keep looking */
      }
  }

  freeblock(bp);

  bp->lmfs_inode = ino;
  bp->lmfs_inode_offset = ino_off;

  bp->lmfs_flags = VMMC_BLOCK_LOCKED;
  bp->lmfs_needsetcache = 0;
  bp->lmfs_dev = dev;		/* fill in device number */
  bp->lmfs_blocknr = block;	/* fill in block number */
  ASSERT(bp->lmfs_count == 0);
  raisecount(bp);
  b = BUFHASH(bp->lmfs_blocknr);
  bp->lmfs_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  assert(dev != NO_DEV);

  /* Block is not found in our cache, but we do want it
   * if it's in the vm cache.
   */
  assert(!bp->data);
  assert(!bp->lmfs_bytes);
  if(vmcache) {
    if((bp->data = vm_map_cacheblock(dev, dev_off, ino, ino_off,
        &bp->lmfs_flags, fs_block_size)) != MAP_FAILED) {
      bp->lmfs_bytes = fs_block_size;
      ASSERT(!bp->lmfs_needsetcache);
      return bp;
    }
  }
  bp->data = NULL;

  /* The block is not in the cache, and VM does not know about it. If we were
   * requested to search for the block only, we can now return failure to the
   * caller. Return the block to the pool without allocating data pages, since
   * these would be freed upon recycling the block anyway.
   */
  if (how == PEEK) {
    bp->lmfs_dev = NO_DEV;

    put_block(bp, ONE_SHOT);

    return NULL;
  }

  /* Not in the cache; reserve memory for its contents. */
  lmfs_alloc_block(bp);

  assert(bp->data);

  if(how == PREFETCH) {
    /* PREFETCH: don't do i/o. */
    bp->lmfs_dev = NO_DEV;
  } else if (how == NORMAL) {
    read_block(bp);
  } else if(how == NO_READ) {
    /* This block will be overwritten by new contents. */
  } else
    panic("unexpected 'how' value: %d", how);

  assert(bp->data);

  return(bp);	/* return the newly acquired block */
}
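/* Sketch of the four 'how' modes from the caller's side ('dev', 'block',
 * 'ino' and 'off' stand for real values):
 *
 *	bp = lmfs_get_block_ino(dev, block, NORMAL, ino, off);    read if needed
 *	bp = lmfs_get_block_ino(dev, block, NO_READ, ino, off);   will overwrite
 *	bp = lmfs_get_block_ino(dev, block, PREFETCH, ino, off);  no I/O; NO_DEV
 *	bp = lmfs_get_block_ino(dev, block, PEEK, ino, off);      NULL if absent
 */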
/*===========================================================================*
 *				put_block				     *
 *===========================================================================*/
static void put_block(struct buf *bp, int put_flags)
{
/* Return a block to the list of available blocks. Depending on 'put_flags'
 * it may be put on the front or rear of the LRU chain. Blocks that are
 * expected to be needed again at some point go on the rear; blocks that are
 * unlikely to be needed again at all go on the front.
 */
  dev_t dev;
  uint64_t dev_off;
  int r, setflags;

  assert(bp != NULL);

  dev = bp->lmfs_dev;

  dev_off = bp->lmfs_blocknr * fs_block_size;

  lowercount(bp);
  if (bp->lmfs_count != 0) return;	/* block is still in use */
  /* Put this block back on the LRU chain. */
  if (dev == NO_DEV || dev == DEV_RAM || (put_flags & ONE_SHOT)) {
    /* Block will not be needed again. Put it on the front of the chain.
     * It will be the next block to be evicted from the cache.
     */
    bp->lmfs_prev = NULL;
    bp->lmfs_next = front;
    if (front == NULL)
      rear = bp;	/* LRU chain was empty */
    else
      front->lmfs_prev = bp;
    front = bp;
  }
  else {
    /* Block may be needed again. Put it on the rear of the chain.
     * It will not be evicted from the cache for a long time.
     */
    bp->lmfs_prev = rear;
    bp->lmfs_next = NULL;
    if (rear == NULL)
      front = bp;
    else
      rear->lmfs_next = bp;
    rear = bp;
  }

  assert(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
  bp->lmfs_flags &= ~VMMC_BLOCK_LOCKED;

  /* The block has sensible content; if necessary, identify it to VM. */
  if(vmcache && bp->lmfs_needsetcache && dev != NO_DEV) {
    assert(bp->data);

    setflags = (put_flags & ONE_SHOT) ? VMSF_ONCE : 0;
    if ((r = vm_set_cacheblock(bp->data, dev, dev_off, bp->lmfs_inode,
        bp->lmfs_inode_offset, &bp->lmfs_flags, fs_block_size,
        setflags)) != OK) {
      if(r == ENOSYS) {
        printf("libminixfs: ENOSYS, disabling VM calls\n");
        vmcache = 0;
      } else {
        panic("libminixfs: setblock of %p dev 0x%llx off "
            "0x%llx failed\n", bp->data, dev, dev_off);
      }
    }
  }
  bp->lmfs_needsetcache = 0;

  /* Now that we (may) have given the block to VM, invalidate the block if it
   * is a one-shot block. Otherwise, it may still be reobtained immediately
   * after, which could be a problem if VM already forgot the block and we are
   * expected to pass it to VM again, which then wouldn't happen.
   */
  if (put_flags & ONE_SHOT)
    bp->lmfs_dev = NO_DEV;
}
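/* Placement example: after put_block(bp, ONE_SHOT) the block becomes the new
 * 'front' and is thus the first eviction candidate; after put_block(bp, 0) it
 * becomes the new 'rear' and stays cached the longest.
 */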
/*===========================================================================*
 *				lmfs_put_block				     *
 *===========================================================================*/
void lmfs_put_block(struct buf *bp)
{
/* User interface to put_block(). */

  if (bp == NULL) return;	/* for poorly written file systems */

  put_block(bp, 0);
}

/*===========================================================================*
 *				lmfs_free_block				     *
 *===========================================================================*/
void lmfs_free_block(dev_t dev, block64_t block)
{
/* The file system has just freed the given block. The block may previously
 * have been in use as data block for an inode. Therefore, we now need to tell
 * VM that the block is no longer associated with an inode. If we fail to do
 * so and the inode now has a hole at this location, mapping in the hole would
 * yield the old block contents rather than a zeroed page. In addition, if the
 * block is in the cache, it will be removed, even if it was dirty.
 */
  struct buf *bp;
  int r;

  /* Tell VM to forget about the block. The primary purpose of this call is
   * to break the inode association, but since the block is part of a mounted
   * file system, it is not expected to be accessed directly anyway. So, save
   * some cache memory by throwing it out of the VM cache altogether.
   */
  if (vmcache) {
    if ((r = vm_forget_cacheblock(dev, block * fs_block_size,
        fs_block_size)) != OK)
      printf("libminixfs: vm_forget_cacheblock failed (%d)\n", r);
  }

  if ((bp = find_block(dev, block)) != NULL) {
    lmfs_markclean(bp);

    /* Invalidate the block. The block may or may not be in use right now,
     * so don't be smart about freeing memory or repositioning in the LRU.
     */
    bp->lmfs_dev = NO_DEV;
  }

  /* Note that this is *not* the right place to implement TRIM support. Even
   * though the block is freed, on the device it may still be part of a
   * previous checkpoint or snapshot of some sort. Only the file system can
   * be trusted to decide which blocks can be reused on the device!
   */
}
/*===========================================================================*
 *				lmfs_zero_block_ino			     *
 *===========================================================================*/
void lmfs_zero_block_ino(dev_t dev, ino_t ino, u64_t ino_off)
{
/* Files may have holes. From an application perspective, these are just file
 * regions filled with zeroes. From a file system perspective however, holes
 * may represent unallocated regions on disk. Thus, these holes do not have
 * corresponding blocks on the disk, and therefore also no block number.
 * Therefore, we cannot simply use lmfs_get_block_ino() for them. For reads,
 * this is not a problem, since the file system can just zero out the target
 * application buffer instead. For mapped pages however, this *is* a problem,
 * since the VM cache needs to be told about the corresponding block, and VM
 * does not accept blocks without a device offset. The role of this function
 * is therefore to tell VM about the hole using a fake device offset. The
 * device offsets are picked so that the VM cache will see a block
 * memory-mapped for the hole in the file, while the same block is not
 * visible when memory-mapping the block device.
 */
  struct buf *bp;
  static block64_t fake_block = 0;

  if (!vmcache)
    return;

  assert(fs_block_size > 0);

  /* Pick a block number which is above the threshold of what can possibly
   * be mapped in by mmap'ing the device, since off_t is signed, and it is
   * safe to say that it will take a while before we have 8-exabyte devices.
   * Pick a different block number each time to avoid possible concurrency
   * issues. FIXME: it does not seem like VM actually verifies mmap offsets
   * though.
   */
  if (fake_block == 0 || ++fake_block >= UINT64_MAX / fs_block_size)
    fake_block = ((uint64_t)INT64_MAX + 1) / fs_block_size;

  /* Obtain a block. */
  bp = lmfs_get_block_ino(dev, fake_block, NO_READ, ino, ino_off);
  assert(bp != NULL);
  assert(bp->lmfs_dev != NO_DEV);

  /* The block is already zeroed, as it has just been allocated with mmap.
   * File systems do not rely on this assumption yet, so if VM ever gets
   * changed to not clear the blocks we allocate (e.g., by recycling pages in
   * the VM cache for the same process, which would be safe), we need to add
   * a memset here.
   */

  /* Release the block. We don't expect it to be accessed ever again.
   * Moreover, if we keep the block around in the VM cache, it may
   * erroneously be mapped in beyond the file end later. Hence, use VMSF_ONCE
   * when passing it to VM. TODO: tell VM that it is an all-zeroes block, so
   * that VM can deduplicate all such pages in its cache.
   */
  put_block(bp, ONE_SHOT);
}
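/* Offset example: with a 4096-byte block size, the first fake block number is
 * ((uint64_t)INT64_MAX + 1) / 4096 = 2^63 / 2^12 = 2^51, which corresponds to
 * device offset 2^51 * 4096 = 2^63 bytes (8 EiB), beyond what a signed off_t
 * mmap offset can reach.
 */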
void lmfs_set_blockusage(fsblkcnt_t btotal, fsblkcnt_t bused)
{

  assert(bused <= btotal);
  fs_btotal = btotal;
  fs_bused = bused;

  /* If the cache isn't in use, we could resize it. */
  if (bufs_in_use == 0)
    cache_heuristic_check();
}

/*===========================================================================*
 *				read_block				     *
 *===========================================================================*/
static void read_block(
  struct buf *bp	/* buffer pointer */
)
{
/* Read a disk block. This is the routine in which the actual read I/O for a
 * single block is invoked. If an error occurs, a message is printed here, but
 * the error is not reported to the caller. If the error occurred while
 * purging a block from the cache, it is not clear what the caller could do
 * about it anyway.
 */
  int r, op_failed;
  off_t pos;
  dev_t dev = bp->lmfs_dev;

  op_failed = 0;

  assert(dev != NO_DEV);

  ASSERT(bp->lmfs_bytes == fs_block_size);
  ASSERT(fs_block_size > 0);

  pos = (off_t)bp->lmfs_blocknr * fs_block_size;
  if(fs_block_size > PAGE_SIZE) {
#define MAXPAGES 20
    vir_bytes blockrem, vaddr = (vir_bytes) bp->data;
    int p = 0;
    static iovec_t iovec[MAXPAGES];
    blockrem = fs_block_size;
    while(blockrem > 0) {
      vir_bytes chunk = blockrem >= PAGE_SIZE ? PAGE_SIZE : blockrem;
      iovec[p].iov_addr = vaddr;
      iovec[p].iov_size = chunk;
      vaddr += chunk;
      blockrem -= chunk;
      p++;
    }
    r = bdev_gather(dev, pos, iovec, p, BDEV_NOFLAGS);
  } else {
    r = bdev_read(dev, pos, bp->data, fs_block_size, BDEV_NOFLAGS);
  }
  if (r < 0) {
    printf("fs cache: I/O error on device %d/%d, block %"PRIu64"\n",
        major(dev), minor(dev), bp->lmfs_blocknr);
    op_failed = 1;
  } else if (r != (ssize_t) fs_block_size) {
    r = END_OF_FILE;
    op_failed = 1;
  }

  if (op_failed) {
    bp->lmfs_dev = NO_DEV;	/* invalidate block */

    /* Report read errors to interested parties. */
    rdwt_err = r;
  }
}
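/* Vector example: for fs_block_size = 16384 and PAGE_SIZE = 4096, the loop in
 * read_block() builds four iovec entries of 4096 bytes each, and the single
 * bdev_gather() call fills the four pages backing bp->data in one request.
 */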
/*===========================================================================*
 *				lmfs_invalidate				     *
 *===========================================================================*/
void lmfs_invalidate(
  dev_t device		/* device whose blocks are to be purged */
)
{
/* Remove all the blocks belonging to some device from the cache. */

  register struct buf *bp;

  assert(device != NO_DEV);

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
    if (bp->lmfs_dev == device) {
      assert(bp->data);
      assert(bp->lmfs_bytes > 0);
      munmap_t(bp->data, bp->lmfs_bytes);
      bp->lmfs_dev = NO_DEV;
      bp->lmfs_bytes = 0;
      bp->data = NULL;
    }
  }

  /* Clear the cache even if VM caching is disabled for the file system:
   * caching may be disabled as a side effect of an error, leaving blocks
   * behind in the actual VM cache.
   */
  vm_clear_cache(device);
}

/*===========================================================================*
 *				lmfs_flushdev				     *
 *===========================================================================*/
void lmfs_flushdev(dev_t dev)
{
/* Flush all dirty blocks for one device. */

  register struct buf *bp;
  static struct buf **dirty;
  static unsigned int dirtylistsize = 0;
  int ndirty;

  if(dirtylistsize != nr_bufs) {
    if(dirtylistsize > 0) {
      assert(dirty != NULL);
      free(dirty);
    }
    if(!(dirty = malloc(sizeof(dirty[0])*nr_bufs)))
      panic("couldn't allocate dirty buf list");
    dirtylistsize = nr_bufs;
  }

  for (bp = &buf[0], ndirty = 0; bp < &buf[nr_bufs]; bp++) {
    /* Do not flush dirty blocks that are in use (lmfs_count>0): the file
     * system may mark the block as dirty before changing its contents, in
     * which case the new contents could end up being lost.
     */
    if (!lmfs_isclean(bp) && bp->lmfs_dev == dev && bp->lmfs_count == 0) {
      dirty[ndirty++] = bp;
    }
  }

  lmfs_rw_scattered(dev, dirty, ndirty, WRITING);
}
/*===========================================================================*
 *				lmfs_rw_scattered			     *
 *===========================================================================*/
void lmfs_rw_scattered(
  dev_t dev,		/* major-minor device number */
  struct buf **bufq,	/* pointer to array of buffers */
  int bufqsize,		/* number of buffers */
  int rw_flag		/* READING or WRITING */
)
{
/* Read or write scattered data from a device. */

  register struct buf *bp;
  int gap;
  register int i;
  register iovec_t *iop;
  static iovec_t iovec[NR_IOREQS];
  off_t pos;
  int iov_per_block;
  unsigned int start_in_use = bufs_in_use, start_bufqsize = bufqsize;

  assert(bufqsize >= 0);
  if(bufqsize == 0) return;

  /* For READING, check that all buffers on the list are obtained and held
   * (count > 0).
   */
  if (rw_flag == READING) {
    for(i = 0; i < bufqsize; i++) {
      assert(bufq[i] != NULL);
      assert(bufq[i]->lmfs_count > 0);
    }

    /* Therefore they are all 'in use' and must be at least this many. */
    assert(start_in_use >= start_bufqsize);
  }

  assert(dev != NO_DEV);
  assert(fs_block_size > 0);
  iov_per_block = roundup(fs_block_size, PAGE_SIZE) / PAGE_SIZE;
  assert(iov_per_block < NR_IOREQS);

  /* (Shell) sort buffers on lmfs_blocknr. */
  gap = 1;
  do
    gap = 3 * gap + 1;
  while (gap <= bufqsize);
  while (gap != 1) {
    int j;
    gap /= 3;
    for (j = gap; j < bufqsize; j++) {
      for (i = j - gap;
          i >= 0 && bufq[i]->lmfs_blocknr > bufq[i + gap]->lmfs_blocknr;
          i -= gap) {
        bp = bufq[i];
        bufq[i] = bufq[i + gap];
        bufq[i + gap] = bp;
      }
    }
  }
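  /* Gap example: for bufqsize = 50 the sequence above grows 1, 4, 13, 40,
   * 121; 121 exceeds 50, so sorting passes run with gaps 40, 13, 4 and
   * finally 1, after which the queue is fully ordered by lmfs_blocknr.
   */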
  /* Set up I/O vector and do I/O. The result of bdev I/O is OK if everything
   * went fine, otherwise the error code for the first failed transfer.
   */
  while (bufqsize > 0) {
    int nblocks = 0, niovecs = 0;
    int r;
    for (iop = iovec; nblocks < bufqsize; nblocks++) {
      int p;
      vir_bytes vdata, blockrem;
      bp = bufq[nblocks];
      if (bp->lmfs_blocknr != bufq[0]->lmfs_blocknr + nblocks)
        break;
      if(niovecs >= NR_IOREQS-iov_per_block) break;
      vdata = (vir_bytes) bp->data;
      blockrem = fs_block_size;
      for(p = 0; p < iov_per_block; p++) {
        vir_bytes chunk = blockrem < PAGE_SIZE ? blockrem : PAGE_SIZE;
        iop->iov_addr = vdata;
        iop->iov_size = chunk;
        vdata += PAGE_SIZE;
        blockrem -= chunk;
        iop++;
        niovecs++;
      }
      assert(p == iov_per_block);
      assert(blockrem == 0);
    }

    assert(nblocks > 0);
    assert(niovecs > 0);

    pos = (off_t)bufq[0]->lmfs_blocknr * fs_block_size;
    if (rw_flag == READING)
      r = bdev_gather(dev, pos, iovec, niovecs, BDEV_NOFLAGS);
    else
      r = bdev_scatter(dev, pos, iovec, niovecs, BDEV_NOFLAGS);

    /* Harvest the results. The driver may have returned an error, or it
     * may have done less than what we asked for.
     */
    if (r < 0) {
      printf("fs cache: I/O error %d on device %d/%d, block %"PRIu64"\n",
          r, major(dev), minor(dev), bufq[0]->lmfs_blocknr);
    }
    for (i = 0; i < nblocks; i++) {
      bp = bufq[i];
      if (r < (ssize_t) fs_block_size) {
        /* Transfer failed. */
        if (i == 0) {
          bp->lmfs_dev = NO_DEV;	/* invalidate block */
        }
        break;
      }
      if (rw_flag == READING) {
        bp->lmfs_dev = dev;	/* validate block */
        lmfs_put_block(bp);
      } else {
        MARKCLEAN(bp);
      }
      r -= fs_block_size;
    }

    bufq += i;
    bufqsize -= i;

    if (rw_flag == READING) {
      /* Don't bother reading more than the device is willing to
       * give at this time. Don't forget to release those extras.
       */
      while (bufqsize > 0) {
        lmfs_put_block(*bufq++);
        bufqsize--;
      }
    }
    if (rw_flag == WRITING && i == 0) {
      /* We're not making progress, this means we might keep
       * looping. Buffers remain dirty if unwritten. Buffers are
       * lost if invalidate()d or LRU-removed while dirty. This
       * is better than keeping unwritable blocks around forever.
       */
      break;
    }
  }

  if(rw_flag == READING) {
    assert(start_in_use >= start_bufqsize);

    /* READING callers assume all bufs are released. */
    assert(start_in_use - start_bufqsize == bufs_in_use);
  }
}
/*===========================================================================*
 *				rm_lru					     *
 *===========================================================================*/
static void rm_lru(struct buf *bp)
{
/* Remove a block from its LRU chain. */
  struct buf *next_ptr, *prev_ptr;

  next_ptr = bp->lmfs_next;	/* successor on LRU chain */
  prev_ptr = bp->lmfs_prev;	/* predecessor on LRU chain */
  if (prev_ptr != NULL)
    prev_ptr->lmfs_next = next_ptr;
  else
    front = next_ptr;	/* this block was at front of chain */

  if (next_ptr != NULL)
    next_ptr->lmfs_prev = prev_ptr;
  else
    rear = prev_ptr;	/* this block was at rear of chain */
}

/*===========================================================================*
 *				cache_resize				     *
 *===========================================================================*/
static void cache_resize(size_t blocksize, unsigned int bufs)
{
  struct buf *bp;

  assert(blocksize > 0);
  assert(bufs >= MINBUFS);

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
    if(bp->lmfs_count != 0) panic("change blocksize with buffer in use");

  lmfs_buf_pool(bufs);

  fs_block_size = blocksize;
}

static void cache_heuristic_check(void)
{
  int bufs, d;

  bufs = fs_bufs_heuristic(MINBUFS, fs_btotal, fs_bused, fs_block_size);
  /* Set the cache to the new heuristic size if the new one
   * is more than 10% off from the current one.
   */
  d = bufs-nr_bufs;
  if(d < 0) d = -d;
  if(d*100/nr_bufs > 10) {
    cache_resize(fs_block_size, bufs);
  }
}
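/* Hysteresis example: with nr_bufs = 1000, the pool is reallocated only when
 * the heuristic asks for at most 899 or at least 1101 buffers; smaller
 * fluctuations leave the cache untouched.
 */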
/*===========================================================================*
 *				lmfs_set_blocksize			     *
 *===========================================================================*/
void lmfs_set_blocksize(size_t new_block_size)
{
  cache_resize(new_block_size, MINBUFS);
  cache_heuristic_check();

  /* Decide whether to use the secondary cache or not.
   * Only do this if the block size is a multiple of the page size, and
   * using the VM cache has been enabled for this FS.
   */
  vmcache = 0;

  if(may_use_vmcache && !(new_block_size % PAGE_SIZE))
    vmcache = 1;
}

/*===========================================================================*
 *				lmfs_buf_pool				     *
 *===========================================================================*/
void lmfs_buf_pool(int new_nr_bufs)
{
/* Initialize the buffer pool. */
  register struct buf *bp;

  assert(new_nr_bufs >= MINBUFS);

  if(nr_bufs > 0) {
    assert(buf);
    lmfs_flushall();
    for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
      if(bp->data) {
        assert(bp->lmfs_bytes > 0);
        munmap_t(bp->data, bp->lmfs_bytes);
      }
    }
  }

  if(buf)
    free(buf);

  if(!(buf = calloc(sizeof(buf[0]), new_nr_bufs)))
    panic("couldn't allocate buf list (%d)", new_nr_bufs);

  if(buf_hash)
    free(buf_hash);
  if(!(buf_hash = calloc(sizeof(buf_hash[0]), new_nr_bufs)))
    panic("couldn't allocate buf hash list (%d)", new_nr_bufs);

  nr_bufs = new_nr_bufs;

  bufs_in_use = 0;
  front = &buf[0];
  rear = &buf[nr_bufs - 1];

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
    bp->lmfs_blocknr = NO_BLOCK;
    bp->lmfs_dev = NO_DEV;
    bp->lmfs_next = bp + 1;
    bp->lmfs_prev = bp - 1;
    bp->data = NULL;
    bp->lmfs_bytes = 0;
  }
  front->lmfs_prev = NULL;
  rear->lmfs_next = NULL;

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) bp->lmfs_hash = bp->lmfs_next;
  buf_hash[0] = front;
}

int lmfs_bufs_in_use(void)
{
  return bufs_in_use;
}

int lmfs_nr_bufs(void)
{
  return nr_bufs;
}
void lmfs_flushall(void)
{
  struct buf *bp;
  for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
    if(bp->lmfs_dev != NO_DEV && !lmfs_isclean(bp))
      lmfs_flushdev(bp->lmfs_dev);

  /* This is the moment where it is least likely (although certainly not
   * impossible!) that there are buffers in use, since buffers should not
   * be held across file system syncs. See if we already intended to
   * resize the buffer cache, but couldn't. Be aware that we may be
   * called indirectly from within lmfs_change_blockusage(), so care must
   * be taken not to recurse infinitely. TODO: see if it is better to
   * resize the cache from here *only*, thus guaranteeing a clean cache.
   */
  lmfs_change_blockusage(0);
}

size_t lmfs_fs_block_size(void)
{
  return fs_block_size;
}

void lmfs_may_use_vmcache(int ok)
{
  may_use_vmcache = ok;
}

void lmfs_reset_rdwt_err(void)
{
  rdwt_err = OK;
}

int lmfs_rdwt_err(void)
{
  return rdwt_err;
}