/*	$NetBSD: bcache.c,v 1.5 2009/10/26 19:16:56 cegger Exp $	*/

/*-
 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/* __FBSDID("$FreeBSD: src/sys/boot/common/bcache.c,v 1.12 2003/08/25 23:30:41 obrien Exp $"); */

/*
 * Simple LRU block cache
 */

#include <sys/stdint.h>

#include <lib/libsa/stand.h>

#include "bitstring.h"	/* XXX: Brought this in from FreeBSD:sys/sys/. Do we have an equivalent in NetBSD? */
#include "bootstrap.h"

/* #define BCACHE_DEBUG */

#ifdef BCACHE_DEBUG
#define BCACHE_TIMEOUT	10
#define DEBUG(fmt, args...)	printf("%s: " fmt "\n" , __func__ , ## args)
#else
#define BCACHE_TIMEOUT	2
#define DEBUG(fmt, args...)
#endif


struct bcachectl
{
	daddr_t	bc_blkno;
	time_t	bc_stamp;
	int	bc_count;
};

static struct bcachectl	*bcache_ctl;
static void		*bcache_data;
static bitstr_t		*bcache_miss;
static u_int		bcache_nblks;
static u_int		bcache_blksize;
static u_int		bcache_hits, bcache_misses, bcache_ops, bcache_bypasses;
static u_int		bcache_flushes;
static u_int		bcache_bcount;

static void	bcache_invalidate(daddr_t blkno);
static void	bcache_insert(void *buf, daddr_t blkno);
static int	bcache_lookup(void *buf, daddr_t blkno);
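
/*
 * Layout, for reference: cache slot i holds one block of bcache_blksize
 * bytes at bcache_data + i * bcache_blksize and is described by
 * bcache_ctl[i] (block number, timestamp and LRU counter).  bcache_miss
 * is a per-request scratch bitmap used by read_strategy to remember which
 * blocks of the current transfer still need device I/O.
 */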

/*
 * Initialise the cache to hold (nblks) blocks of (bsize) bytes each.
 */
int
bcache_init(u_int nblks, size_t bsize)
{
	/* discard any old contents */
	if (bcache_data != NULL) {
		free(bcache_data);
		bcache_data = NULL;
		free(bcache_ctl);
		free(bcache_miss);		/* allocated alongside the other two */
	}

	/* Allocate control structures */
	bcache_nblks = nblks;
	bcache_blksize = bsize;
	bcache_data = alloc(bcache_nblks * bcache_blksize);
	bcache_ctl = (struct bcachectl *)alloc(bcache_nblks * sizeof(struct bcachectl));
	bcache_miss = bit_alloc((bcache_nblks + 1) / 2);
	if ((bcache_data == NULL) || (bcache_ctl == NULL) || (bcache_miss == NULL)) {
		if (bcache_miss)
			free(bcache_miss);
		if (bcache_ctl)
			free(bcache_ctl);
		if (bcache_data)
			free(bcache_data);
		bcache_data = NULL;
		return(ENOMEM);
	}

	return(0);
}
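
/*
 * Example (sketch only, not compiled): a boot program would typically size
 * the cache once during startup, before any cached disk I/O.  The block
 * count and block size below are illustrative assumptions, not values taken
 * from this file; 512 matches a common disk sector size.
 */
#if 0
	if (bcache_init(32, 512) != 0)
		printf("bcache: no memory, running uncached\n");
#endif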

/*
 * Flush the cache
 */
void
bcache_flush(void)
{
	u_int	i;

	bcache_flushes++;

	/* Flush the cache */
	for (i = 0; i < bcache_nblks; i++) {
		bcache_ctl[i].bc_count = -1;
		bcache_ctl[i].bc_blkno = -1;
	}
}

/*
 * Handle a write request; write directly to the disk, and populate the
 * cache with the new values.
 */
static int
write_strategy(void *devdata, int unit, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
{
	struct bcache_devdata	*dd = (struct bcache_devdata *)devdata;
	daddr_t			i, nblk;
	int			err;

	nblk = size / bcache_blksize;

	/* Invalidate the blocks being written */
	for (i = 0; i < nblk; i++) {
		bcache_invalidate(blk + i);
	}

	/* Write the blocks */
	err = dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize);

	/* Populate the block cache with the new data */
	if (err == 0) {
		for (i = 0; i < nblk; i++) {
			bcache_insert(buf + (i * bcache_blksize), blk + i);
		}
	}

	return err;
}

/*
 * Handle a read request; fill in parts of the request that can
 * be satisfied by the cache, use the supplied strategy routine to do
 * device I/O and then use the I/O results to populate the cache.
 */
static int
read_strategy(void *devdata, int unit, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
{
	struct bcache_devdata	*dd = (struct bcache_devdata *)devdata;
	int			p_size, result;
	daddr_t			p_blk, i, j, nblk;
	void			*p_buf;

	nblk = size / bcache_blksize;
	result = 0;

	/* Satisfy any cache hits up front */
	for (i = 0; i < nblk; i++) {
		if (bcache_lookup(buf + (bcache_blksize * i), blk + i)) {
			bit_set(bcache_miss, i);	/* cache miss */
			bcache_misses++;
		} else {
			bit_clear(bcache_miss, i);	/* cache hit */
			bcache_hits++;
		}
	}

	/* Go back and fill in any misses XXX optimise */
	p_blk = -1;
	p_buf = NULL;
	p_size = 0;
	for (i = 0; i < nblk; i++) {
		if (bit_test(bcache_miss, i)) {
			/* miss, add to pending transfer */
			if (p_blk == -1) {
				p_blk = blk + i;
				p_buf = buf + (bcache_blksize * i);
				p_size = 1;
			} else {
				p_size++;
			}
		} else if (p_blk != -1) {
			/* hit, complete pending transfer */
			result = dd->dv_strategy(dd->dv_devdata, rw, p_blk, p_size * bcache_blksize, p_buf, NULL);
			if (result != 0)
				goto done;
			for (j = 0; j < p_size; j++)
				bcache_insert(p_buf + (j * bcache_blksize), p_blk + j);
			p_blk = -1;
		}
	}
	if (p_blk != -1) {
		/* pending transfer left */
		result = dd->dv_strategy(dd->dv_devdata, rw, p_blk, p_size * bcache_blksize, p_buf, NULL);
		if (result != 0)
			goto done;
		for (j = 0; j < p_size; j++)
			bcache_insert(p_buf + (j * bcache_blksize), p_blk + j);
	}

 done:
	if ((result == 0) && (rsize != NULL))
		*rsize = size;
	return(result);
}

/*
 * Requests larger than 1/2 the cache size will be bypassed and go
 * directly to the disk.  XXX tune this.
 */
int
bcache_strategy(void *devdata, int unit, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
{
	static int		bcache_unit = -1;
	struct bcache_devdata	*dd = (struct bcache_devdata *)devdata;

	bcache_ops++;

	if (bcache_unit != unit) {
		bcache_flush();
		bcache_unit = unit;
	}

	/* bypass large requests, or when the cache is inactive */
	if ((bcache_data == NULL) || ((size * 2 / bcache_blksize) > bcache_nblks)) {
		DEBUG("bypass %d from %d", size / bcache_blksize, blk);
		bcache_bypasses++;
		return(dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
	}

	switch (rw) {
	case F_READ:
		return read_strategy(devdata, unit, rw, blk, size, buf, rsize);
	case F_WRITE:
		return write_strategy(devdata, unit, rw, blk, size, buf, rsize);
	}
	return -1;
}
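
/*
 * Example (sketch only, not compiled): a disk driver interposes the cache
 * by describing its own strategy routine in a struct bcache_devdata and
 * calling bcache_strategy() where it would otherwise do the I/O itself.
 * The names mydisk_strategy and mydisk_softc below are hypothetical, as
 * are the surrounding unit/blk/size/buf/rsize variables.
 */
#if 0
	struct bcache_devdata	bcd;
	int			error;

	bcd.dv_strategy = mydisk_strategy;	/* underlying device I/O routine */
	bcd.dv_devdata  = &mydisk_softc;	/* handed back as its first argument */
	error = bcache_strategy(&bcd, unit, F_READ, blk, size, buf, &rsize);
#endif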


/*
 * Insert a block into the cache.  Retire the oldest block to do so, if required.
 *
 * XXX the LRU algorithm will fail after 2^31 blocks have been transferred.
 */
static void
bcache_insert(void *buf, daddr_t blkno)
{
	time_t	now;
	int	cand, ocount;
	u_int	i;

	time(&now);
	cand = 0;				/* assume the first block */
	ocount = bcache_ctl[0].bc_count;

	/* find the oldest block */
	for (i = 1; i < bcache_nblks; i++) {
		if (bcache_ctl[i].bc_blkno == blkno) {
			/* reuse old entry */
			cand = i;
			break;
		}
		if (bcache_ctl[i].bc_count < ocount) {
			ocount = bcache_ctl[i].bc_count;
			cand = i;
		}
	}

	DEBUG("insert blk %d -> %d @ %d # %d", blkno, cand, now, bcache_bcount);
	memcpy(bcache_data + (bcache_blksize * cand), buf, bcache_blksize);
	bcache_ctl[cand].bc_blkno = blkno;
	bcache_ctl[cand].bc_stamp = now;
	bcache_ctl[cand].bc_count = bcache_bcount++;
}

/*
 * Look for a block in the cache.  Blocks more than BCACHE_TIMEOUT seconds old
 * may be stale (removable media) and thus are ignored.  Copy the block out
 * if successful and return zero, or return nonzero on failure.
 */
static int
bcache_lookup(void *buf, daddr_t blkno)
{
	time_t	now;
	u_int	i;

	time(&now);

	for (i = 0; i < bcache_nblks; i++)
		/* cache hit? */
		if ((bcache_ctl[i].bc_blkno == blkno) && ((bcache_ctl[i].bc_stamp + BCACHE_TIMEOUT) >= now)) {
			memcpy(buf, bcache_data + (bcache_blksize * i), bcache_blksize);
			DEBUG("hit blk %d <- %d (now %d then %d)", blkno, i, now, bcache_ctl[i].bc_stamp);
			return(0);
		}
	return(ENOENT);
}

/*
 * Invalidate a block from the cache.
 */
static void
bcache_invalidate(daddr_t blkno)
{
	u_int	i;

	for (i = 0; i < bcache_nblks; i++) {
		if (bcache_ctl[i].bc_blkno == blkno) {
			bcache_ctl[i].bc_count = -1;
			bcache_ctl[i].bc_blkno = -1;
			DEBUG("invalidate blk %d", blkno);
			break;
		}
	}
}

int
command_bcache(int argc, char *argv[])
{
	u_int	i;

	for (i = 0; i < bcache_nblks; i++) {
		printf("%lx %x %x|", (unsigned long)bcache_ctl[i].bc_blkno,
		    (unsigned int)bcache_ctl[i].bc_stamp & 0xffff,
		    bcache_ctl[i].bc_count & 0xffff);
		if (((i + 1) % 4) == 0)
			printf("\n");
	}
	printf("\n%u ops  %u bypasses  %u hits  %u misses  %u flushes\n",
	    bcache_ops, bcache_bypasses, bcache_hits, bcache_misses, bcache_flushes);
	return(CMD_OK);
}