1 /* $NetBSD: vfs_wapbl.c,v 1.30 2010/02/06 12:10:59 uebayasi Exp $ */ 2 3 /*- 4 * Copyright (c) 2003, 2008, 2009 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Wasabi Systems, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * This implements file system independent write ahead filesystem logging. 34 */ 35 36 #define WAPBL_INTERNAL 37 38 #include <sys/cdefs.h> 39 __KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,v 1.30 2010/02/06 12:10:59 uebayasi Exp $"); 40 41 #include <sys/param.h> 42 43 #ifdef _KERNEL 44 #include <sys/param.h> 45 #include <sys/namei.h> 46 #include <sys/proc.h> 47 #include <sys/uio.h> 48 #include <sys/vnode.h> 49 #include <sys/file.h> 50 #include <sys/malloc.h> 51 #include <sys/resourcevar.h> 52 #include <sys/conf.h> 53 #include <sys/mount.h> 54 #include <sys/kernel.h> 55 #include <sys/kauth.h> 56 #include <sys/mutex.h> 57 #include <sys/atomic.h> 58 #include <sys/wapbl.h> 59 #include <sys/wapbl_replay.h> 60 61 #include <miscfs/specfs/specdev.h> 62 63 #if 0 /* notyet */ 64 #define wapbl_malloc(s) kmem_alloc((s), KM_SLEEP) 65 #define wapbl_free(a, s) kmem_free((a), (s)) 66 #define wapbl_calloc(n, s) kmem_zalloc((n)*(s), KM_SLEEP) 67 #else 68 MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging"); 69 #define wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK) 70 #define wapbl_free(a, s) free((a), M_WAPBL) 71 #define wapbl_calloc(n, s) malloc((n)*(s), M_WAPBL, M_WAITOK | M_ZERO) 72 #endif 73 74 #else /* !_KERNEL */ 75 #include <assert.h> 76 #include <errno.h> 77 #include <stdio.h> 78 #include <stdbool.h> 79 #include <stdlib.h> 80 #include <string.h> 81 82 #include <sys/time.h> 83 #include <sys/wapbl.h> 84 #include <sys/wapbl_replay.h> 85 86 #define KDASSERT(x) assert(x) 87 #define KASSERT(x) assert(x) 88 #define wapbl_malloc(s) malloc(s) 89 #define wapbl_free(a, s) free(a) 90 #define wapbl_calloc(n, s) calloc((n), (s)) 91 92 #endif /* !_KERNEL */ 93 94 /* 95 * INTERNAL DATA STRUCTURES 96 */ 97 98 /* 99 * This structure holds per-mount log information. 
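 * One of these is allocated per mounted file system with logging
 * enabled; wapbl_start() below sets it up and returns it to the
 * caller, and wapbl_stop() tears it down again.  wl_mount records
 * the mount it belongs to.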
100 * 101 * Legend: a = atomic access only 102 * r = read-only after init 103 * l = rwlock held 104 * m = mutex held 105 * u = unlocked access ok 106 * b = bufcache_lock held 107 */ 108 struct wapbl { 109 struct vnode *wl_logvp; /* r: log here */ 110 struct vnode *wl_devvp; /* r: log on this device */ 111 struct mount *wl_mount; /* r: mountpoint wl is associated with */ 112 daddr_t wl_logpbn; /* r: Physical block number of start of log */ 113 int wl_log_dev_bshift; /* r: logarithm of device block size of log 114 device */ 115 int wl_fs_dev_bshift; /* r: logarithm of device block size of 116 filesystem device */ 117 118 unsigned wl_lock_count; /* m: Count of transactions in progress */ 119 120 size_t wl_circ_size; /* r: Number of bytes in buffer of log */ 121 size_t wl_circ_off; /* r: Number of bytes reserved at start */ 122 123 size_t wl_bufcount_max; /* r: Number of buffers reserved for log */ 124 size_t wl_bufbytes_max; /* r: Number of buf bytes reserved for log */ 125 126 off_t wl_head; /* l: Byte offset of log head */ 127 off_t wl_tail; /* l: Byte offset of log tail */ 128 /* 129 * head == tail == 0 means log is empty 130 * head == tail != 0 means log is full 131 * see assertions in wapbl_advance() for other boundary conditions. 132 * only truncate moves the tail, except when flush sets it to 133 * wl_header_size only flush moves the head, except when truncate 134 * sets it to 0. 135 */ 136 137 struct wapbl_wc_header *wl_wc_header; /* l */ 138 void *wl_wc_scratch; /* l: scratch space (XXX: por que?!?) */ 139 140 kmutex_t wl_mtx; /* u: short-term lock */ 141 krwlock_t wl_rwlock; /* u: File system transaction lock */ 142 143 /* 144 * Must be held while accessing 145 * wl_count or wl_bufs or head or tail 146 */ 147 148 /* 149 * Callback called from within the flush routine to flush any extra 150 * bits. Note that flush may be skipped without calling this if 151 * there are no outstanding buffers in the transaction. 152 */ 153 #if _KERNEL 154 wapbl_flush_fn_t wl_flush; /* r */ 155 wapbl_flush_fn_t wl_flush_abort;/* r */ 156 #endif 157 158 size_t wl_bufbytes; /* m: Byte count of pages in wl_bufs */ 159 size_t wl_bufcount; /* m: Count of buffers in wl_bufs */ 160 size_t wl_bcount; /* m: Total bcount of wl_bufs */ 161 162 LIST_HEAD(, buf) wl_bufs; /* m: Buffers in current transaction */ 163 164 kcondvar_t wl_reclaimable_cv; /* m (obviously) */ 165 size_t wl_reclaimable_bytes; /* m: Amount of space available for 166 reclamation by truncate */ 167 int wl_error_count; /* m: # of wl_entries with errors */ 168 size_t wl_reserved_bytes; /* never truncate log smaller than this */ 169 170 #ifdef WAPBL_DEBUG_BUFBYTES 171 size_t wl_unsynced_bufbytes; /* Byte count of unsynced buffers */ 172 #endif 173 174 daddr_t *wl_deallocblks;/* l: address of block */ 175 int *wl_dealloclens; /* l: size of block */ 176 int wl_dealloccnt; /* l: total count */ 177 int wl_dealloclim; /* l: max count */ 178 179 /* hashtable of inode numbers for allocated but unlinked inodes */ 180 /* synch ??? 
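 * (Going by the accessors further below, the answer appears to be
 * wl_mtx: wapbl_register_inode(), wapbl_unregister_inode() and
 * wapbl_inodetrk_get() all manipulate this hash with wl_mtx held.)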
*/ 181 LIST_HEAD(wapbl_ino_head, wapbl_ino) *wl_inohash; 182 u_long wl_inohashmask; 183 int wl_inohashcnt; 184 185 SIMPLEQ_HEAD(, wapbl_entry) wl_entries; /* On disk transaction 186 accounting */ 187 }; 188 189 #ifdef WAPBL_DEBUG_PRINT 190 int wapbl_debug_print = WAPBL_DEBUG_PRINT; 191 #endif 192 193 /****************************************************************/ 194 #ifdef _KERNEL 195 196 #ifdef WAPBL_DEBUG 197 struct wapbl *wapbl_debug_wl; 198 #endif 199 200 static int wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail); 201 static int wapbl_write_blocks(struct wapbl *wl, off_t *offp); 202 static int wapbl_write_revocations(struct wapbl *wl, off_t *offp); 203 static int wapbl_write_inodes(struct wapbl *wl, off_t *offp); 204 #endif /* _KERNEL */ 205 206 static int wapbl_replay_process(struct wapbl_replay *wr, off_t, off_t); 207 208 static inline size_t wapbl_space_free(size_t avail, off_t head, 209 off_t tail); 210 static inline size_t wapbl_space_used(size_t avail, off_t head, 211 off_t tail); 212 213 #ifdef _KERNEL 214 215 #define WAPBL_INODETRK_SIZE 83 216 static int wapbl_ino_pool_refcount; 217 static struct pool wapbl_ino_pool; 218 struct wapbl_ino { 219 LIST_ENTRY(wapbl_ino) wi_hash; 220 ino_t wi_ino; 221 mode_t wi_mode; 222 }; 223 224 static void wapbl_inodetrk_init(struct wapbl *wl, u_int size); 225 static void wapbl_inodetrk_free(struct wapbl *wl); 226 static struct wapbl_ino *wapbl_inodetrk_get(struct wapbl *wl, ino_t ino); 227 228 static size_t wapbl_transaction_len(struct wapbl *wl); 229 static inline size_t wapbl_transaction_inodes_len(struct wapbl *wl); 230 231 #if 0 232 int wapbl_replay_verify(struct wapbl_replay *, struct vnode *); 233 #endif 234 235 static int wapbl_replay_isopen1(struct wapbl_replay *); 236 237 /* 238 * This is useful for debugging. If set, the log will 239 * only be truncated when necessary. 240 */ 241 int wapbl_lazy_truncate = 0; 242 243 struct wapbl_ops wapbl_ops = { 244 .wo_wapbl_discard = wapbl_discard, 245 .wo_wapbl_replay_isopen = wapbl_replay_isopen1, 246 .wo_wapbl_replay_can_read = wapbl_replay_can_read, 247 .wo_wapbl_replay_read = wapbl_replay_read, 248 .wo_wapbl_add_buf = wapbl_add_buf, 249 .wo_wapbl_remove_buf = wapbl_remove_buf, 250 .wo_wapbl_resize_buf = wapbl_resize_buf, 251 .wo_wapbl_begin = wapbl_begin, 252 .wo_wapbl_end = wapbl_end, 253 .wo_wapbl_junlock_assert= wapbl_junlock_assert, 254 255 /* XXX: the following is only used to say "this is a wapbl buf" */ 256 .wo_wapbl_biodone = wapbl_biodone, 257 }; 258 259 void 260 wapbl_init(void) 261 { 262 263 malloc_type_attach(M_WAPBL); 264 } 265 266 static int 267 wapbl_start_flush_inodes(struct wapbl *wl, struct wapbl_replay *wr) 268 { 269 int error, i; 270 271 WAPBL_PRINTF(WAPBL_PRINT_REPLAY, 272 ("wapbl_start: reusing log with %d inodes\n", wr->wr_inodescnt)); 273 274 /* 275 * Its only valid to reuse the replay log if its 276 * the same as the new log we just opened. 
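 * "The same" here means the same log geometry: device, starting
 * physical block, circular size and offset, and the two block-size
 * shifts, which is what the assertions just below compare field by
 * field.  The generation number is then bumped past the replayed
 * one, presumably so the reused header supersedes it.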
277 */ 278 KDASSERT(!wapbl_replay_isopen(wr)); 279 KASSERT(wl->wl_devvp->v_rdev == wr->wr_devvp->v_rdev); 280 KASSERT(wl->wl_logpbn == wr->wr_logpbn); 281 KASSERT(wl->wl_circ_size == wr->wr_circ_size); 282 KASSERT(wl->wl_circ_off == wr->wr_circ_off); 283 KASSERT(wl->wl_log_dev_bshift == wr->wr_log_dev_bshift); 284 KASSERT(wl->wl_fs_dev_bshift == wr->wr_fs_dev_bshift); 285 286 wl->wl_wc_header->wc_generation = wr->wr_generation + 1; 287 288 for (i = 0; i < wr->wr_inodescnt; i++) 289 wapbl_register_inode(wl, wr->wr_inodes[i].wr_inumber, 290 wr->wr_inodes[i].wr_imode); 291 292 /* Make sure new transaction won't overwrite old inodes list */ 293 KDASSERT(wapbl_transaction_len(wl) <= 294 wapbl_space_free(wl->wl_circ_size, wr->wr_inodeshead, 295 wr->wr_inodestail)); 296 297 wl->wl_head = wl->wl_tail = wr->wr_inodeshead; 298 wl->wl_reclaimable_bytes = wl->wl_reserved_bytes = 299 wapbl_transaction_len(wl); 300 301 error = wapbl_write_inodes(wl, &wl->wl_head); 302 if (error) 303 return error; 304 305 KASSERT(wl->wl_head != wl->wl_tail); 306 KASSERT(wl->wl_head != 0); 307 308 return 0; 309 } 310 311 int 312 wapbl_start(struct wapbl ** wlp, struct mount *mp, struct vnode *vp, 313 daddr_t off, size_t count, size_t blksize, struct wapbl_replay *wr, 314 wapbl_flush_fn_t flushfn, wapbl_flush_fn_t flushabortfn) 315 { 316 struct wapbl *wl; 317 struct vnode *devvp; 318 daddr_t logpbn; 319 int error; 320 int log_dev_bshift = DEV_BSHIFT; 321 int fs_dev_bshift = DEV_BSHIFT; 322 int run; 323 324 WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_start: vp=%p off=%" PRId64 325 " count=%zu blksize=%zu\n", vp, off, count, blksize)); 326 327 if (log_dev_bshift > fs_dev_bshift) { 328 WAPBL_PRINTF(WAPBL_PRINT_OPEN, 329 ("wapbl: log device's block size cannot be larger " 330 "than filesystem's\n")); 331 /* 332 * Not currently implemented, although it could be if 333 * needed someday. 334 */ 335 return ENOSYS; 336 } 337 338 if (off < 0) 339 return EINVAL; 340 341 if (blksize < DEV_BSIZE) 342 return EINVAL; 343 if (blksize % DEV_BSIZE) 344 return EINVAL; 345 346 /* XXXTODO: verify that the full load is writable */ 347 348 /* 349 * XXX check for minimum log size 350 * minimum is governed by minimum amount of space 351 * to complete a transaction. (probably truncate) 352 */ 353 /* XXX for now pick something minimal */ 354 if ((count * blksize) < MAXPHYS) { 355 return ENOSPC; 356 } 357 358 if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, &run)) != 0) { 359 return error; 360 } 361 362 wl = wapbl_calloc(1, sizeof(*wl)); 363 rw_init(&wl->wl_rwlock); 364 mutex_init(&wl->wl_mtx, MUTEX_DEFAULT, IPL_NONE); 365 cv_init(&wl->wl_reclaimable_cv, "wapblrec"); 366 LIST_INIT(&wl->wl_bufs); 367 SIMPLEQ_INIT(&wl->wl_entries); 368 369 wl->wl_logvp = vp; 370 wl->wl_devvp = devvp; 371 wl->wl_mount = mp; 372 wl->wl_logpbn = logpbn; 373 wl->wl_log_dev_bshift = log_dev_bshift; 374 wl->wl_fs_dev_bshift = fs_dev_bshift; 375 376 wl->wl_flush = flushfn; 377 wl->wl_flush_abort = flushabortfn; 378 379 /* Reserve two log device blocks for the commit headers */ 380 wl->wl_circ_off = 2<<wl->wl_log_dev_bshift; 381 wl->wl_circ_size = ((count * blksize) - wl->wl_circ_off); 382 /* truncate the log usage to a multiple of log_dev_bshift */ 383 wl->wl_circ_size >>= wl->wl_log_dev_bshift; 384 wl->wl_circ_size <<= wl->wl_log_dev_bshift; 385 386 /* 387 * wl_bufbytes_max limits the size of the in memory transaction space. 388 * - Since buffers are allocated and accounted for in units of 389 * PAGE_SIZE it is required to be a multiple of PAGE_SIZE 390 * (i.e. 
1<<PAGE_SHIFT) 391 * - Since the log device has to be written in units of 392 * 1<<wl_log_dev_bshift it is required to be a mulitple of 393 * 1<<wl_log_dev_bshift. 394 * - Since filesystem will provide data in units of 1<<wl_fs_dev_bshift, 395 * it is convenient to be a multiple of 1<<wl_fs_dev_bshift. 396 * Therefore it must be multiple of the least common multiple of those 397 * three quantities. Fortunately, all of those quantities are 398 * guaranteed to be a power of two, and the least common multiple of 399 * a set of numbers which are all powers of two is simply the maximum 400 * of those numbers. Finally, the maximum logarithm of a power of two 401 * is the same as the log of the maximum power of two. So we can do 402 * the following operations to size wl_bufbytes_max: 403 */ 404 405 /* XXX fix actual number of pages reserved per filesystem. */ 406 wl->wl_bufbytes_max = MIN(wl->wl_circ_size, buf_memcalc() / 2); 407 408 /* Round wl_bufbytes_max to the largest power of two constraint */ 409 wl->wl_bufbytes_max >>= PAGE_SHIFT; 410 wl->wl_bufbytes_max <<= PAGE_SHIFT; 411 wl->wl_bufbytes_max >>= wl->wl_log_dev_bshift; 412 wl->wl_bufbytes_max <<= wl->wl_log_dev_bshift; 413 wl->wl_bufbytes_max >>= wl->wl_fs_dev_bshift; 414 wl->wl_bufbytes_max <<= wl->wl_fs_dev_bshift; 415 416 /* XXX maybe use filesystem fragment size instead of 1024 */ 417 /* XXX fix actual number of buffers reserved per filesystem. */ 418 wl->wl_bufcount_max = (nbuf / 2) * 1024; 419 420 /* XXX tie this into resource estimation */ 421 wl->wl_dealloclim = 2 * btodb(wl->wl_bufbytes_max); 422 423 wl->wl_deallocblks = wapbl_malloc(sizeof(*wl->wl_deallocblks) * 424 wl->wl_dealloclim); 425 wl->wl_dealloclens = wapbl_malloc(sizeof(*wl->wl_dealloclens) * 426 wl->wl_dealloclim); 427 428 wapbl_inodetrk_init(wl, WAPBL_INODETRK_SIZE); 429 430 /* Initialize the commit header */ 431 { 432 struct wapbl_wc_header *wc; 433 size_t len = 1 << wl->wl_log_dev_bshift; 434 wc = wapbl_calloc(1, len); 435 wc->wc_type = WAPBL_WC_HEADER; 436 wc->wc_len = len; 437 wc->wc_circ_off = wl->wl_circ_off; 438 wc->wc_circ_size = wl->wl_circ_size; 439 /* XXX wc->wc_fsid */ 440 wc->wc_log_dev_bshift = wl->wl_log_dev_bshift; 441 wc->wc_fs_dev_bshift = wl->wl_fs_dev_bshift; 442 wl->wl_wc_header = wc; 443 wl->wl_wc_scratch = wapbl_malloc(len); 444 } 445 446 /* 447 * if there was an existing set of unlinked but 448 * allocated inodes, preserve it in the new 449 * log. 
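 * wapbl_start_flush_inodes(), called just below, handles that case:
 * it re-registers every inode recorded during replay and writes the
 * list at the new log head, reserving enough space that the first
 * new transaction cannot overwrite it.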
450 */ 451 if (wr && wr->wr_inodescnt) { 452 error = wapbl_start_flush_inodes(wl, wr); 453 if (error) 454 goto errout; 455 } 456 457 error = wapbl_write_commit(wl, wl->wl_head, wl->wl_tail); 458 if (error) { 459 goto errout; 460 } 461 462 *wlp = wl; 463 #if defined(WAPBL_DEBUG) 464 wapbl_debug_wl = wl; 465 #endif 466 467 return 0; 468 errout: 469 wapbl_discard(wl); 470 wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len); 471 wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len); 472 wapbl_free(wl->wl_deallocblks, 473 sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim); 474 wapbl_free(wl->wl_dealloclens, 475 sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim); 476 wapbl_inodetrk_free(wl); 477 wapbl_free(wl, sizeof(*wl)); 478 479 return error; 480 } 481 482 /* 483 * Like wapbl_flush, only discards the transaction 484 * completely 485 */ 486 487 void 488 wapbl_discard(struct wapbl *wl) 489 { 490 struct wapbl_entry *we; 491 struct buf *bp; 492 int i; 493 494 /* 495 * XXX we may consider using upgrade here 496 * if we want to call flush from inside a transaction 497 */ 498 rw_enter(&wl->wl_rwlock, RW_WRITER); 499 wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens, 500 wl->wl_dealloccnt); 501 502 #ifdef WAPBL_DEBUG_PRINT 503 { 504 pid_t pid = -1; 505 lwpid_t lid = -1; 506 if (curproc) 507 pid = curproc->p_pid; 508 if (curlwp) 509 lid = curlwp->l_lid; 510 #ifdef WAPBL_DEBUG_BUFBYTES 511 WAPBL_PRINTF(WAPBL_PRINT_DISCARD, 512 ("wapbl_discard: thread %d.%d discarding " 513 "transaction\n" 514 "\tbufcount=%zu bufbytes=%zu bcount=%zu " 515 "deallocs=%d inodes=%d\n" 516 "\terrcnt = %u, reclaimable=%zu reserved=%zu " 517 "unsynced=%zu\n", 518 pid, lid, wl->wl_bufcount, wl->wl_bufbytes, 519 wl->wl_bcount, wl->wl_dealloccnt, 520 wl->wl_inohashcnt, wl->wl_error_count, 521 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes, 522 wl->wl_unsynced_bufbytes)); 523 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) { 524 WAPBL_PRINTF(WAPBL_PRINT_DISCARD, 525 ("\tentry: bufcount = %zu, reclaimable = %zu, " 526 "error = %d, unsynced = %zu\n", 527 we->we_bufcount, we->we_reclaimable_bytes, 528 we->we_error, we->we_unsynced_bufbytes)); 529 } 530 #else /* !WAPBL_DEBUG_BUFBYTES */ 531 WAPBL_PRINTF(WAPBL_PRINT_DISCARD, 532 ("wapbl_discard: thread %d.%d discarding transaction\n" 533 "\tbufcount=%zu bufbytes=%zu bcount=%zu " 534 "deallocs=%d inodes=%d\n" 535 "\terrcnt = %u, reclaimable=%zu reserved=%zu\n", 536 pid, lid, wl->wl_bufcount, wl->wl_bufbytes, 537 wl->wl_bcount, wl->wl_dealloccnt, 538 wl->wl_inohashcnt, wl->wl_error_count, 539 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes)); 540 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) { 541 WAPBL_PRINTF(WAPBL_PRINT_DISCARD, 542 ("\tentry: bufcount = %zu, reclaimable = %zu, " 543 "error = %d\n", 544 we->we_bufcount, we->we_reclaimable_bytes, 545 we->we_error)); 546 } 547 #endif /* !WAPBL_DEBUG_BUFBYTES */ 548 } 549 #endif /* WAPBL_DEBUG_PRINT */ 550 551 for (i = 0; i <= wl->wl_inohashmask; i++) { 552 struct wapbl_ino_head *wih; 553 struct wapbl_ino *wi; 554 555 wih = &wl->wl_inohash[i]; 556 while ((wi = LIST_FIRST(wih)) != NULL) { 557 LIST_REMOVE(wi, wi_hash); 558 pool_put(&wapbl_ino_pool, wi); 559 KASSERT(wl->wl_inohashcnt > 0); 560 wl->wl_inohashcnt--; 561 } 562 } 563 564 /* 565 * clean buffer list 566 */ 567 mutex_enter(&bufcache_lock); 568 mutex_enter(&wl->wl_mtx); 569 while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) { 570 if (bbusy(bp, 0, 0, &wl->wl_mtx) == 0) { 571 /* 572 * The buffer will be unlocked and 573 * removed from the transaction in brelse 574 
*/ 575 mutex_exit(&wl->wl_mtx); 576 brelsel(bp, 0); 577 mutex_enter(&wl->wl_mtx); 578 } 579 } 580 mutex_exit(&wl->wl_mtx); 581 mutex_exit(&bufcache_lock); 582 583 /* 584 * Remove references to this wl from wl_entries, free any which 585 * no longer have buffers, others will be freed in wapbl_biodone 586 * when they no longer have any buffers. 587 */ 588 while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) != NULL) { 589 SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries); 590 /* XXX should we be accumulating wl_error_count 591 * and increasing reclaimable bytes ? */ 592 we->we_wapbl = NULL; 593 if (we->we_bufcount == 0) { 594 #ifdef WAPBL_DEBUG_BUFBYTES 595 KASSERT(we->we_unsynced_bufbytes == 0); 596 #endif 597 wapbl_free(we, sizeof(*we)); 598 } 599 } 600 601 /* Discard list of deallocs */ 602 wl->wl_dealloccnt = 0; 603 /* XXX should we clear wl_reserved_bytes? */ 604 605 KASSERT(wl->wl_bufbytes == 0); 606 KASSERT(wl->wl_bcount == 0); 607 KASSERT(wl->wl_bufcount == 0); 608 KASSERT(LIST_EMPTY(&wl->wl_bufs)); 609 KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries)); 610 KASSERT(wl->wl_inohashcnt == 0); 611 612 rw_exit(&wl->wl_rwlock); 613 } 614 615 int 616 wapbl_stop(struct wapbl *wl, int force) 617 { 618 struct vnode *vp; 619 int error; 620 621 WAPBL_PRINTF(WAPBL_PRINT_OPEN, ("wapbl_stop called\n")); 622 error = wapbl_flush(wl, 1); 623 if (error) { 624 if (force) 625 wapbl_discard(wl); 626 else 627 return error; 628 } 629 630 /* Unlinked inodes persist after a flush */ 631 if (wl->wl_inohashcnt) { 632 if (force) { 633 wapbl_discard(wl); 634 } else { 635 return EBUSY; 636 } 637 } 638 639 KASSERT(wl->wl_bufbytes == 0); 640 KASSERT(wl->wl_bcount == 0); 641 KASSERT(wl->wl_bufcount == 0); 642 KASSERT(LIST_EMPTY(&wl->wl_bufs)); 643 KASSERT(wl->wl_dealloccnt == 0); 644 KASSERT(SIMPLEQ_EMPTY(&wl->wl_entries)); 645 KASSERT(wl->wl_inohashcnt == 0); 646 647 vp = wl->wl_logvp; 648 649 wapbl_free(wl->wl_wc_scratch, wl->wl_wc_header->wc_len); 650 wapbl_free(wl->wl_wc_header, wl->wl_wc_header->wc_len); 651 wapbl_free(wl->wl_deallocblks, 652 sizeof(*wl->wl_deallocblks) * wl->wl_dealloclim); 653 wapbl_free(wl->wl_dealloclens, 654 sizeof(*wl->wl_dealloclens) * wl->wl_dealloclim); 655 wapbl_inodetrk_free(wl); 656 657 cv_destroy(&wl->wl_reclaimable_cv); 658 mutex_destroy(&wl->wl_mtx); 659 rw_destroy(&wl->wl_rwlock); 660 wapbl_free(wl, sizeof(*wl)); 661 662 return 0; 663 } 664 665 static int 666 wapbl_doio(void *data, size_t len, struct vnode *devvp, daddr_t pbn, int flags) 667 { 668 struct pstats *pstats = curlwp->l_proc->p_stats; 669 struct buf *bp; 670 int error; 671 672 KASSERT((flags & ~(B_WRITE | B_READ)) == 0); 673 KASSERT(devvp->v_type == VBLK); 674 675 if ((flags & (B_WRITE | B_READ)) == B_WRITE) { 676 mutex_enter(&devvp->v_interlock); 677 devvp->v_numoutput++; 678 mutex_exit(&devvp->v_interlock); 679 pstats->p_ru.ru_oublock++; 680 } else { 681 pstats->p_ru.ru_inblock++; 682 } 683 684 bp = getiobuf(devvp, true); 685 bp->b_flags = flags; 686 bp->b_cflags = BC_BUSY; /* silly & dubious */ 687 bp->b_dev = devvp->v_rdev; 688 bp->b_data = data; 689 bp->b_bufsize = bp->b_resid = bp->b_bcount = len; 690 bp->b_blkno = pbn; 691 692 WAPBL_PRINTF(WAPBL_PRINT_IO, 693 ("wapbl_doio: %s %d bytes at block %"PRId64" on dev 0x%"PRIx64"\n", 694 BUF_ISWRITE(bp) ? 
"write" : "read", bp->b_bcount, 695 bp->b_blkno, bp->b_dev)); 696 697 VOP_STRATEGY(devvp, bp); 698 699 error = biowait(bp); 700 putiobuf(bp); 701 702 if (error) { 703 WAPBL_PRINTF(WAPBL_PRINT_ERROR, 704 ("wapbl_doio: %s %zu bytes at block %" PRId64 705 " on dev 0x%"PRIx64" failed with error %d\n", 706 (((flags & (B_WRITE | B_READ)) == B_WRITE) ? 707 "write" : "read"), 708 len, pbn, devvp->v_rdev, error)); 709 } 710 711 return error; 712 } 713 714 int 715 wapbl_write(void *data, size_t len, struct vnode *devvp, daddr_t pbn) 716 { 717 718 return wapbl_doio(data, len, devvp, pbn, B_WRITE); 719 } 720 721 int 722 wapbl_read(void *data, size_t len, struct vnode *devvp, daddr_t pbn) 723 { 724 725 return wapbl_doio(data, len, devvp, pbn, B_READ); 726 } 727 728 /* 729 * Off is byte offset returns new offset for next write 730 * handles log wraparound 731 */ 732 static int 733 wapbl_circ_write(struct wapbl *wl, void *data, size_t len, off_t *offp) 734 { 735 size_t slen; 736 off_t off = *offp; 737 int error; 738 739 KDASSERT(((len >> wl->wl_log_dev_bshift) << 740 wl->wl_log_dev_bshift) == len); 741 742 if (off < wl->wl_circ_off) 743 off = wl->wl_circ_off; 744 slen = wl->wl_circ_off + wl->wl_circ_size - off; 745 if (slen < len) { 746 error = wapbl_write(data, slen, wl->wl_devvp, 747 wl->wl_logpbn + (off >> wl->wl_log_dev_bshift)); 748 if (error) 749 return error; 750 data = (uint8_t *)data + slen; 751 len -= slen; 752 off = wl->wl_circ_off; 753 } 754 error = wapbl_write(data, len, wl->wl_devvp, 755 wl->wl_logpbn + (off >> wl->wl_log_dev_bshift)); 756 if (error) 757 return error; 758 off += len; 759 if (off >= wl->wl_circ_off + wl->wl_circ_size) 760 off = wl->wl_circ_off; 761 *offp = off; 762 return 0; 763 } 764 765 /****************************************************************/ 766 767 int 768 wapbl_begin(struct wapbl *wl, const char *file, int line) 769 { 770 int doflush; 771 unsigned lockcount; 772 773 KDASSERT(wl); 774 775 /* 776 * XXX this needs to be made much more sophisticated. 777 * perhaps each wapbl_begin could reserve a specified 778 * number of buffers and bytes. 
779 */ 780 mutex_enter(&wl->wl_mtx); 781 lockcount = wl->wl_lock_count; 782 doflush = ((wl->wl_bufbytes + (lockcount * MAXPHYS)) > 783 wl->wl_bufbytes_max / 2) || 784 ((wl->wl_bufcount + (lockcount * 10)) > 785 wl->wl_bufcount_max / 2) || 786 (wapbl_transaction_len(wl) > wl->wl_circ_size / 2) || 787 (wl->wl_dealloccnt >= 788 (wl->wl_dealloclim - (wl->wl_dealloclim >> 8))); 789 mutex_exit(&wl->wl_mtx); 790 791 if (doflush) { 792 WAPBL_PRINTF(WAPBL_PRINT_FLUSH, 793 ("force flush lockcnt=%d bufbytes=%zu " 794 "(max=%zu) bufcount=%zu (max=%zu) " 795 "dealloccnt %d (lim=%d)\n", 796 lockcount, wl->wl_bufbytes, 797 wl->wl_bufbytes_max, wl->wl_bufcount, 798 wl->wl_bufcount_max, 799 wl->wl_dealloccnt, wl->wl_dealloclim)); 800 } 801 802 if (doflush) { 803 int error = wapbl_flush(wl, 0); 804 if (error) 805 return error; 806 } 807 808 rw_enter(&wl->wl_rwlock, RW_READER); 809 mutex_enter(&wl->wl_mtx); 810 wl->wl_lock_count++; 811 mutex_exit(&wl->wl_mtx); 812 813 #if defined(WAPBL_DEBUG_PRINT) 814 WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION, 815 ("wapbl_begin thread %d.%d with bufcount=%zu " 816 "bufbytes=%zu bcount=%zu at %s:%d\n", 817 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount, 818 wl->wl_bufbytes, wl->wl_bcount, file, line)); 819 #endif 820 821 return 0; 822 } 823 824 void 825 wapbl_end(struct wapbl *wl) 826 { 827 828 #if defined(WAPBL_DEBUG_PRINT) 829 WAPBL_PRINTF(WAPBL_PRINT_TRANSACTION, 830 ("wapbl_end thread %d.%d with bufcount=%zu " 831 "bufbytes=%zu bcount=%zu\n", 832 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount, 833 wl->wl_bufbytes, wl->wl_bcount)); 834 #endif 835 836 mutex_enter(&wl->wl_mtx); 837 KASSERT(wl->wl_lock_count > 0); 838 wl->wl_lock_count--; 839 mutex_exit(&wl->wl_mtx); 840 841 rw_exit(&wl->wl_rwlock); 842 } 843 844 void 845 wapbl_add_buf(struct wapbl *wl, struct buf * bp) 846 { 847 848 KASSERT(bp->b_cflags & BC_BUSY); 849 KASSERT(bp->b_vp); 850 851 wapbl_jlock_assert(wl); 852 853 #if 0 854 /* 855 * XXX this might be an issue for swapfiles. 856 * see uvm_swap.c:1702 857 * 858 * XXX2 why require it then? leap of semantics? 859 */ 860 KASSERT((bp->b_cflags & BC_NOCACHE) == 0); 861 #endif 862 863 mutex_enter(&wl->wl_mtx); 864 if (bp->b_flags & B_LOCKED) { 865 LIST_REMOVE(bp, b_wapbllist); 866 WAPBL_PRINTF(WAPBL_PRINT_BUFFER2, 867 ("wapbl_add_buf thread %d.%d re-adding buf %p " 868 "with %d bytes %d bcount\n", 869 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, 870 bp->b_bcount)); 871 } else { 872 /* unlocked by dirty buffers shouldn't exist */ 873 KASSERT(!(bp->b_oflags & BO_DELWRI)); 874 wl->wl_bufbytes += bp->b_bufsize; 875 wl->wl_bcount += bp->b_bcount; 876 wl->wl_bufcount++; 877 WAPBL_PRINTF(WAPBL_PRINT_BUFFER, 878 ("wapbl_add_buf thread %d.%d adding buf %p " 879 "with %d bytes %d bcount\n", 880 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, 881 bp->b_bcount)); 882 } 883 LIST_INSERT_HEAD(&wl->wl_bufs, bp, b_wapbllist); 884 mutex_exit(&wl->wl_mtx); 885 886 bp->b_flags |= B_LOCKED; 887 } 888 889 static void 890 wapbl_remove_buf_locked(struct wapbl * wl, struct buf *bp) 891 { 892 893 KASSERT(mutex_owned(&wl->wl_mtx)); 894 KASSERT(bp->b_cflags & BC_BUSY); 895 wapbl_jlock_assert(wl); 896 897 #if 0 898 /* 899 * XXX this might be an issue for swapfiles. 
900 * see uvm_swap.c:1725 901 * 902 * XXXdeux: see above 903 */ 904 KASSERT((bp->b_flags & BC_NOCACHE) == 0); 905 #endif 906 KASSERT(bp->b_flags & B_LOCKED); 907 908 WAPBL_PRINTF(WAPBL_PRINT_BUFFER, 909 ("wapbl_remove_buf thread %d.%d removing buf %p with " 910 "%d bytes %d bcount\n", 911 curproc->p_pid, curlwp->l_lid, bp, bp->b_bufsize, bp->b_bcount)); 912 913 KASSERT(wl->wl_bufbytes >= bp->b_bufsize); 914 wl->wl_bufbytes -= bp->b_bufsize; 915 KASSERT(wl->wl_bcount >= bp->b_bcount); 916 wl->wl_bcount -= bp->b_bcount; 917 KASSERT(wl->wl_bufcount > 0); 918 wl->wl_bufcount--; 919 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0)); 920 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0)); 921 LIST_REMOVE(bp, b_wapbllist); 922 923 bp->b_flags &= ~B_LOCKED; 924 } 925 926 /* called from brelsel() in vfs_bio among other places */ 927 void 928 wapbl_remove_buf(struct wapbl * wl, struct buf *bp) 929 { 930 931 mutex_enter(&wl->wl_mtx); 932 wapbl_remove_buf_locked(wl, bp); 933 mutex_exit(&wl->wl_mtx); 934 } 935 936 void 937 wapbl_resize_buf(struct wapbl *wl, struct buf *bp, long oldsz, long oldcnt) 938 { 939 940 KASSERT(bp->b_cflags & BC_BUSY); 941 942 /* 943 * XXX: why does this depend on B_LOCKED? otherwise the buf 944 * is not for a transaction? if so, why is this called in the 945 * first place? 946 */ 947 if (bp->b_flags & B_LOCKED) { 948 mutex_enter(&wl->wl_mtx); 949 wl->wl_bufbytes += bp->b_bufsize - oldsz; 950 wl->wl_bcount += bp->b_bcount - oldcnt; 951 mutex_exit(&wl->wl_mtx); 952 } 953 } 954 955 #endif /* _KERNEL */ 956 957 /****************************************************************/ 958 /* Some utility inlines */ 959 960 /* This is used to advance the pointer at old to new value at old+delta */ 961 static inline off_t 962 wapbl_advance(size_t size, size_t off, off_t old, size_t delta) 963 { 964 off_t new; 965 966 /* Define acceptable ranges for inputs. */ 967 KASSERT(delta <= size); 968 KASSERT((old == 0) || (old >= off)); 969 KASSERT(old < (size + off)); 970 971 if ((old == 0) && (delta != 0)) 972 new = off + delta; 973 else if ((old + delta) < (size + off)) 974 new = old + delta; 975 else 976 new = (old + delta) - size; 977 978 /* Note some interesting axioms */ 979 KASSERT((delta != 0) || (new == old)); 980 KASSERT((delta == 0) || (new != 0)); 981 KASSERT((delta != (size)) || (new == old)); 982 983 /* Define acceptable ranges for output. 
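 * Purely illustrative values (not realistic log sizes) showing the
 * arithmetic above and the ranges asserted below:
 *
 *	wapbl_advance(100, 10,   0,   5) == 15	empty log starts at off+delta
 *	wapbl_advance(100, 10, 105,  10) == 15	wraps back past off+size
 *	wapbl_advance(100, 10,  42, 100) == 42	delta == size is a full lap
 *	wapbl_space_used(100, 15, 10)    ==  5	head 15 / tail 10 in that log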
*/ 984 KASSERT((new == 0) || (new >= off)); 985 KASSERT(new < (size + off)); 986 return new; 987 } 988 989 static inline size_t 990 wapbl_space_used(size_t avail, off_t head, off_t tail) 991 { 992 993 if (tail == 0) { 994 KASSERT(head == 0); 995 return 0; 996 } 997 return ((head + (avail - 1) - tail) % avail) + 1; 998 } 999 1000 static inline size_t 1001 wapbl_space_free(size_t avail, off_t head, off_t tail) 1002 { 1003 1004 return avail - wapbl_space_used(avail, head, tail); 1005 } 1006 1007 static inline void 1008 wapbl_advance_head(size_t size, size_t off, size_t delta, off_t *headp, 1009 off_t *tailp) 1010 { 1011 off_t head = *headp; 1012 off_t tail = *tailp; 1013 1014 KASSERT(delta <= wapbl_space_free(size, head, tail)); 1015 head = wapbl_advance(size, off, head, delta); 1016 if ((tail == 0) && (head != 0)) 1017 tail = off; 1018 *headp = head; 1019 *tailp = tail; 1020 } 1021 1022 static inline void 1023 wapbl_advance_tail(size_t size, size_t off, size_t delta, off_t *headp, 1024 off_t *tailp) 1025 { 1026 off_t head = *headp; 1027 off_t tail = *tailp; 1028 1029 KASSERT(delta <= wapbl_space_used(size, head, tail)); 1030 tail = wapbl_advance(size, off, tail, delta); 1031 if (head == tail) { 1032 head = tail = 0; 1033 } 1034 *headp = head; 1035 *tailp = tail; 1036 } 1037 1038 #ifdef _KERNEL 1039 1040 /****************************************************************/ 1041 1042 /* 1043 * Remove transactions whose buffers are completely flushed to disk. 1044 * Will block until at least minfree space is available. 1045 * only intended to be called from inside wapbl_flush and therefore 1046 * does not protect against commit races with itself or with flush. 1047 */ 1048 static int 1049 wapbl_truncate(struct wapbl *wl, size_t minfree, int waitonly) 1050 { 1051 size_t delta; 1052 size_t avail; 1053 off_t head; 1054 off_t tail; 1055 int error = 0; 1056 1057 KASSERT(minfree <= (wl->wl_circ_size - wl->wl_reserved_bytes)); 1058 KASSERT(rw_write_held(&wl->wl_rwlock)); 1059 1060 mutex_enter(&wl->wl_mtx); 1061 1062 /* 1063 * First check to see if we have to do a commit 1064 * at all. 1065 */ 1066 avail = wapbl_space_free(wl->wl_circ_size, wl->wl_head, wl->wl_tail); 1067 if (minfree < avail) { 1068 mutex_exit(&wl->wl_mtx); 1069 return 0; 1070 } 1071 minfree -= avail; 1072 while ((wl->wl_error_count == 0) && 1073 (wl->wl_reclaimable_bytes < minfree)) { 1074 WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE, 1075 ("wapbl_truncate: sleeping on %p wl=%p bytes=%zd " 1076 "minfree=%zd\n", 1077 &wl->wl_reclaimable_bytes, wl, wl->wl_reclaimable_bytes, 1078 minfree)); 1079 1080 cv_wait(&wl->wl_reclaimable_cv, &wl->wl_mtx); 1081 } 1082 if (wl->wl_reclaimable_bytes < minfree) { 1083 KASSERT(wl->wl_error_count); 1084 /* XXX maybe get actual error from buffer instead someday? */ 1085 error = EIO; 1086 } 1087 head = wl->wl_head; 1088 tail = wl->wl_tail; 1089 delta = wl->wl_reclaimable_bytes; 1090 1091 /* If all of of the entries are flushed, then be sure to keep 1092 * the reserved bytes reserved. Watch out for discarded transactions, 1093 * which could leave more bytes reserved than are reclaimable. 
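 * (wl_reserved_bytes appears to exist to keep room for rewriting the
 * list of allocated-but-unlinked inodes: wapbl_flush() recomputes it
 * from wapbl_transaction_inodes_len() further below.)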
1094 */ 1095 if (SIMPLEQ_EMPTY(&wl->wl_entries) && 1096 (delta >= wl->wl_reserved_bytes)) { 1097 delta -= wl->wl_reserved_bytes; 1098 } 1099 wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, &head, 1100 &tail); 1101 KDASSERT(wl->wl_reserved_bytes <= 1102 wapbl_space_used(wl->wl_circ_size, head, tail)); 1103 mutex_exit(&wl->wl_mtx); 1104 1105 if (error) 1106 return error; 1107 1108 if (waitonly) 1109 return 0; 1110 1111 /* 1112 * This is where head, tail and delta are unprotected 1113 * from races against itself or flush. This is ok since 1114 * we only call this routine from inside flush itself. 1115 * 1116 * XXX: how can it race against itself when accessed only 1117 * from behind the write-locked rwlock? 1118 */ 1119 error = wapbl_write_commit(wl, head, tail); 1120 if (error) 1121 return error; 1122 1123 wl->wl_head = head; 1124 wl->wl_tail = tail; 1125 1126 mutex_enter(&wl->wl_mtx); 1127 KASSERT(wl->wl_reclaimable_bytes >= delta); 1128 wl->wl_reclaimable_bytes -= delta; 1129 mutex_exit(&wl->wl_mtx); 1130 WAPBL_PRINTF(WAPBL_PRINT_TRUNCATE, 1131 ("wapbl_truncate thread %d.%d truncating %zu bytes\n", 1132 curproc->p_pid, curlwp->l_lid, delta)); 1133 1134 return 0; 1135 } 1136 1137 /****************************************************************/ 1138 1139 void 1140 wapbl_biodone(struct buf *bp) 1141 { 1142 struct wapbl_entry *we = bp->b_private; 1143 struct wapbl *wl = we->we_wapbl; 1144 1145 /* 1146 * Handle possible flushing of buffers after log has been 1147 * decomissioned. 1148 */ 1149 if (!wl) { 1150 KASSERT(we->we_bufcount > 0); 1151 we->we_bufcount--; 1152 #ifdef WAPBL_DEBUG_BUFBYTES 1153 KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize); 1154 we->we_unsynced_bufbytes -= bp->b_bufsize; 1155 #endif 1156 1157 if (we->we_bufcount == 0) { 1158 #ifdef WAPBL_DEBUG_BUFBYTES 1159 KASSERT(we->we_unsynced_bufbytes == 0); 1160 #endif 1161 wapbl_free(we, sizeof(*we)); 1162 } 1163 1164 brelse(bp, 0); 1165 return; 1166 } 1167 1168 #ifdef ohbother 1169 KDASSERT(bp->b_flags & B_DONE); 1170 KDASSERT(!(bp->b_flags & B_DELWRI)); 1171 KDASSERT(bp->b_flags & B_ASYNC); 1172 KDASSERT(bp->b_flags & B_BUSY); 1173 KDASSERT(!(bp->b_flags & B_LOCKED)); 1174 KDASSERT(!(bp->b_flags & B_READ)); 1175 KDASSERT(!(bp->b_flags & B_INVAL)); 1176 KDASSERT(!(bp->b_flags & B_NOCACHE)); 1177 #endif 1178 1179 if (bp->b_error) { 1180 #ifdef notyet /* Can't currently handle possible dirty buffer reuse */ 1181 /* 1182 * XXXpooka: interfaces not fully updated 1183 * Note: this was not enabled in the original patch 1184 * against netbsd4 either. I don't know if comment 1185 * above is true or not. 1186 */ 1187 1188 /* 1189 * If an error occurs, report the error and leave the 1190 * buffer as a delayed write on the LRU queue. 1191 * restarting the write would likely result in 1192 * an error spinloop, so let it be done harmlessly 1193 * by the syncer. 
1194 */ 1195 bp->b_flags &= ~(B_DONE); 1196 simple_unlock(&bp->b_interlock); 1197 1198 if (we->we_error == 0) { 1199 mutex_enter(&wl->wl_mtx); 1200 wl->wl_error_count++; 1201 mutex_exit(&wl->wl_mtx); 1202 cv_broadcast(&wl->wl_reclaimable_cv); 1203 } 1204 we->we_error = bp->b_error; 1205 bp->b_error = 0; 1206 brelse(bp); 1207 return; 1208 #else 1209 /* For now, just mark the log permanently errored out */ 1210 1211 mutex_enter(&wl->wl_mtx); 1212 if (wl->wl_error_count == 0) { 1213 wl->wl_error_count++; 1214 cv_broadcast(&wl->wl_reclaimable_cv); 1215 } 1216 mutex_exit(&wl->wl_mtx); 1217 #endif 1218 } 1219 1220 mutex_enter(&wl->wl_mtx); 1221 1222 KASSERT(we->we_bufcount > 0); 1223 we->we_bufcount--; 1224 #ifdef WAPBL_DEBUG_BUFBYTES 1225 KASSERT(we->we_unsynced_bufbytes >= bp->b_bufsize); 1226 we->we_unsynced_bufbytes -= bp->b_bufsize; 1227 KASSERT(wl->wl_unsynced_bufbytes >= bp->b_bufsize); 1228 wl->wl_unsynced_bufbytes -= bp->b_bufsize; 1229 #endif 1230 1231 /* 1232 * If the current transaction can be reclaimed, start 1233 * at the beginning and reclaim any consecutive reclaimable 1234 * transactions. If we successfully reclaim anything, 1235 * then wakeup anyone waiting for the reclaim. 1236 */ 1237 if (we->we_bufcount == 0) { 1238 size_t delta = 0; 1239 int errcnt = 0; 1240 #ifdef WAPBL_DEBUG_BUFBYTES 1241 KDASSERT(we->we_unsynced_bufbytes == 0); 1242 #endif 1243 /* 1244 * clear any posted error, since the buffer it came from 1245 * has successfully flushed by now 1246 */ 1247 while ((we = SIMPLEQ_FIRST(&wl->wl_entries)) && 1248 (we->we_bufcount == 0)) { 1249 delta += we->we_reclaimable_bytes; 1250 if (we->we_error) 1251 errcnt++; 1252 SIMPLEQ_REMOVE_HEAD(&wl->wl_entries, we_entries); 1253 wapbl_free(we, sizeof(*we)); 1254 } 1255 1256 if (delta) { 1257 wl->wl_reclaimable_bytes += delta; 1258 KASSERT(wl->wl_error_count >= errcnt); 1259 wl->wl_error_count -= errcnt; 1260 cv_broadcast(&wl->wl_reclaimable_cv); 1261 } 1262 } 1263 1264 mutex_exit(&wl->wl_mtx); 1265 brelse(bp, 0); 1266 } 1267 1268 /* 1269 * Write transactions to disk + start I/O for contents 1270 */ 1271 int 1272 wapbl_flush(struct wapbl *wl, int waitfor) 1273 { 1274 struct buf *bp; 1275 struct wapbl_entry *we; 1276 off_t off; 1277 off_t head; 1278 off_t tail; 1279 size_t delta = 0; 1280 size_t flushsize; 1281 size_t reserved; 1282 int error = 0; 1283 1284 /* 1285 * Do a quick check to see if a full flush can be skipped 1286 * This assumes that the flush callback does not need to be called 1287 * unless there are other outstanding bufs. 1288 */ 1289 if (!waitfor) { 1290 size_t nbufs; 1291 mutex_enter(&wl->wl_mtx); /* XXX need mutex here to 1292 protect the KASSERTS */ 1293 nbufs = wl->wl_bufcount; 1294 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bufbytes == 0)); 1295 KASSERT((wl->wl_bufcount == 0) == (wl->wl_bcount == 0)); 1296 mutex_exit(&wl->wl_mtx); 1297 if (nbufs == 0) 1298 return 0; 1299 } 1300 1301 /* 1302 * XXX we may consider using LK_UPGRADE here 1303 * if we want to call flush from inside a transaction 1304 */ 1305 rw_enter(&wl->wl_rwlock, RW_WRITER); 1306 wl->wl_flush(wl->wl_mount, wl->wl_deallocblks, wl->wl_dealloclens, 1307 wl->wl_dealloccnt); 1308 1309 /* 1310 * Now that we are fully locked and flushed, 1311 * do another check for nothing to do. 
1312 */ 1313 if (wl->wl_bufcount == 0) { 1314 goto out; 1315 } 1316 1317 #if 0 1318 WAPBL_PRINTF(WAPBL_PRINT_FLUSH, 1319 ("wapbl_flush thread %d.%d flushing entries with " 1320 "bufcount=%zu bufbytes=%zu\n", 1321 curproc->p_pid, curlwp->l_lid, wl->wl_bufcount, 1322 wl->wl_bufbytes)); 1323 #endif 1324 1325 /* Calculate amount of space needed to flush */ 1326 flushsize = wapbl_transaction_len(wl); 1327 1328 if (flushsize > (wl->wl_circ_size - wl->wl_reserved_bytes)) { 1329 /* 1330 * XXX this could be handled more gracefully, perhaps place 1331 * only a partial transaction in the log and allow the 1332 * remaining to flush without the protection of the journal. 1333 */ 1334 panic("wapbl_flush: current transaction too big to flush\n"); 1335 } 1336 1337 error = wapbl_truncate(wl, flushsize, 0); 1338 if (error) 1339 goto out2; 1340 1341 off = wl->wl_head; 1342 KASSERT((off == 0) || ((off >= wl->wl_circ_off) && 1343 (off < wl->wl_circ_off + wl->wl_circ_size))); 1344 error = wapbl_write_blocks(wl, &off); 1345 if (error) 1346 goto out2; 1347 error = wapbl_write_revocations(wl, &off); 1348 if (error) 1349 goto out2; 1350 error = wapbl_write_inodes(wl, &off); 1351 if (error) 1352 goto out2; 1353 1354 reserved = 0; 1355 if (wl->wl_inohashcnt) 1356 reserved = wapbl_transaction_inodes_len(wl); 1357 1358 head = wl->wl_head; 1359 tail = wl->wl_tail; 1360 1361 wapbl_advance_head(wl->wl_circ_size, wl->wl_circ_off, flushsize, 1362 &head, &tail); 1363 #ifdef WAPBL_DEBUG 1364 if (head != off) { 1365 panic("lost head! head=%"PRIdMAX" tail=%" PRIdMAX 1366 " off=%"PRIdMAX" flush=%zu\n", 1367 (intmax_t)head, (intmax_t)tail, (intmax_t)off, 1368 flushsize); 1369 } 1370 #else 1371 KASSERT(head == off); 1372 #endif 1373 1374 /* Opportunistically move the tail forward if we can */ 1375 if (!wapbl_lazy_truncate) { 1376 mutex_enter(&wl->wl_mtx); 1377 delta = wl->wl_reclaimable_bytes; 1378 mutex_exit(&wl->wl_mtx); 1379 wapbl_advance_tail(wl->wl_circ_size, wl->wl_circ_off, delta, 1380 &head, &tail); 1381 } 1382 1383 error = wapbl_write_commit(wl, head, tail); 1384 if (error) 1385 goto out2; 1386 1387 we = wapbl_calloc(1, sizeof(*we)); 1388 1389 #ifdef WAPBL_DEBUG_BUFBYTES 1390 WAPBL_PRINTF(WAPBL_PRINT_FLUSH, 1391 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu" 1392 " unsynced=%zu" 1393 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d " 1394 "inodes=%d\n", 1395 curproc->p_pid, curlwp->l_lid, flushsize, delta, 1396 wapbl_space_used(wl->wl_circ_size, head, tail), 1397 wl->wl_unsynced_bufbytes, wl->wl_bufcount, 1398 wl->wl_bufbytes, wl->wl_bcount, wl->wl_dealloccnt, 1399 wl->wl_inohashcnt)); 1400 #else 1401 WAPBL_PRINTF(WAPBL_PRINT_FLUSH, 1402 ("wapbl_flush: thread %d.%d head+=%zu tail+=%zu used=%zu" 1403 "\n\tbufcount=%zu bufbytes=%zu bcount=%zu deallocs=%d " 1404 "inodes=%d\n", 1405 curproc->p_pid, curlwp->l_lid, flushsize, delta, 1406 wapbl_space_used(wl->wl_circ_size, head, tail), 1407 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount, 1408 wl->wl_dealloccnt, wl->wl_inohashcnt)); 1409 #endif 1410 1411 1412 mutex_enter(&bufcache_lock); 1413 mutex_enter(&wl->wl_mtx); 1414 1415 wl->wl_reserved_bytes = reserved; 1416 wl->wl_head = head; 1417 wl->wl_tail = tail; 1418 KASSERT(wl->wl_reclaimable_bytes >= delta); 1419 wl->wl_reclaimable_bytes -= delta; 1420 wl->wl_dealloccnt = 0; 1421 #ifdef WAPBL_DEBUG_BUFBYTES 1422 wl->wl_unsynced_bufbytes += wl->wl_bufbytes; 1423 #endif 1424 1425 we->we_wapbl = wl; 1426 we->we_bufcount = wl->wl_bufcount; 1427 #ifdef WAPBL_DEBUG_BUFBYTES 1428 we->we_unsynced_bufbytes = 
wl->wl_bufbytes; 1429 #endif 1430 we->we_reclaimable_bytes = flushsize; 1431 we->we_error = 0; 1432 SIMPLEQ_INSERT_TAIL(&wl->wl_entries, we, we_entries); 1433 1434 /* 1435 * this flushes bufs in reverse order than they were queued 1436 * it shouldn't matter, but if we care we could use TAILQ instead. 1437 * XXX Note they will get put on the lru queue when they flush 1438 * so we might actually want to change this to preserve order. 1439 */ 1440 while ((bp = LIST_FIRST(&wl->wl_bufs)) != NULL) { 1441 if (bbusy(bp, 0, 0, &wl->wl_mtx)) { 1442 continue; 1443 } 1444 bp->b_iodone = wapbl_biodone; 1445 bp->b_private = we; 1446 bremfree(bp); 1447 wapbl_remove_buf_locked(wl, bp); 1448 mutex_exit(&wl->wl_mtx); 1449 mutex_exit(&bufcache_lock); 1450 bawrite(bp); 1451 mutex_enter(&bufcache_lock); 1452 mutex_enter(&wl->wl_mtx); 1453 } 1454 mutex_exit(&wl->wl_mtx); 1455 mutex_exit(&bufcache_lock); 1456 1457 #if 0 1458 WAPBL_PRINTF(WAPBL_PRINT_FLUSH, 1459 ("wapbl_flush thread %d.%d done flushing entries...\n", 1460 curproc->p_pid, curlwp->l_lid)); 1461 #endif 1462 1463 out: 1464 1465 /* 1466 * If the waitfor flag is set, don't return until everything is 1467 * fully flushed and the on disk log is empty. 1468 */ 1469 if (waitfor) { 1470 error = wapbl_truncate(wl, wl->wl_circ_size - 1471 wl->wl_reserved_bytes, wapbl_lazy_truncate); 1472 } 1473 1474 out2: 1475 if (error) { 1476 wl->wl_flush_abort(wl->wl_mount, wl->wl_deallocblks, 1477 wl->wl_dealloclens, wl->wl_dealloccnt); 1478 } 1479 1480 #ifdef WAPBL_DEBUG_PRINT 1481 if (error) { 1482 pid_t pid = -1; 1483 lwpid_t lid = -1; 1484 if (curproc) 1485 pid = curproc->p_pid; 1486 if (curlwp) 1487 lid = curlwp->l_lid; 1488 mutex_enter(&wl->wl_mtx); 1489 #ifdef WAPBL_DEBUG_BUFBYTES 1490 WAPBL_PRINTF(WAPBL_PRINT_ERROR, 1491 ("wapbl_flush: thread %d.%d aborted flush: " 1492 "error = %d\n" 1493 "\tbufcount=%zu bufbytes=%zu bcount=%zu " 1494 "deallocs=%d inodes=%d\n" 1495 "\terrcnt = %d, reclaimable=%zu reserved=%zu " 1496 "unsynced=%zu\n", 1497 pid, lid, error, wl->wl_bufcount, 1498 wl->wl_bufbytes, wl->wl_bcount, 1499 wl->wl_dealloccnt, wl->wl_inohashcnt, 1500 wl->wl_error_count, wl->wl_reclaimable_bytes, 1501 wl->wl_reserved_bytes, wl->wl_unsynced_bufbytes)); 1502 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) { 1503 WAPBL_PRINTF(WAPBL_PRINT_ERROR, 1504 ("\tentry: bufcount = %zu, reclaimable = %zu, " 1505 "error = %d, unsynced = %zu\n", 1506 we->we_bufcount, we->we_reclaimable_bytes, 1507 we->we_error, we->we_unsynced_bufbytes)); 1508 } 1509 #else 1510 WAPBL_PRINTF(WAPBL_PRINT_ERROR, 1511 ("wapbl_flush: thread %d.%d aborted flush: " 1512 "error = %d\n" 1513 "\tbufcount=%zu bufbytes=%zu bcount=%zu " 1514 "deallocs=%d inodes=%d\n" 1515 "\terrcnt = %d, reclaimable=%zu reserved=%zu\n", 1516 pid, lid, error, wl->wl_bufcount, 1517 wl->wl_bufbytes, wl->wl_bcount, 1518 wl->wl_dealloccnt, wl->wl_inohashcnt, 1519 wl->wl_error_count, wl->wl_reclaimable_bytes, 1520 wl->wl_reserved_bytes)); 1521 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) { 1522 WAPBL_PRINTF(WAPBL_PRINT_ERROR, 1523 ("\tentry: bufcount = %zu, reclaimable = %zu, " 1524 "error = %d\n", we->we_bufcount, 1525 we->we_reclaimable_bytes, we->we_error)); 1526 } 1527 #endif 1528 mutex_exit(&wl->wl_mtx); 1529 } 1530 #endif 1531 1532 rw_exit(&wl->wl_rwlock); 1533 return error; 1534 } 1535 1536 /****************************************************************/ 1537 1538 void 1539 wapbl_jlock_assert(struct wapbl *wl) 1540 { 1541 1542 KASSERT(rw_lock_held(&wl->wl_rwlock)); 1543 } 1544 1545 void 1546 
wapbl_junlock_assert(struct wapbl *wl) 1547 { 1548 1549 KASSERT(!rw_write_held(&wl->wl_rwlock)); 1550 } 1551 1552 /****************************************************************/ 1553 1554 /* locks missing */ 1555 void 1556 wapbl_print(struct wapbl *wl, 1557 int full, 1558 void (*pr)(const char *, ...)) 1559 { 1560 struct buf *bp; 1561 struct wapbl_entry *we; 1562 (*pr)("wapbl %p", wl); 1563 (*pr)("\nlogvp = %p, devvp = %p, logpbn = %"PRId64"\n", 1564 wl->wl_logvp, wl->wl_devvp, wl->wl_logpbn); 1565 (*pr)("circ = %zu, header = %zu, head = %"PRIdMAX" tail = %"PRIdMAX"\n", 1566 wl->wl_circ_size, wl->wl_circ_off, 1567 (intmax_t)wl->wl_head, (intmax_t)wl->wl_tail); 1568 (*pr)("fs_dev_bshift = %d, log_dev_bshift = %d\n", 1569 wl->wl_log_dev_bshift, wl->wl_fs_dev_bshift); 1570 #ifdef WAPBL_DEBUG_BUFBYTES 1571 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu " 1572 "reserved = %zu errcnt = %d unsynced = %zu\n", 1573 wl->wl_bufcount, wl->wl_bufbytes, wl->wl_bcount, 1574 wl->wl_reclaimable_bytes, wl->wl_reserved_bytes, 1575 wl->wl_error_count, wl->wl_unsynced_bufbytes); 1576 #else 1577 (*pr)("bufcount = %zu, bufbytes = %zu bcount = %zu reclaimable = %zu " 1578 "reserved = %zu errcnt = %d\n", wl->wl_bufcount, wl->wl_bufbytes, 1579 wl->wl_bcount, wl->wl_reclaimable_bytes, wl->wl_reserved_bytes, 1580 wl->wl_error_count); 1581 #endif 1582 (*pr)("\tdealloccnt = %d, dealloclim = %d\n", 1583 wl->wl_dealloccnt, wl->wl_dealloclim); 1584 (*pr)("\tinohashcnt = %d, inohashmask = 0x%08x\n", 1585 wl->wl_inohashcnt, wl->wl_inohashmask); 1586 (*pr)("entries:\n"); 1587 SIMPLEQ_FOREACH(we, &wl->wl_entries, we_entries) { 1588 #ifdef WAPBL_DEBUG_BUFBYTES 1589 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d, " 1590 "unsynced = %zu\n", 1591 we->we_bufcount, we->we_reclaimable_bytes, 1592 we->we_error, we->we_unsynced_bufbytes); 1593 #else 1594 (*pr)("\tbufcount = %zu, reclaimable = %zu, error = %d\n", 1595 we->we_bufcount, we->we_reclaimable_bytes, we->we_error); 1596 #endif 1597 } 1598 if (full) { 1599 int cnt = 0; 1600 (*pr)("bufs ="); 1601 LIST_FOREACH(bp, &wl->wl_bufs, b_wapbllist) { 1602 if (!LIST_NEXT(bp, b_wapbllist)) { 1603 (*pr)(" %p", bp); 1604 } else if ((++cnt % 6) == 0) { 1605 (*pr)(" %p,\n\t", bp); 1606 } else { 1607 (*pr)(" %p,", bp); 1608 } 1609 } 1610 (*pr)("\n"); 1611 1612 (*pr)("dealloced blks = "); 1613 { 1614 int i; 1615 cnt = 0; 1616 for (i = 0; i < wl->wl_dealloccnt; i++) { 1617 (*pr)(" %"PRId64":%d,", 1618 wl->wl_deallocblks[i], 1619 wl->wl_dealloclens[i]); 1620 if ((++cnt % 4) == 0) { 1621 (*pr)("\n\t"); 1622 } 1623 } 1624 } 1625 (*pr)("\n"); 1626 1627 (*pr)("registered inodes = "); 1628 { 1629 int i; 1630 cnt = 0; 1631 for (i = 0; i <= wl->wl_inohashmask; i++) { 1632 struct wapbl_ino_head *wih; 1633 struct wapbl_ino *wi; 1634 1635 wih = &wl->wl_inohash[i]; 1636 LIST_FOREACH(wi, wih, wi_hash) { 1637 if (wi->wi_ino == 0) 1638 continue; 1639 (*pr)(" %"PRId32"/0%06"PRIo32",", 1640 wi->wi_ino, wi->wi_mode); 1641 if ((++cnt % 4) == 0) { 1642 (*pr)("\n\t"); 1643 } 1644 } 1645 } 1646 (*pr)("\n"); 1647 } 1648 } 1649 } 1650 1651 #if defined(WAPBL_DEBUG) || defined(DDB) 1652 void 1653 wapbl_dump(struct wapbl *wl) 1654 { 1655 #if defined(WAPBL_DEBUG) 1656 if (!wl) 1657 wl = wapbl_debug_wl; 1658 #endif 1659 if (!wl) 1660 return; 1661 wapbl_print(wl, 1, printf); 1662 } 1663 #endif 1664 1665 /****************************************************************/ 1666 1667 void 1668 wapbl_register_deallocation(struct wapbl *wl, daddr_t blk, int len) 1669 { 1670 1671 
wapbl_jlock_assert(wl); 1672 1673 /* XXX should eventually instead tie this into resource estimation */ 1674 /* 1675 * XXX this panic needs locking/mutex analysis and the 1676 * ability to cope with the failure. 1677 */ 1678 /* XXX this XXX doesn't have enough XXX */ 1679 if (__predict_false(wl->wl_dealloccnt >= wl->wl_dealloclim)) 1680 panic("wapbl_register_deallocation: out of resources"); 1681 1682 wl->wl_deallocblks[wl->wl_dealloccnt] = blk; 1683 wl->wl_dealloclens[wl->wl_dealloccnt] = len; 1684 wl->wl_dealloccnt++; 1685 WAPBL_PRINTF(WAPBL_PRINT_ALLOC, 1686 ("wapbl_register_deallocation: blk=%"PRId64" len=%d\n", blk, len)); 1687 } 1688 1689 /****************************************************************/ 1690 1691 static void 1692 wapbl_inodetrk_init(struct wapbl *wl, u_int size) 1693 { 1694 1695 wl->wl_inohash = hashinit(size, HASH_LIST, true, &wl->wl_inohashmask); 1696 if (atomic_inc_uint_nv(&wapbl_ino_pool_refcount) == 1) { 1697 pool_init(&wapbl_ino_pool, sizeof(struct wapbl_ino), 0, 0, 0, 1698 "wapblinopl", &pool_allocator_nointr, IPL_NONE); 1699 } 1700 } 1701 1702 static void 1703 wapbl_inodetrk_free(struct wapbl *wl) 1704 { 1705 1706 /* XXX this KASSERT needs locking/mutex analysis */ 1707 KASSERT(wl->wl_inohashcnt == 0); 1708 hashdone(wl->wl_inohash, HASH_LIST, wl->wl_inohashmask); 1709 if (atomic_dec_uint_nv(&wapbl_ino_pool_refcount) == 0) { 1710 pool_destroy(&wapbl_ino_pool); 1711 } 1712 } 1713 1714 static struct wapbl_ino * 1715 wapbl_inodetrk_get(struct wapbl *wl, ino_t ino) 1716 { 1717 struct wapbl_ino_head *wih; 1718 struct wapbl_ino *wi; 1719 1720 KASSERT(mutex_owned(&wl->wl_mtx)); 1721 1722 wih = &wl->wl_inohash[ino & wl->wl_inohashmask]; 1723 LIST_FOREACH(wi, wih, wi_hash) { 1724 if (ino == wi->wi_ino) 1725 return wi; 1726 } 1727 return 0; 1728 } 1729 1730 void 1731 wapbl_register_inode(struct wapbl *wl, ino_t ino, mode_t mode) 1732 { 1733 struct wapbl_ino_head *wih; 1734 struct wapbl_ino *wi; 1735 1736 wi = pool_get(&wapbl_ino_pool, PR_WAITOK); 1737 1738 mutex_enter(&wl->wl_mtx); 1739 if (wapbl_inodetrk_get(wl, ino) == NULL) { 1740 wi->wi_ino = ino; 1741 wi->wi_mode = mode; 1742 wih = &wl->wl_inohash[ino & wl->wl_inohashmask]; 1743 LIST_INSERT_HEAD(wih, wi, wi_hash); 1744 wl->wl_inohashcnt++; 1745 WAPBL_PRINTF(WAPBL_PRINT_INODE, 1746 ("wapbl_register_inode: ino=%"PRId64"\n", ino)); 1747 mutex_exit(&wl->wl_mtx); 1748 } else { 1749 mutex_exit(&wl->wl_mtx); 1750 pool_put(&wapbl_ino_pool, wi); 1751 } 1752 } 1753 1754 void 1755 wapbl_unregister_inode(struct wapbl *wl, ino_t ino, mode_t mode) 1756 { 1757 struct wapbl_ino *wi; 1758 1759 mutex_enter(&wl->wl_mtx); 1760 wi = wapbl_inodetrk_get(wl, ino); 1761 if (wi) { 1762 WAPBL_PRINTF(WAPBL_PRINT_INODE, 1763 ("wapbl_unregister_inode: ino=%"PRId64"\n", ino)); 1764 KASSERT(wl->wl_inohashcnt > 0); 1765 wl->wl_inohashcnt--; 1766 LIST_REMOVE(wi, wi_hash); 1767 mutex_exit(&wl->wl_mtx); 1768 1769 pool_put(&wapbl_ino_pool, wi); 1770 } else { 1771 mutex_exit(&wl->wl_mtx); 1772 } 1773 } 1774 1775 /****************************************************************/ 1776 1777 static inline size_t 1778 wapbl_transaction_inodes_len(struct wapbl *wl) 1779 { 1780 int blocklen = 1<<wl->wl_log_dev_bshift; 1781 int iph; 1782 1783 /* Calculate number of inodes described in a inodelist header */ 1784 iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) / 1785 sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]); 1786 1787 KASSERT(iph > 0); 1788 1789 return MAX(1, howmany(wl->wl_inohashcnt, iph))*blocklen; 1790 } 1791 1792 1793 /* 
Calculate amount of space a transaction will take on disk */ 1794 static size_t 1795 wapbl_transaction_len(struct wapbl *wl) 1796 { 1797 int blocklen = 1<<wl->wl_log_dev_bshift; 1798 size_t len; 1799 int bph; 1800 1801 /* Calculate number of blocks described in a blocklist header */ 1802 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) / 1803 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]); 1804 1805 KASSERT(bph > 0); 1806 1807 len = wl->wl_bcount; 1808 len += howmany(wl->wl_bufcount, bph)*blocklen; 1809 len += howmany(wl->wl_dealloccnt, bph)*blocklen; 1810 len += wapbl_transaction_inodes_len(wl); 1811 1812 return len; 1813 } 1814 1815 /* 1816 * Perform commit operation 1817 * 1818 * Note that generation number incrementation needs to 1819 * be protected against racing with other invocations 1820 * of wapbl_commit. This is ok since this routine 1821 * is only invoked from wapbl_flush 1822 */ 1823 static int 1824 wapbl_write_commit(struct wapbl *wl, off_t head, off_t tail) 1825 { 1826 struct wapbl_wc_header *wc = wl->wl_wc_header; 1827 struct timespec ts; 1828 int error; 1829 int force = 1; 1830 1831 /* XXX Calc checksum here, instead we do this for now */ 1832 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED); 1833 if (error) { 1834 WAPBL_PRINTF(WAPBL_PRINT_ERROR, 1835 ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%"PRIx64 1836 " returned %d\n", wl->wl_devvp->v_rdev, error)); 1837 } 1838 1839 wc->wc_head = head; 1840 wc->wc_tail = tail; 1841 wc->wc_checksum = 0; 1842 wc->wc_version = 1; 1843 getnanotime(&ts); 1844 wc->wc_time = ts.tv_sec; 1845 wc->wc_timensec = ts.tv_nsec; 1846 1847 WAPBL_PRINTF(WAPBL_PRINT_WRITE, 1848 ("wapbl_write_commit: head = %"PRIdMAX "tail = %"PRIdMAX"\n", 1849 (intmax_t)head, (intmax_t)tail)); 1850 1851 /* 1852 * XXX if generation will rollover, then first zero 1853 * over second commit header before trying to write both headers. 1854 */ 1855 1856 error = wapbl_write(wc, wc->wc_len, wl->wl_devvp, 1857 wl->wl_logpbn + wc->wc_generation % 2); 1858 if (error) 1859 return error; 1860 1861 error = VOP_IOCTL(wl->wl_devvp, DIOCCACHESYNC, &force, FWRITE, FSCRED); 1862 if (error) { 1863 WAPBL_PRINTF(WAPBL_PRINT_ERROR, 1864 ("wapbl_write_commit: DIOCCACHESYNC on dev 0x%"PRIx64 1865 " returned %d\n", wl->wl_devvp->v_rdev, error)); 1866 } 1867 1868 /* 1869 * If the generation number was zero, write it out a second time. 1870 * This handles initialization and generation number rollover 1871 */ 1872 if (wc->wc_generation++ == 0) { 1873 error = wapbl_write_commit(wl, head, tail); 1874 /* 1875 * This panic should be able to be removed if we do the 1876 * zero'ing mentioned above, and we are certain to roll 1877 * back generation number on failure. 
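 * (For background: there are two commit header slots, selected by
 * wc_generation % 2 in the wapbl_write() call above, within the two
 * log-device blocks reserved via wl_circ_off in wapbl_start().  So,
 * presumably, a torn write can only damage the slot being written
 * while the other still describes a consistent log, and the
 * recursive call taken when the generation was still zero seeds
 * both slots.)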
1878 */ 1879 if (error) 1880 panic("wapbl_write_commit: error writing duplicate " 1881 "log header: %d\n", error); 1882 } 1883 return 0; 1884 } 1885 1886 /* Returns new offset value */ 1887 static int 1888 wapbl_write_blocks(struct wapbl *wl, off_t *offp) 1889 { 1890 struct wapbl_wc_blocklist *wc = 1891 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch; 1892 int blocklen = 1<<wl->wl_log_dev_bshift; 1893 int bph; 1894 struct buf *bp; 1895 off_t off = *offp; 1896 int error; 1897 size_t padding; 1898 1899 KASSERT(rw_write_held(&wl->wl_rwlock)); 1900 1901 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) / 1902 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]); 1903 1904 bp = LIST_FIRST(&wl->wl_bufs); 1905 1906 while (bp) { 1907 int cnt; 1908 struct buf *obp = bp; 1909 1910 KASSERT(bp->b_flags & B_LOCKED); 1911 1912 wc->wc_type = WAPBL_WC_BLOCKS; 1913 wc->wc_len = blocklen; 1914 wc->wc_blkcount = 0; 1915 while (bp && (wc->wc_blkcount < bph)) { 1916 /* 1917 * Make sure all the physical block numbers are up to 1918 * date. If this is not always true on a given 1919 * filesystem, then VOP_BMAP must be called. We 1920 * could call VOP_BMAP here, or else in the filesystem 1921 * specific flush callback, although neither of those 1922 * solutions allow us to take the vnode lock. If a 1923 * filesystem requires that we must take the vnode lock 1924 * to call VOP_BMAP, then we can probably do it in 1925 * bwrite when the vnode lock should already be held 1926 * by the invoking code. 1927 */ 1928 KASSERT((bp->b_vp->v_type == VBLK) || 1929 (bp->b_blkno != bp->b_lblkno)); 1930 KASSERT(bp->b_blkno > 0); 1931 1932 wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno; 1933 wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount; 1934 wc->wc_len += bp->b_bcount; 1935 wc->wc_blkcount++; 1936 bp = LIST_NEXT(bp, b_wapbllist); 1937 } 1938 if (wc->wc_len % blocklen != 0) { 1939 padding = blocklen - wc->wc_len % blocklen; 1940 wc->wc_len += padding; 1941 } else { 1942 padding = 0; 1943 } 1944 1945 WAPBL_PRINTF(WAPBL_PRINT_WRITE, 1946 ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n", 1947 wc->wc_len, padding, (intmax_t)off)); 1948 1949 error = wapbl_circ_write(wl, wc, blocklen, &off); 1950 if (error) 1951 return error; 1952 bp = obp; 1953 cnt = 0; 1954 while (bp && (cnt++ < bph)) { 1955 error = wapbl_circ_write(wl, bp->b_data, 1956 bp->b_bcount, &off); 1957 if (error) 1958 return error; 1959 bp = LIST_NEXT(bp, b_wapbllist); 1960 } 1961 if (padding) { 1962 void *zero; 1963 1964 zero = wapbl_malloc(padding); 1965 memset(zero, 0, padding); 1966 error = wapbl_circ_write(wl, zero, padding, &off); 1967 wapbl_free(zero, padding); 1968 if (error) 1969 return error; 1970 } 1971 } 1972 *offp = off; 1973 return 0; 1974 } 1975 1976 static int 1977 wapbl_write_revocations(struct wapbl *wl, off_t *offp) 1978 { 1979 struct wapbl_wc_blocklist *wc = 1980 (struct wapbl_wc_blocklist *)wl->wl_wc_scratch; 1981 int i; 1982 int blocklen = 1<<wl->wl_log_dev_bshift; 1983 int bph; 1984 off_t off = *offp; 1985 int error; 1986 1987 if (wl->wl_dealloccnt == 0) 1988 return 0; 1989 1990 bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) / 1991 sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]); 1992 1993 i = 0; 1994 while (i < wl->wl_dealloccnt) { 1995 wc->wc_type = WAPBL_WC_REVOCATIONS; 1996 wc->wc_len = blocklen; 1997 wc->wc_blkcount = 0; 1998 while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) { 1999 wc->wc_blocks[wc->wc_blkcount].wc_daddr = 2000 wl->wl_deallocblks[i]; 2001 
/* Returns new offset value */
static int
wapbl_write_blocks(struct wapbl *wl, off_t *offp)
{
	struct wapbl_wc_blocklist *wc =
	    (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
	int blocklen = 1<<wl->wl_log_dev_bshift;
	int bph;
	struct buf *bp;
	off_t off = *offp;
	int error;
	size_t padding;

	KASSERT(rw_write_held(&wl->wl_rwlock));

	bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);

	bp = LIST_FIRST(&wl->wl_bufs);

	while (bp) {
		int cnt;
		struct buf *obp = bp;

		KASSERT(bp->b_flags & B_LOCKED);

		wc->wc_type = WAPBL_WC_BLOCKS;
		wc->wc_len = blocklen;
		wc->wc_blkcount = 0;
		while (bp && (wc->wc_blkcount < bph)) {
			/*
			 * Make sure all the physical block numbers are up to
			 * date.  If this is not always true on a given
			 * filesystem, then VOP_BMAP must be called.  We
			 * could call VOP_BMAP here, or else in the filesystem
			 * specific flush callback, although neither of those
			 * solutions allow us to take the vnode lock.  If a
			 * filesystem requires that we must take the vnode lock
			 * to call VOP_BMAP, then we can probably do it in
			 * bwrite when the vnode lock should already be held
			 * by the invoking code.
			 */
			KASSERT((bp->b_vp->v_type == VBLK) ||
			    (bp->b_blkno != bp->b_lblkno));
			KASSERT(bp->b_blkno > 0);

			wc->wc_blocks[wc->wc_blkcount].wc_daddr = bp->b_blkno;
			wc->wc_blocks[wc->wc_blkcount].wc_dlen = bp->b_bcount;
			wc->wc_len += bp->b_bcount;
			wc->wc_blkcount++;
			bp = LIST_NEXT(bp, b_wapbllist);
		}
		if (wc->wc_len % blocklen != 0) {
			padding = blocklen - wc->wc_len % blocklen;
			wc->wc_len += padding;
		} else {
			padding = 0;
		}

		WAPBL_PRINTF(WAPBL_PRINT_WRITE,
		    ("wapbl_write_blocks: len = %u (padding %zu) off = %"PRIdMAX"\n",
		    wc->wc_len, padding, (intmax_t)off));

		error = wapbl_circ_write(wl, wc, blocklen, &off);
		if (error)
			return error;
		bp = obp;
		cnt = 0;
		while (bp && (cnt++ < bph)) {
			error = wapbl_circ_write(wl, bp->b_data,
			    bp->b_bcount, &off);
			if (error)
				return error;
			bp = LIST_NEXT(bp, b_wapbllist);
		}
		if (padding) {
			void *zero;

			zero = wapbl_malloc(padding);
			memset(zero, 0, padding);
			error = wapbl_circ_write(wl, zero, padding, &off);
			wapbl_free(zero, padding);
			if (error)
				return error;
		}
	}
	*offp = off;
	return 0;
}
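/*
 * Each iteration of the outer loop above emits one log record with
 * the following on-disk shape (wc_len bytes total, always a multiple
 * of blocklen):
 *
 *	+-------------------------------+
 *	| wapbl_wc_blocklist header     |  exactly one log device block
 *	| (up to bph wc_blocks entries) |
 *	+-------------------------------+
 *	| data of buffer 0 (b_bcount)   |
 *	| data of buffer 1 ...          |  in wl_bufs list order
 *	+-------------------------------+
 *	| zero padding                  |  rounds wc_len up to blocklen
 *	+-------------------------------+
 */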
static int
wapbl_write_revocations(struct wapbl *wl, off_t *offp)
{
	struct wapbl_wc_blocklist *wc =
	    (struct wapbl_wc_blocklist *)wl->wl_wc_scratch;
	int i;
	int blocklen = 1<<wl->wl_log_dev_bshift;
	int bph;
	off_t off = *offp;
	int error;

	if (wl->wl_dealloccnt == 0)
		return 0;

	bph = (blocklen - offsetof(struct wapbl_wc_blocklist, wc_blocks)) /
	    sizeof(((struct wapbl_wc_blocklist *)0)->wc_blocks[0]);

	i = 0;
	while (i < wl->wl_dealloccnt) {
		wc->wc_type = WAPBL_WC_REVOCATIONS;
		wc->wc_len = blocklen;
		wc->wc_blkcount = 0;
		while ((i < wl->wl_dealloccnt) && (wc->wc_blkcount < bph)) {
			wc->wc_blocks[wc->wc_blkcount].wc_daddr =
			    wl->wl_deallocblks[i];
			wc->wc_blocks[wc->wc_blkcount].wc_dlen =
			    wl->wl_dealloclens[i];
			wc->wc_blkcount++;
			i++;
		}
		WAPBL_PRINTF(WAPBL_PRINT_WRITE,
		    ("wapbl_write_revocations: len = %u off = %"PRIdMAX"\n",
		    wc->wc_len, (intmax_t)off));
		error = wapbl_circ_write(wl, wc, blocklen, &off);
		if (error)
			return error;
	}
	*offp = off;
	return 0;
}
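/*
 * Example: when a transaction both logs a metadata block and later
 * frees it, the revocation record written above keeps replay from
 * clobbering a block the filesystem may already have reused; during
 * recovery, wapbl_replay_process_revocations() drops the revoked
 * blocks from the replay hashtable.
 */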
static int
wapbl_write_inodes(struct wapbl *wl, off_t *offp)
{
	struct wapbl_wc_inodelist *wc =
	    (struct wapbl_wc_inodelist *)wl->wl_wc_scratch;
	int i;
	int blocklen = 1 << wl->wl_log_dev_bshift;
	off_t off = *offp;
	int error;

	struct wapbl_ino_head *wih;
	struct wapbl_ino *wi;
	int iph;

	iph = (blocklen - offsetof(struct wapbl_wc_inodelist, wc_inodes)) /
	    sizeof(((struct wapbl_wc_inodelist *)0)->wc_inodes[0]);

	i = 0;
	wih = &wl->wl_inohash[0];
	wi = 0;
	do {
		wc->wc_type = WAPBL_WC_INODES;
		wc->wc_len = blocklen;
		wc->wc_inocnt = 0;
		wc->wc_clear = (i == 0);
		while ((i < wl->wl_inohashcnt) && (wc->wc_inocnt < iph)) {
			while (!wi) {
				KASSERT((wih - &wl->wl_inohash[0])
				    <= wl->wl_inohashmask);
				wi = LIST_FIRST(wih++);
			}
			wc->wc_inodes[wc->wc_inocnt].wc_inumber = wi->wi_ino;
			wc->wc_inodes[wc->wc_inocnt].wc_imode = wi->wi_mode;
			wc->wc_inocnt++;
			i++;
			wi = LIST_NEXT(wi, wi_hash);
		}
		WAPBL_PRINTF(WAPBL_PRINT_WRITE,
		    ("wapbl_write_inodes: len = %u off = %"PRIdMAX"\n",
		    wc->wc_len, (intmax_t)off));
		error = wapbl_circ_write(wl, wc, blocklen, &off);
		if (error)
			return error;
	} while (i < wl->wl_inohashcnt);

	*offp = off;
	return 0;
}

#endif /* _KERNEL */

/****************************************************************/

struct wapbl_blk {
	LIST_ENTRY(wapbl_blk) wb_hash;
	daddr_t wb_blk;
	off_t wb_off; /* Offset of this block in the log */
};
#define	WAPBL_BLKPOOL_MIN 83

static void
wapbl_blkhash_init(struct wapbl_replay *wr, u_int size)
{
	if (size < WAPBL_BLKPOOL_MIN)
		size = WAPBL_BLKPOOL_MIN;
	KASSERT(wr->wr_blkhash == 0);
#ifdef _KERNEL
	wr->wr_blkhash = hashinit(size, HASH_LIST, true, &wr->wr_blkhashmask);
#else /* ! _KERNEL */
	/* Manually implement hashinit */
	{
		unsigned long i, hashsize;
		for (hashsize = 1; hashsize < size; hashsize <<= 1)
			continue;
		wr->wr_blkhash = wapbl_malloc(hashsize * sizeof(*wr->wr_blkhash));
		/* Initialize all hashsize buckets before setting the mask */
		for (i = 0; i < hashsize; i++)
			LIST_INIT(&wr->wr_blkhash[i]);
		wr->wr_blkhashmask = hashsize - 1;
	}
#endif /* ! _KERNEL */
}

static void
wapbl_blkhash_free(struct wapbl_replay *wr)
{
	KASSERT(wr->wr_blkhashcnt == 0);
#ifdef _KERNEL
	hashdone(wr->wr_blkhash, HASH_LIST, wr->wr_blkhashmask);
#else /* ! _KERNEL */
	wapbl_free(wr->wr_blkhash,
	    (wr->wr_blkhashmask + 1) * sizeof(*wr->wr_blkhash));
#endif /* ! _KERNEL */
}

static struct wapbl_blk *
wapbl_blkhash_get(struct wapbl_replay *wr, daddr_t blk)
{
	struct wapbl_blk_head *wbh;
	struct wapbl_blk *wb;
	wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
	LIST_FOREACH(wb, wbh, wb_hash) {
		if (blk == wb->wb_blk)
			return wb;
	}
	return 0;
}

static void
wapbl_blkhash_ins(struct wapbl_replay *wr, daddr_t blk, off_t off)
{
	struct wapbl_blk_head *wbh;
	struct wapbl_blk *wb;
	wb = wapbl_blkhash_get(wr, blk);
	if (wb) {
		KASSERT(wb->wb_blk == blk);
		wb->wb_off = off;
	} else {
		wb = wapbl_malloc(sizeof(*wb));
		wb->wb_blk = blk;
		wb->wb_off = off;
		wbh = &wr->wr_blkhash[blk & wr->wr_blkhashmask];
		LIST_INSERT_HEAD(wbh, wb, wb_hash);
		wr->wr_blkhashcnt++;
	}
}

static void
wapbl_blkhash_rem(struct wapbl_replay *wr, daddr_t blk)
{
	struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
	if (wb) {
		KASSERT(wr->wr_blkhashcnt > 0);
		wr->wr_blkhashcnt--;
		LIST_REMOVE(wb, wb_hash);
		wapbl_free(wb, sizeof(*wb));
	}
}

static void
wapbl_blkhash_clear(struct wapbl_replay *wr)
{
	unsigned long i;
	for (i = 0; i <= wr->wr_blkhashmask; i++) {
		struct wapbl_blk *wb;

		while ((wb = LIST_FIRST(&wr->wr_blkhash[i]))) {
			KASSERT(wr->wr_blkhashcnt > 0);
			wr->wr_blkhashcnt--;
			LIST_REMOVE(wb, wb_hash);
			wapbl_free(wb, sizeof(*wb));
		}
	}
	KASSERT(wr->wr_blkhashcnt == 0);
}
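/*
 * The hashtable keeps the newest log offset for each block, since
 * wapbl_blkhash_ins() overwrites wb_off on a duplicate insert; replay
 * therefore applies only the most recent copy of every block.  A
 * minimal illustrative sketch (compiled out, not part of the build):
 */
#if 0
static void
wapbl_blkhash_example(struct wapbl_replay *wr)
{
	struct wapbl_blk *wb;

	wapbl_blkhash_ins(wr, 100, 4096);	/* first copy of block 100 */
	wapbl_blkhash_ins(wr, 100, 8192);	/* newer copy replaces it */
	wb = wapbl_blkhash_get(wr, 100);
	KASSERT(wb != NULL && wb->wb_off == 8192);
	wapbl_blkhash_rem(wr, 100);		/* drops the entry entirely */
}
#endif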
/****************************************************************/

static int
wapbl_circ_read(struct wapbl_replay *wr, void *data, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;
	int error;

	KASSERT(((len >> wr->wr_log_dev_bshift) <<
	    wr->wr_log_dev_bshift) == len);
	if (off < wr->wr_circ_off)
		off = wr->wr_circ_off;
	slen = wr->wr_circ_off + wr->wr_circ_size - off;
	if (slen < len) {
		error = wapbl_read(data, slen, wr->wr_devvp,
		    wr->wr_logpbn + (off >> wr->wr_log_dev_bshift));
		if (error)
			return error;
		data = (uint8_t *)data + slen;
		len -= slen;
		off = wr->wr_circ_off;
	}
	error = wapbl_read(data, len, wr->wr_devvp,
	    wr->wr_logpbn + (off >> wr->wr_log_dev_bshift));
	if (error)
		return error;
	off += len;
	if (off >= wr->wr_circ_off + wr->wr_circ_size)
		off = wr->wr_circ_off;
	*offp = off;
	return 0;
}

static void
wapbl_circ_advance(struct wapbl_replay *wr, size_t len, off_t *offp)
{
	size_t slen;
	off_t off = *offp;

	KASSERT(((len >> wr->wr_log_dev_bshift) <<
	    wr->wr_log_dev_bshift) == len);

	if (off < wr->wr_circ_off)
		off = wr->wr_circ_off;
	slen = wr->wr_circ_off + wr->wr_circ_size - off;
	if (slen < len) {
		len -= slen;
		off = wr->wr_circ_off;
	}
	off += len;
	if (off >= wr->wr_circ_off + wr->wr_circ_size)
		off = wr->wr_circ_off;
	*offp = off;
}
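/*
 * Wraparound example (illustrative numbers): with wr_circ_off = 512
 * and wr_circ_size = 8192, valid log offsets live in [512, 8704).
 * A 1024-byte wapbl_circ_read() starting at off = 8192 computes
 * slen = 512 + 8192 - 8192 = 512 < 1024, so it reads 512 bytes at
 * offset 8192, wraps to wr_circ_off, reads the remaining 512 bytes at
 * offset 512, and returns *offp = 1024.
 */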
/****************************************************************/

int
wapbl_replay_start(struct wapbl_replay **wrp, struct vnode *vp,
	daddr_t off, size_t count, size_t blksize)
{
	struct wapbl_replay *wr;
	int error;
	struct vnode *devvp;
	daddr_t logpbn;
	uint8_t *scratch;
	struct wapbl_wc_header *wch;
	struct wapbl_wc_header *wch2;
	/* Use this until we read the actual log header */
	int log_dev_bshift = DEV_BSHIFT;
	size_t used;

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
	    ("wapbl_replay_start: vp=%p off=%"PRId64 " count=%zu blksize=%zu\n",
	    vp, off, count, blksize));

	if (off < 0)
		return EINVAL;

	if (blksize < DEV_BSIZE)
		return EINVAL;
	if (blksize % DEV_BSIZE)
		return EINVAL;

#ifdef _KERNEL
#if 0
	/* XXX vp->v_size isn't reliably set for VBLK devices,
	 * especially root.  However, we might still want to verify
	 * that the full load is readable */
	if ((off + count) * blksize > vp->v_size)
		return EINVAL;
#endif

	if ((error = VOP_BMAP(vp, off, &devvp, &logpbn, 0)) != 0) {
		return error;
	}
#else /* ! _KERNEL */
	devvp = vp;
	logpbn = off;
#endif /* ! _KERNEL */

	scratch = wapbl_malloc(MAXBSIZE);

	error = wapbl_read(scratch, 2<<log_dev_bshift, devvp, logpbn);
	if (error)
		goto errout;

	wch = (struct wapbl_wc_header *)scratch;
	wch2 =
	    (struct wapbl_wc_header *)(scratch + (1<<log_dev_bshift));
	/* XXX verify checksums and magic numbers */
	if (wch->wc_type != WAPBL_WC_HEADER) {
		printf("Unrecognized wapbl magic: 0x%08x\n", wch->wc_type);
		error = EFTYPE;
		goto errout;
	}

	if (wch2->wc_generation > wch->wc_generation)
		wch = wch2;

	wr = wapbl_calloc(1, sizeof(*wr));

	wr->wr_logvp = vp;
	wr->wr_devvp = devvp;
	wr->wr_logpbn = logpbn;

	wr->wr_scratch = scratch;

	wr->wr_log_dev_bshift = wch->wc_log_dev_bshift;
	wr->wr_fs_dev_bshift = wch->wc_fs_dev_bshift;
	wr->wr_circ_off = wch->wc_circ_off;
	wr->wr_circ_size = wch->wc_circ_size;
	wr->wr_generation = wch->wc_generation;

	used = wapbl_space_used(wch->wc_circ_size, wch->wc_head, wch->wc_tail);

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY,
	    ("wapbl_replay: head=%"PRId64" tail=%"PRId64" off=%"PRId64
	    " len=%"PRId64" used=%zu\n",
	    wch->wc_head, wch->wc_tail, wch->wc_circ_off,
	    wch->wc_circ_size, used));

	wapbl_blkhash_init(wr, (used >> wch->wc_fs_dev_bshift));

	error = wapbl_replay_process(wr, wch->wc_head, wch->wc_tail);
	if (error) {
		wapbl_replay_stop(wr);
		wapbl_replay_free(wr);
		return error;
	}

	*wrp = wr;
	return 0;

errout:
	wapbl_free(scratch, MAXBSIZE);
	return error;
}

void
wapbl_replay_stop(struct wapbl_replay *wr)
{

	if (!wapbl_replay_isopen(wr))
		return;

	WAPBL_PRINTF(WAPBL_PRINT_REPLAY, ("wapbl_replay_stop called\n"));

	wapbl_free(wr->wr_scratch, MAXBSIZE);
	wr->wr_scratch = NULL;

	wr->wr_logvp = NULL;

	wapbl_blkhash_clear(wr);
	wapbl_blkhash_free(wr);
}

void
wapbl_replay_free(struct wapbl_replay *wr)
{

	KDASSERT(!wapbl_replay_isopen(wr));

	if (wr->wr_inodes)
		wapbl_free(wr->wr_inodes,
		    wr->wr_inodescnt * sizeof(wr->wr_inodes[0]));
	wapbl_free(wr, sizeof(*wr));
}

#ifdef _KERNEL
int
wapbl_replay_isopen1(struct wapbl_replay *wr)
{

	return wapbl_replay_isopen(wr);
}
#endif

static void
wapbl_replay_process_blocks(struct wapbl_replay *wr, off_t *offp)
{
	struct wapbl_wc_blocklist *wc =
	    (struct wapbl_wc_blocklist *)wr->wr_scratch;
	int fsblklen = 1 << wr->wr_fs_dev_bshift;
	int i, j, n;

	for (i = 0; i < wc->wc_blkcount; i++) {
		/*
		 * Enter each physical block into the hashtable independently.
		 */
		n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
		for (j = 0; j < n; j++) {
			wapbl_blkhash_ins(wr, wc->wc_blocks[i].wc_daddr + j,
			    *offp);
			wapbl_circ_advance(wr, fsblklen, offp);
		}
	}
}

static void
wapbl_replay_process_revocations(struct wapbl_replay *wr)
{
	struct wapbl_wc_blocklist *wc =
	    (struct wapbl_wc_blocklist *)wr->wr_scratch;
	int i, j, n;

	for (i = 0; i < wc->wc_blkcount; i++) {
		/*
		 * Remove any blocks found from the hashtable.
		 */
		n = wc->wc_blocks[i].wc_dlen >> wr->wr_fs_dev_bshift;
		for (j = 0; j < n; j++)
			wapbl_blkhash_rem(wr, wc->wc_blocks[i].wc_daddr + j);
	}
}

static void
wapbl_replay_process_inodes(struct wapbl_replay *wr, off_t oldoff, off_t newoff)
{
	struct wapbl_wc_inodelist *wc =
	    (struct wapbl_wc_inodelist *)wr->wr_scratch;
	void *new_inodes;
	const size_t oldsize = wr->wr_inodescnt * sizeof(wr->wr_inodes[0]);

	KASSERT(sizeof(wr->wr_inodes[0]) == sizeof(wc->wc_inodes[0]));

	/*
	 * Keep track of where we found this so this location won't be
	 * overwritten.
	 */
	if (wc->wc_clear) {
		wr->wr_inodestail = oldoff;
		wr->wr_inodescnt = 0;
		if (wr->wr_inodes != NULL) {
			wapbl_free(wr->wr_inodes, oldsize);
			wr->wr_inodes = NULL;
		}
	}
	wr->wr_inodeshead = newoff;
	if (wc->wc_inocnt == 0)
		return;

	new_inodes = wapbl_malloc((wr->wr_inodescnt + wc->wc_inocnt) *
	    sizeof(wr->wr_inodes[0]));
	if (wr->wr_inodes != NULL) {
		memcpy(new_inodes, wr->wr_inodes, oldsize);
		wapbl_free(wr->wr_inodes, oldsize);
	}
	wr->wr_inodes = new_inodes;
	memcpy(&wr->wr_inodes[wr->wr_inodescnt], wc->wc_inodes,
	    wc->wc_inocnt * sizeof(wr->wr_inodes[0]));
	wr->wr_inodescnt += wc->wc_inocnt;
}

static int
wapbl_replay_process(struct wapbl_replay *wr, off_t head, off_t tail)
{
	off_t off;
	int error;

	int logblklen = 1 << wr->wr_log_dev_bshift;

	wapbl_blkhash_clear(wr);

	off = tail;
	while (off != head) {
		struct wapbl_wc_null *wcn;
		off_t saveoff = off;
		error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
		if (error)
			goto errout;
		wcn = (struct wapbl_wc_null *)wr->wr_scratch;
		switch (wcn->wc_type) {
		case WAPBL_WC_BLOCKS:
			wapbl_replay_process_blocks(wr, &off);
			break;

		case WAPBL_WC_REVOCATIONS:
			wapbl_replay_process_revocations(wr);
			break;

		case WAPBL_WC_INODES:
			wapbl_replay_process_inodes(wr, saveoff, off);
			break;

		default:
			printf("Unrecognized wapbl type: 0x%08x\n",
			    wcn->wc_type);
			error = EFTYPE;
			goto errout;
		}
		wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
		if (off != saveoff) {
			printf("wapbl_replay: corrupted records\n");
			error = EFTYPE;
			goto errout;
		}
	}
	return 0;

errout:
	wapbl_blkhash_clear(wr);
	return error;
}
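/*
 * Every record scanned above must account for exactly wc_len bytes:
 * advancing the pre-record offset (saveoff) by wc_len has to land on
 * the post-record offset, otherwise the log is declared corrupt and
 * replay fails with EFTYPE.  For WAPBL_WC_BLOCKS records, the data
 * blocks following the header are skipped one filesystem block at a
 * time inside wapbl_replay_process_blocks() via wapbl_circ_advance().
 */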
#if 0
int
wapbl_replay_verify(struct wapbl_replay *wr, struct vnode *fsdevvp)
{
	off_t off;
	int mismatchcnt = 0;
	int logblklen = 1 << wr->wr_log_dev_bshift;
	int fsblklen = 1 << wr->wr_fs_dev_bshift;
	void *scratch1 = wapbl_malloc(MAXBSIZE);
	void *scratch2 = wapbl_malloc(MAXBSIZE);
	int error = 0;

	KDASSERT(wapbl_replay_isopen(wr));

	off = wch->wc_tail;
	while (off != wch->wc_head) {
		struct wapbl_wc_null *wcn;
#ifdef DEBUG
		off_t saveoff = off;
#endif
		error = wapbl_circ_read(wr, wr->wr_scratch, logblklen, &off);
		if (error)
			goto out;
		wcn = (struct wapbl_wc_null *)wr->wr_scratch;
		switch (wcn->wc_type) {
		case WAPBL_WC_BLOCKS:
			{
				struct wapbl_wc_blocklist *wc =
				    (struct wapbl_wc_blocklist *)wr->wr_scratch;
				int i;
				for (i = 0; i < wc->wc_blkcount; i++) {
					int foundcnt = 0;
					int dirtycnt = 0;
					int j, n;
					/*
					 * Check each physical block against
					 * the hashtable independently.
					 */
					n = wc->wc_blocks[i].wc_dlen >>
					    wch->wc_fs_dev_bshift;
					for (j = 0; j < n; j++) {
						struct wapbl_blk *wb =
						    wapbl_blkhash_get(wr,
						    wc->wc_blocks[i].wc_daddr + j);
						if (wb && (wb->wb_off == off)) {
							foundcnt++;
							error =
							    wapbl_circ_read(wr,
							    scratch1, fsblklen,
							    &off);
							if (error)
								goto out;
							error =
							    wapbl_read(scratch2,
							    fsblklen, fsdevvp,
							    wb->wb_blk);
							if (error)
								goto out;
							if (memcmp(scratch1,
							    scratch2,
							    fsblklen)) {
								printf(
		"wapbl_verify: mismatch block %"PRId64" at off %"PRIdMAX"\n",
								    wb->wb_blk,
								    (intmax_t)off);
								dirtycnt++;
								mismatchcnt++;
							}
						} else {
							wapbl_circ_advance(wr,
							    fsblklen, &off);
						}
					}
#if 0
					/*
					 * If all of the blocks in an entry
					 * are clean, then remove all of its
					 * blocks from the hashtable since they
					 * never will need replay.
					 */
					if ((foundcnt != 0) &&
					    (dirtycnt == 0)) {
						off = saveoff;
						wapbl_circ_advance(wr,
						    logblklen, &off);
						for (j = 0; j < n; j++) {
							struct wapbl_blk *wb =
							    wapbl_blkhash_get(wr,
							    wc->wc_blocks[i].wc_daddr + j);
							if (wb &&
							    (wb->wb_off == off)) {
								wapbl_blkhash_rem(wr, wb->wb_blk);
							}
							wapbl_circ_advance(wr,
							    fsblklen, &off);
						}
					}
#endif
				}
			}
			break;
		case WAPBL_WC_REVOCATIONS:
		case WAPBL_WC_INODES:
			break;
		default:
			KASSERT(0);
		}
#ifdef DEBUG
		wapbl_circ_advance(wr, wcn->wc_len, &saveoff);
		KASSERT(off == saveoff);
#endif
	}
out:
	wapbl_free(scratch1, MAXBSIZE);
	wapbl_free(scratch2, MAXBSIZE);
	if (!error && mismatchcnt)
		error = EFTYPE;
	return error;
}
#endif

int
wapbl_replay_write(struct wapbl_replay *wr, struct vnode *fsdevvp)
{
	struct wapbl_blk *wb;
	size_t i;
	off_t off;
	void *scratch;
	int error = 0;
	int fsblklen = 1 << wr->wr_fs_dev_bshift;

	KDASSERT(wapbl_replay_isopen(wr));

	scratch = wapbl_malloc(MAXBSIZE);

	for (i = 0; i <= wr->wr_blkhashmask; ++i) {
		LIST_FOREACH(wb, &wr->wr_blkhash[i], wb_hash) {
			off = wb->wb_off;
			error = wapbl_circ_read(wr, scratch, fsblklen, &off);
			if (error)
				break;
			error = wapbl_write(scratch, fsblklen, fsdevvp,
			    wb->wb_blk);
			if (error)
				break;
		}
	}

	wapbl_free(scratch, MAXBSIZE);
	return error;
}

int
wapbl_replay_can_read(struct wapbl_replay *wr, daddr_t blk, long len)
{
	int fsblklen = 1 << wr->wr_fs_dev_bshift;

	KDASSERT(wapbl_replay_isopen(wr));
	KASSERT((len % fsblklen) == 0);

	while (len != 0) {
		struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
		if (wb)
			return 1;
		len -= fsblklen;
		blk++;
	}
	return 0;
}

int
wapbl_replay_read(struct wapbl_replay *wr, void *data, daddr_t blk, long len)
{
	int fsblklen = 1 << wr->wr_fs_dev_bshift;

	KDASSERT(wapbl_replay_isopen(wr));

	KASSERT((len % fsblklen) == 0);

	while (len != 0) {
		struct wapbl_blk *wb = wapbl_blkhash_get(wr, blk);
		if (wb) {
			off_t off = wb->wb_off;
			int error;
			error = wapbl_circ_read(wr, data, fsblklen, &off);
			if (error)
				return error;
		}
		data = (uint8_t *)data + fsblklen;
		len -= fsblklen;
		blk++;
	}
	return 0;
}
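/*
 * A minimal, compiled-out sketch of how a caller might drive the
 * replay API above at mount time.  The function and variable names
 * here are illustrative only; error handling is reduced to the bare
 * minimum.
 */
#if 0
static int
wapbl_replay_example(struct vnode *logvp, struct vnode *fsdevvp,
	daddr_t off, size_t count, size_t blksize)
{
	struct wapbl_replay *wr;
	int error;

	error = wapbl_replay_start(&wr, logvp, off, count, blksize);
	if (error)
		return error;
	error = wapbl_replay_write(wr, fsdevvp);	/* apply logged blocks */
	wapbl_replay_stop(wr);		/* release scratch and hashtable */
	wapbl_replay_free(wr);
	return error;
}
#endif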