/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.27 2008/07/31 22:30:33 dillon Exp $
 */

/*
 * HAMMER blockmap
 */
#include "hammer.h"

static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
                                    hammer_off_t base_offset, int zone,
                                    struct hammer_blockmap_layer2 *layer2);
static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);

/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
             hammer_res_rb_compare, hammer_off_t, zone_offset);

static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
        if (res1->zone_offset < res2->zone_offset)
                return(-1);
        if (res1->zone_offset > res2->zone_offset)
                return(1);
        return(0);
}

/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
                      hammer_off_t hint, int *errorp)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        hammer_reserve_t resv;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t result_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        int loops = 0;
        int offset;             /* offset within big-block */
        int use_hint;

        hmp = trans->hmp;

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
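
        /*
         * Example: the rounding above keeps every request aligned to the
         * 16-byte granularity the blockmap tracks.  A 100-byte request
         * becomes (100 + 15) & ~15 == 112, while a request that is already
         * a multiple of 16 (e.g. 112) is left unchanged.
         */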

        /*
         * Setup
         */
        root_volume = trans->rootvol;
        *errorp = 0;
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Use the hint if we have one.
         */
        if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
                next_offset = (hint + 15) & ~(hammer_off_t)15;
                use_hint = 1;
        } else {
                next_offset = blockmap->next_offset;
                use_hint = 0;
        }
again:

        /*
         * use_hint is turned off if we leave the hinted big-block.
         */
        if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
                next_offset = blockmap->next_offset;
                use_hint = 0;
        }

        /*
         * Check for wrap
         */
        if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
                if (++loops == 2) {
                        result_offset = 0;
                        *errorp = ENOSPC;
                        goto failed;
                }
                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
        }

        /*
         * The allocation request may not cross a buffer boundary.  Special
         * large allocations must not cross a large-block boundary.
         */
        tmp_offset = next_offset + bytes - 1;
        if (bytes <= HAMMER_BUFSIZE) {
                if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                        next_offset = tmp_offset & ~HAMMER_BUFMASK64;
                        goto again;
                }
        } else {
                if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
                        next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
        }
        offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);

        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        if (*errorp) {
                result_offset = 0;
                goto failed;
        }

        /*
         * Check CRC.
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * If we are at a big-block boundary and layer1 indicates no
         * free big-blocks, then we cannot allocate a new big-block in
         * layer2; skip to the next layer1 entry.
         */
        if (offset == 0 && layer1->blocks_free == 0) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                goto again;
        }
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

        /*
         * Skip this layer1 entry if it is pointing to a layer2 big-block
         * on a volume that we are currently trying to remove from the
         * file-system.  This is used by the volume-del code together with
         * the reblocker to free up a volume.
         */
        if ((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
            hmp->volume_to_remove) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                goto again;
        }
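
        /*
         * Summary of the dive below: the freemap is a two-level radix
         * structure.  HAMMER_BLOCKMAP_LAYER1_OFFSET() selects the layer1
         * entry covering next_offset, whose phys_offset points at the
         * array of layer2 entries for that region, and
         * HAMMER_BLOCKMAP_LAYER2_OFFSET() then selects the layer2 entry
         * describing the single big-block containing next_offset.
         */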

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        if (*errorp) {
                result_offset = 0;
                goto failed;
        }

        /*
         * Check CRC.  This can race another thread holding the lock
         * and in the middle of modifying layer2.
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Skip the layer if the zone is owned by someone other than us.
         */
        if (layer2->zone && layer2->zone != zone) {
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * If operating in the current non-hint blockmap block, do not
         * allow it to get over-full.  Also drop any active hinting so
         * blockmap->next_offset is updated at the end.
         *
         * We do this for B-Tree and meta-data allocations to provide
         * localization for updates.
         */
        if ((zone == HAMMER_ZONE_BTREE_INDEX ||
             zone == HAMMER_ZONE_META_INDEX) &&
            offset >= HAMMER_LARGEBLOCK_OVERFILL &&
            !((next_offset ^ blockmap->next_offset) & ~HAMMER_LARGEBLOCK_MASK64)
        ) {
                if (offset >= HAMMER_LARGEBLOCK_OVERFILL) {
                        next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                        use_hint = 0;
                        goto again;
                }
        }

        /*
         * We need the lock from this point on.  We have to re-check zone
         * ownership after acquiring the lock and also check for reservations.
         */
        hammer_lock_ex(&hmp->blkmap_lock);

        if (layer2->zone && layer2->zone != zone) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * The big-block might be reserved by another zone.  If it is reserved
         * by our zone we may have to move next_offset past the append_off.
         */
        base_off = (next_offset &
                    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                   HAMMER_ZONE_RAW_BUFFER;
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
        if (resv) {
                if (resv->zone != zone) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
                                      ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
                if (offset < resv->append_off) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset += resv->append_off - offset;
                        goto again;
                }
                ++resv->refs;
        }
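
        /*
         * Note on base_off above: clearing the low bits with
         * ~HAMMER_LARGEBLOCK_MASK64 truncates next_offset to the start of
         * its big-block, and substituting HAMMER_ZONE_RAW_BUFFER for the
         * zone bits yields the zone-2 address of that big-block, which is
         * the key used by the reservation red-black tree.
         */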

        /*
         * Ok, we can allocate out of this layer2 big-block.  Assume ownership
         * of the layer for real.  At this point we've validated any
         * reservation that might exist and can just ignore resv.
         */
        if (layer2->zone == 0) {
                /*
                 * Assign the bigblock to our zone
                 */
                hammer_modify_buffer(trans, buffer1,
                                     layer1, sizeof(*layer1));
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1,
                                           HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(buffer1);
                hammer_modify_buffer(trans, buffer2,
                                     layer2, sizeof(*layer2));
                layer2->zone = zone;
                KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
                KKASSERT(layer2->append_off == 0);
                hammer_modify_volume_field(trans, trans->rootvol,
                                           vol0_stat_freebigblocks);
                --root_volume->ondisk->vol0_stat_freebigblocks;
                hmp->copy_stat_freebigblocks =
                        root_volume->ondisk->vol0_stat_freebigblocks;
                hammer_modify_volume_done(trans->rootvol);
        } else {
                hammer_modify_buffer(trans, buffer2,
                                     layer2, sizeof(*layer2));
        }
        KKASSERT(layer2->zone == zone);

        /*
         * NOTE: bytes_free can legally go negative due to de-dup.
         */
        layer2->bytes_free -= bytes;
        KKASSERT(layer2->append_off <= offset);
        layer2->append_off = offset + bytes;
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);

        /*
         * We hold the blockmap lock and should be the only ones
         * capable of modifying resv->append_off.  Track the allocation
         * as appropriate.
         */
        KKASSERT(bytes != 0);
        if (resv) {
                KKASSERT(resv->append_off <= offset);
                resv->append_off = offset + bytes;
                resv->flags &= ~HAMMER_RESF_LAYER2FREE;
                hammer_blockmap_reserve_complete(hmp, resv);
        }

        /*
         * If we are allocating from the base of a new buffer we can avoid
         * a disk read by calling hammer_bnew().
         */
        if ((next_offset & HAMMER_BUFMASK) == 0) {
                hammer_bnew_ext(trans->hmp, next_offset, bytes,
                                errorp, &buffer3);
        }
        result_offset = next_offset;

        /*
         * If we weren't supplied with a hint or could not use the hint
         * then we wound up using blockmap->next_offset as the hint and
         * need to save it.
         */
        if (use_hint == 0) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                blockmap->next_offset = next_offset + bytes;
                hammer_modify_volume_done(root_volume);
        }
        hammer_unlock(&hmp->blkmap_lock);
failed:

        /*
         * Cleanup
         */
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);

        return(result_offset);
}
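
/*
 * Typical call pattern (illustrative sketch only; the real callers live in
 * the B-Tree and record code, and "hint_offset" here is just a placeholder
 * for whatever locality hint the caller has, or 0 for none):
 *
 *      int error;
 *      hammer_off_t off;
 *
 *      off = hammer_blockmap_alloc(trans, HAMMER_ZONE_META_INDEX,
 *                                  size, hint_offset, &error);
 *      if (error == 0)
 *              ...off is a zone-encoded offset usable with hammer_bread()...
 */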

/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for large blocks of data.
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
                        hammer_off_t *zone_offp, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        hammer_reserve_t resv;
        hammer_reserve_t resx;
        int loops = 0;
        int offset;

        /*
         * Setup
         */
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(NULL);
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

        next_offset = blockmap->next_offset;
again:
        resv = NULL;
        /*
         * Check for wrap
         */
        if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
                if (++loops == 2) {
                        *errorp = ENOSPC;
                        goto failed;
                }
                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
        }

        /*
         * The allocation request may not cross a buffer boundary.  Special
         * large allocations must not cross a large-block boundary.
         */
        tmp_offset = next_offset + bytes - 1;
        if (bytes <= HAMMER_BUFSIZE) {
                if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                        next_offset = tmp_offset & ~HAMMER_BUFMASK64;
                        goto again;
                }
        } else {
                if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
                        next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
        }
        offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        if (*errorp)
                goto failed;

        /*
         * Check CRC.
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * If we are at a big-block boundary and layer1 indicates no
         * free big-blocks, then we cannot allocate a new big-block in
         * layer2; skip to the next layer1 entry.
         */
        if ((next_offset & HAMMER_LARGEBLOCK_MASK) == 0 &&
            layer1->blocks_free == 0) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                goto again;
        }
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        if (*errorp)
                goto failed;

        /*
         * Check CRC if not allocating into uninitialized space (which we
         * aren't when reserving space).
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Skip the layer if the zone is owned by someone other than us.
         */
        if (layer2->zone && layer2->zone != zone) {
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * We need the lock from this point on.  We have to re-check zone
         * ownership after acquiring the lock and also check for reservations.
         */
        hammer_lock_ex(&hmp->blkmap_lock);

        if (layer2->zone && layer2->zone != zone) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * The big-block might be reserved by another zone.  If it is reserved
         * by our zone we may have to move next_offset past the append_off.
         */
        base_off = (next_offset &
                    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                   HAMMER_ZONE_RAW_BUFFER;
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
        if (resv) {
                if (resv->zone != zone) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
                                      ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
                if (offset < resv->append_off) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset += resv->append_off - offset;
                        goto again;
                }
                ++resv->refs;
                resx = NULL;
        } else {
                resx = kmalloc(sizeof(*resv), hmp->m_misc,
                               M_WAITOK | M_ZERO | M_USE_RESERVE);
                resx->refs = 1;
                resx->zone = zone;
                resx->zone_offset = base_off;
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
                        resx->flags |= HAMMER_RESF_LAYER2FREE;
                resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
                KKASSERT(resv == NULL);
                resv = resx;
                ++hammer_count_reservations;
        }
        resv->append_off = offset + bytes;

        /*
         * If we are not reserving a whole buffer but are at the start of
         * a new block, call hammer_bnew() to avoid a disk read.
         *
         * If we are reserving a whole buffer (or more), the caller will
         * probably use a direct read, so do nothing.
         */
        if (bytes < HAMMER_BUFSIZE && (next_offset & HAMMER_BUFMASK) == 0) {
                hammer_bnew(hmp, next_offset, errorp, &buffer3);
        }

        /*
         * Adjust our iterator and alloc_offset.  The layer1 and layer2
         * space beyond alloc_offset is uninitialized.  alloc_offset must
         * be big-block aligned.
         */
        blockmap->next_offset = next_offset + bytes;
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);
        hammer_rel_volume(root_volume, 0);
        *zone_offp = next_offset;

        return(resv);
}
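
/*
 * Reservation life cycle (illustrative outline pieced together from the
 * comments in this file; the actual frontend/backend callers live elsewhere
 * in HAMMER):
 *
 *      resv = hammer_blockmap_reserve(hmp, zone, bytes, &zone_off, &error);
 *      ...frontend issues direct write I/O against zone_off...
 *      ...backend, when the related record is committed...
 *      hammer_blockmap_finalize(trans, resv, zone_off, bytes);
 *      hammer_blockmap_reserve_complete(hmp, resv);
 */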

/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
        hammer_off_t base_offset;
        int error;

        KKASSERT(resv->refs > 0);
        KKASSERT((resv->zone_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);

        /*
         * Setting append_off to the max prevents any new allocations
         * from occurring while we are trying to dispose of the reservation,
         * allowing us to safely delete any related HAMMER buffers.
         *
         * If we are unable to clean out all related HAMMER buffers we
         * requeue the delay.
         */
        if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
                resv->append_off = HAMMER_LARGEBLOCK_SIZE;
                base_offset = resv->zone_offset & ~HAMMER_OFF_ZONE_MASK;
                base_offset = HAMMER_ZONE_ENCODE(resv->zone, base_offset);
                error = hammer_del_buffers(hmp, base_offset,
                                           resv->zone_offset,
                                           HAMMER_LARGEBLOCK_SIZE,
                                           0);
                if (error)
                        hammer_reserve_setdelay(hmp, resv);
        }
        if (--resv->refs == 0) {
                KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
                RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
                kfree(resv, hmp->m_misc);
                --hammer_count_reservations;
        }
}

/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled, otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * The caller might reset the underlying layer2 entry's append_off to 0, so
 * our covering append_off must be set to max to prevent any reallocation
 * until after the flush delays complete, and to ensure any underlying
 * cached blocks are properly invalidated.
 */
static void
hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
                               int zone, struct hammer_blockmap_layer2 *layer2)
{
        hammer_reserve_t resv;

        /*
         * Allocate the reservation if necessary.
         *
         * NOTE: need lock in future around resv lookup/allocation and
         * the setdelay call, currently refs is not bumped until the call.
         */
again:
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
        if (resv == NULL) {
                resv = kmalloc(sizeof(*resv), hmp->m_misc,
                               M_WAITOK | M_ZERO | M_USE_RESERVE);
                resv->zone = zone;
                resv->zone_offset = base_offset;
                resv->refs = 0;
                resv->append_off = HAMMER_LARGEBLOCK_SIZE;

                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
                        resv->flags |= HAMMER_RESF_LAYER2FREE;
                if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
                        kfree(resv, hmp->m_misc);
                        goto again;
                }
                ++hammer_count_reservations;
        } else {
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
                        resv->flags |= HAMMER_RESF_LAYER2FREE;
        }
        hammer_reserve_setdelay(hmp, resv);
}

/*
 * Enter the reservation on the on-delay list, or move it if it
 * is already on the list.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
        if (resv->flags & HAMMER_RESF_ONDELAY) {
                TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
                resv->flush_group = hmp->flusher.next + 1;
                TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
        } else {
                ++resv->refs;
                ++hmp->rsv_fromdelay;
                resv->flags |= HAMMER_RESF_ONDELAY;
                resv->flush_group = hmp->flusher.next + 1;
                TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
        }
}

void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
        KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
        resv->flags &= ~HAMMER_RESF_ONDELAY;
        TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
        --hmp->rsv_fromdelay;
        hammer_blockmap_reserve_complete(hmp, resv);
}

/*
 * Backend function - free (offset, bytes) in a zone.
 *
 * XXX error return
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
                     hammer_off_t zone_offset, int bytes)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        int error;
        int zone;

        if (bytes == 0)
                return;
        hmp = trans->hmp;

        /*
         * Alignment
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes <= HAMMER_XBUFSIZE);
        KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
                  ~HAMMER_LARGEBLOCK_MASK64) == 0);

        /*
         * Basic zone validation & locking
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = trans->rootvol;
        error = 0;

        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        if (error)
                goto failed;
        KKASSERT(layer1->phys_offset &&
                 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
        if (error)
                goto failed;
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        hammer_lock_ex(&hmp->blkmap_lock);

        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

        /*
         * Free space previously allocated via blockmap_alloc().
         *
         * NOTE: bytes_free can be and remain negative due to de-dup ops
         *       but can never become larger than HAMMER_LARGEBLOCK_SIZE.
         */
        KKASSERT(layer2->zone == zone);
        layer2->bytes_free += bytes;
        KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

        /*
         * If a big-block becomes entirely free we must create a covering
         * reservation to prevent premature reuse.  Note, however, that
         * the big-block and/or reservation may still have an append_off
         * that allows further (non-reused) allocations.
         *
         * Once the reservation has been made we re-check layer2 and if
         * the big-block is still entirely free we reset the layer2 entry.
         * The reservation will prevent premature reuse.
         *
         * NOTE: hammer_buffer's are only invalidated when the reservation
         * is completed, if the layer2 entry is still completely free at
         * that time.  Any allocations from the reservation that may have
         * occurred in the meantime, or active references on the reservation
         * from new pending allocations, will prevent the invalidation from
         * occurring.
         */
        if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                base_off = (zone_offset &
                            (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                           HAMMER_ZONE_RAW_BUFFER;

                hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                        layer2->zone = 0;
                        layer2->append_off = 0;
                        hammer_modify_buffer(trans, buffer1,
                                             layer1, sizeof(*layer1));
                        ++layer1->blocks_free;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                        hammer_modify_buffer_done(buffer1);
                        hammer_modify_volume_field(trans,
                                        trans->rootvol,
                                        vol0_stat_freebigblocks);
                        ++root_volume->ondisk->vol0_stat_freebigblocks;
                        hmp->copy_stat_freebigblocks =
                                root_volume->ondisk->vol0_stat_freebigblocks;
                        hammer_modify_volume_done(trans->rootvol);
                }
        }
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
}
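
/*
 * Relationship between the free and finalize paths (summary of the comments
 * above): hammer_blockmap_free() returns space that was fully allocated via
 * hammer_blockmap_alloc() or hammer_blockmap_finalize(), while
 * hammer_blockmap_finalize() below converts a frontend reservation made by
 * hammer_blockmap_reserve() into a real, accounted allocation.
 */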

/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
                         hammer_reserve_t resv,
                         hammer_off_t zone_offset, int bytes)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int error;
        int zone;
        int offset;

        if (bytes == 0)
                return(0);
        hmp = trans->hmp;

        /*
         * Alignment
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes <= HAMMER_XBUFSIZE);

        /*
         * Basic zone validation & locking
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = trans->rootvol;
        error = 0;

        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        if (error)
                goto failed;
        KKASSERT(layer1->phys_offset &&
                 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
        if (error)
                goto failed;
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        hammer_lock_ex(&hmp->blkmap_lock);

        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

        /*
         * Finalize some or all of the space covered by a current
         * reservation.  An allocation in the same layer may have
         * already assigned ownership.
         */
        if (layer2->zone == 0) {
                hammer_modify_buffer(trans, buffer1,
                                     layer1, sizeof(*layer1));
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1,
                                           HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(buffer1);
                layer2->zone = zone;
                KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
                KKASSERT(layer2->append_off == 0);
                hammer_modify_volume_field(trans,
                                trans->rootvol,
                                vol0_stat_freebigblocks);
                --root_volume->ondisk->vol0_stat_freebigblocks;
                hmp->copy_stat_freebigblocks =
                        root_volume->ondisk->vol0_stat_freebigblocks;
                hammer_modify_volume_done(trans->rootvol);
        }
        if (layer2->zone != zone)
                kprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
        KKASSERT(layer2->zone == zone);
        KKASSERT(bytes != 0);
        layer2->bytes_free -= bytes;
        if (resv)
                resv->flags &= ~HAMMER_RESF_LAYER2FREE;

        /*
         * Finalizations can occur out of order, or combined with allocations.
         * append_off must be set to the highest allocated offset.
         */
        offset = ((int)zone_offset & HAMMER_LARGEBLOCK_MASK) + bytes;
        if (layer2->append_off < offset)
                layer2->append_off = offset;

        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        return(error);
}
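
/*
 * Example of the append_off rule above: if two 4096-byte pieces of a
 * reservation, at big-block offsets 0 and 4096, are finalized in reverse
 * order, the second call computes offset 0 + 4096 == 4096 and leaves
 * append_off at the 8192 already recorded by the first call, so the high
 * water mark never moves backwards.
 */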

/*
 * Return the approximate number of free bytes in the big-block
 * containing the specified blockmap offset.
 *
 * WARNING: A negative number can be returned if data de-dup exists,
 *          and the result will also not represent the actual number
 *          of free bytes in this case.
 *
 * This code is used only by the reblocker.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
                        int *curp, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int32_t bytes;
        int zone;

        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp) {
                *curp = 0;
                return(0);
        }
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        if (*errorp) {
                bytes = 0;
                goto failed;
        }
        KKASSERT(layer1->phys_offset);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         *
         * (reuse buffer, layer1 pointer becomes invalid)
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
        if (*errorp) {
                bytes = 0;
                goto failed;
        }
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }
        KKASSERT(layer2->zone == zone);

        bytes = layer2->bytes_free;

        if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_LARGEBLOCK_MASK64)
                *curp = 0;
        else
                *curp = 1;
failed:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
                        (long long)zone_offset, bytes);
        }
        return(bytes);
}

/*
 * Lookup a blockmap offset.
 */
hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
                       int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        hammer_off_t base_off;
        hammer_reserve_t resv;
        int zone;

        /*
         * Calculate the zone-2 offset.
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);

        result_offset = (zone_offset & ~HAMMER_OFF_ZONE_MASK) |
                        HAMMER_ZONE_RAW_BUFFER;

        /*
         * We can actually stop here, normal blockmaps are now direct-mapped
         * onto the freemap and so represent zone-2 addresses.
         */
        if (hammer_verify_zone == 0) {
                *errorp = 0;
                return(result_offset);
        }

        /*
         * Validate the allocation zone
         */
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(freemap->phys_offset != 0);

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        if (*errorp)
                goto failed;
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);

        if (*errorp)
                goto failed;
        if (layer2->zone == 0) {
                base_off = (zone_offset &
                            (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                           HAMMER_ZONE_RAW_BUFFER;
                resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
                                 base_off);
                KKASSERT(resv && resv->zone == zone);

        } else if (layer2->zone != zone) {
                panic("hammer_blockmap_lookup: bad zone %d/%d\n",
                      layer2->zone, zone);
        }
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

failed:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
                        (long long)zone_offset, (long long)result_offset);
        }
        return(result_offset);
}

/*
 * Check space availability
 */
int
_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
{
        const int in_size = sizeof(struct hammer_inode_data) +
                            sizeof(union hammer_btree_elm);
        const int rec_size = (sizeof(union hammer_btree_elm) * 2);
        int64_t usedbytes;

        usedbytes = hmp->rsv_inodes * in_size +
                    hmp->rsv_recs * rec_size +
                    hmp->rsv_databytes +
                    ((int64_t)hmp->rsv_fromdelay << HAMMER_LARGEBLOCK_BITS) +
                    ((int64_t)hidirtybufspace << 2) +
                    (slop << HAMMER_LARGEBLOCK_BITS);

        hammer_count_extra_space_used = usedbytes;      /* debugging */
        if (resp)
                *resp = usedbytes;

        if (hmp->copy_stat_freebigblocks >=
            (usedbytes >> HAMMER_LARGEBLOCK_BITS)) {
                return(0);
        }
        return (ENOSPC);
}
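
/*
 * Illustrative use of _hammer_checkspace() (a sketch only; real callers pass
 * small per-operation slop constants rather than a literal count).  slop is
 * expressed in big-blocks and pads the estimate of reserved space:
 *
 *      int64_t reserved;
 *
 *      if (_hammer_checkspace(hmp, 8, &reserved) == ENOSPC)
 *              kprintf("low on space, ~%jd bytes tied up\n",
 *                      (intmax_t)reserved);
 */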