/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"

/*
 * Each collect covers 1<<(19+23) bytes of layer 1 address space
 * (plus a 1<<23 byte copy of the layer2 entries that the layer1
 * entry points at).
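 *
 * That figure follows from the blockmap geometry tracked below: one
 * layer1 entry points at a big-block holding 1<<19 layer2 entries of
 * 16 bytes each, and every layer2 entry accounts for one 1<<23 byte
 * big-block, so a single collect spans 1<<19 * 1<<23 == 1<<42 bytes.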
 */
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;	/* layer2 address pointed by layer1 */
	hammer_off_t	*offsets;	/* big-block offset for layer2[i] */
	struct hammer_blockmap_layer2 *track2;	/* track of layer2 entries */
	struct hammer_blockmap_layer2 *layer2;	/* 1<<19 x 16 bytes entries */
	int		error;		/* # of inconsistencies */
} *collect_t;

static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);

static void dump_blockmap(const char *label, int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static struct hammer_blockmap_layer2 *collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	struct hammer_blockmap_layer2 *layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, struct zone_stat *stats);

void
hammer_cmd_blockmap(void)
{
	dump_blockmap("freemap", HAMMER_ZONE_FREEMAP_INDEX);
}

static
void
dump_blockmap(const char *label, int zone)
{
	struct volume_info *root_volume;
	hammer_blockmap_t rootmap;
	hammer_blockmap_t blockmap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t scan1;
	hammer_off_t scan2;
	struct zone_stat *stats = NULL;
	int xerr;
	int i;

	assert(RootVolNo >= 0);
	root_volume = get_volume(RootVolNo);
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	printf("                   "
	       "phys             first            next             alloc\n");
	for (i = 0; i < HAMMER_MAX_ZONES; i++) {
		blockmap = &root_volume->ondisk->vol0_blockmap[i];
		if (VerboseOpt || i == zone) {
			printf("zone %-2d %-10s %016jx %016jx %016jx %016jx\n",
				i, (i == zone ? label : ""),
				(uintmax_t)blockmap->phys_offset,
				(uintmax_t)blockmap->first_offset,
				(uintmax_t)blockmap->next_offset,
				(uintmax_t)blockmap->alloc_offset);
		}
	}

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	for (scan1 = HAMMER_ZONE_ENCODE(zone, 0);
	     scan1 < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     scan1 += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
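		 * Each layer1 entry maps HAMMER_BLOCKMAP_LAYER2
		 * (1<<42) bytes of zone address space, matching the
		 * scan1 stride of the enclosing loop.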
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(scan1);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		xerr = ' ';
		if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
			xerr = 'B';
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)scan1,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_FREE)
			continue;
		for (scan2 = scan1;
		     scan2 < scan1 + HAMMER_BLOCKMAP_LAYER2;
		     scan2 += HAMMER_BIGBLOCK_SIZE
		) {
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
			xerr = ' ';
			if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
				xerr = 'B';
			printf("%c %016jx zone=%-2d ",
				xerr,
				(uintmax_t)scan2,
				layer2->zone);
			if (VerboseOpt > 1)
				printf("vol=%-3d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(scan2),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(scan2),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2));
			else if (VerboseOpt > 0)
				printf("vol=%-3d L1=%-6lu L2=%-6lu ",
					HAMMER_VOL_DECODE(scan2),
					HAMMER_BLOCKMAP_LAYER1_INDEX(scan2),
					HAMMER_BLOCKMAP_LAYER2_INDEX(scan2));
			printf("app=%-7d free=%-7d",
				layer2->append_off,
				layer2->bytes_free);
			if (VerboseOpt)
				printf(" crc=%04x-%04x\n",
					layer1->layer1_crc,
					layer2->entry_crc);
			else
				printf("\n");

			if (VerboseOpt)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_volume);

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}
}

void
hammer_cmd_checkmap(void)
{
	struct volume_info *volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_volume(RootVolNo);
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (QuietOpt < 3) {
		printf("Volume header\trecords=%jd next_tid=%016jx\n",
		       (intmax_t)volume->ondisk->vol0_stat_records,
		       (uintmax_t)volume->ondisk->vol0_next_tid);
		printf("\t\tbufoffset=%016jx\n",
		       (uintmax_t)volume->ondisk->vol_buf_beg);
		printf("\t\tundosize=%jdMB\n",
		       (intmax_t)((undomap->alloc_offset & HAMMER_OFF_LONG_MASK)
			/ (1024 * 1024)));
	}
	rel_volume(volume);

	assert(HAMMER_ZONE_UNDO_INDEX < HAMMER_ZONE2_MAPPED_INDEX);
	assert(HAMMER_ZONE2_MAPPED_INDEX < HAMMER_MAX_ZONES);
	AssertOnFailure = (DebugOpt != 0);

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
	AssertOnFailure = 1;
}

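/*
 * The three check_*() passes below re-derive each big-block's
 * expected zone and free space, then dump_collect_table() compares
 * the result against the layer2 entries recorded on-media.
 */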
static void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	struct buffer_info *buffer1 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}

static void
check_btree_node(hammer_off_t node_offset, int depth)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc;

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_node(node_offset, &buffer);

	if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) == node->crc)
		badc = ' ';
	else
		badc = 'B';

	if (badc != ' ') {
		printf("%c    NODE %016jx cnt=%02d p=%016jx "
		       "type=%c depth=%d",
		       badc,
		       (uintmax_t)node_offset, node->count,
		       (uintmax_t)node->parent,
		       (node->type ? node->type : '?'), depth);
		printf(" mirror %016jx\n", (uintmax_t)node->mirror_tid);
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
					depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			if (AssertOnFailure)
				assert(0);
			break;
		}
	}
	rel_buffer(buffer);
}

static void
check_undo(hammer_blockmap_t undomap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			if (AssertOnFailure)
				assert(0);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		     head->hdr_size == 0 ||
		     head->hdr_size > HAMMER_UNDO_ALIGN -
			((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = (scan_offset + HAMMER_UNDO_MASK) &
					~HAMMER_UNDO_MASK64;
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}

static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
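	 * The big-block charged here is the one holding this layer1
	 * entry's layer2 array, so each in-use layer2 page ends up
	 * accounted to the freemap zone.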
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		/*
		 * There is an exceptional case where HAMMER uses
		 * HAMMER_ZONE_LARGE_DATA when the data length is
		 * >HAMMER_BUFSIZE/2 (not >=HAMMER_BUFSIZE).
		 * This exceptional case is currently being used
		 * by mirror write code, however the following code
		 * can ignore that and simply use the normal way
		 * of selecting a zone using >=HAMMER_BUFSIZE.
		 * See hammer_alloc_data() for details.
		 */
		zone = elm->leaf.data_len >= HAMMER_BUFSIZE ?
		       HAMMER_ZONE_LARGE_DATA_INDEX :
		       HAMMER_ZONE_SMALL_DATA_INDEX;
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
	collect_blockmap(elm->leaf.data_offset,
		(elm->leaf.data_len + 15) & ~15, zone);
}

static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}

static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	struct hammer_blockmap_layer2 *track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup(offset, &layer1, &layer2, &error);
	if (AssertOnFailure) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(HAMMER_ZONE_DECODE(result_offset) ==
			HAMMER_ZONE_RAW_BUFFER_INDEX);
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset);	/* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}

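/*
 * Lazily allocate tracking state the first time a layer1 entry is
 * seen.  Each collect carries two 1<<23 byte big-block images plus a
 * 1<<19 entry offset array (roughly 20MB total), so memory use grows
 * with the number of layer1 entries actually referenced.
 */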
static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(1, sizeof(*collect));
	collect->track2 = malloc(HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->layer2 = malloc(HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->offsets = malloc(sizeof(hammer_off_t) * HAMMER_BLOCKMAP_RADIX2);
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);
	bzero(collect->track2, HAMMER_BIGBLOCK_SIZE);
	bzero(collect->layer2, HAMMER_BIGBLOCK_SIZE);

	return (collect);
}

static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}

static
struct hammer_blockmap_layer2 *
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
		  struct hammer_blockmap_layer2 *layer2)
{
	struct hammer_blockmap_layer2 *track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return (track2);
}

static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	struct zone_stat *stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (error || VerboseOpt)
		printf("%d errors\n", error);
}

static
void
dump_collect(collect_t collect, struct zone_stat *stats)
{
	struct hammer_blockmap_layer2 *track2;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t offset;
	size_t i;
	int zone;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Check big-blocks referenced by freemap, data,
		 * B-Tree nodes and UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		zone = layer2->zone;
		if (AssertOnFailure) {
			assert((zone == HAMMER_ZONE_UNDO_INDEX) ||
				(zone == HAMMER_ZONE_FREEMAP_INDEX) ||
				(zone >= HAMMER_ZONE2_MAPPED_INDEX &&
				 zone < HAMMER_MAX_ZONES));
		}
		if (VerboseOpt)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(uintmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}