/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"

/*
 * Each collect covers a 1<<(19+23) byte address space of layer 1
 * (plus a copy of 1<<23 bytes that holds the layer2 entries in layer 1).
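 *
 * In other words, each layer1 entry maps HAMMER_BLOCKMAP_LAYER2
 * (1<<(19+23)) bytes of zone address space: 1<<19 layer2 entries of
 * 16 bytes each, one per 8MB (1<<23) big-block, stored together in
 * the single big-block that layer1->phys_offset points at.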
 */
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;	/* layer2 address pointed by layer1 */
	hammer_off_t	*offsets;	/* big-block offset for layer2[i] */
	hammer_blockmap_layer2_t track2; /* track of layer2 entries */
	hammer_blockmap_layer2_t layer2; /* 1<<19 x 16 bytes entries */
	int		error;		/* # of inconsistencies */
} *collect_t;

static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);

static void dump_blockmap(int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(hammer_blockmap_layer1_t layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static hammer_blockmap_layer2_t collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	hammer_blockmap_layer2_t layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, struct zone_stat *stats);

static int num_bad_layer1 = 0;
static int num_bad_layer2 = 0;
static int num_bad_node = 0;

void
hammer_cmd_blockmap(void)
{
	dump_blockmap(HAMMER_ZONE_FREEMAP_INDEX);
}

static
void
dump_blockmap(int zone)
{
	struct volume_info *root_volume;
	hammer_blockmap_t rootmap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	struct zone_stat *stats = NULL;
	int xerr, aerr, ferr;

	root_volume = get_root_volume();
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	print_blockmap(root_volume);

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
	     phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
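		 *
		 * Each layer1 entry covers HAMMER_BLOCKMAP_LAYER2 bytes of
		 * zone address space; HAMMER_BLOCKMAP_LAYER1_OFFSET()
		 * converts the zone offset into the byte offset of the
		 * corresponding entry within the on-media layer1 array.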
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		xerr = ' ';	/* good */
		if (layer1->layer1_crc !=
		    crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
			xerr = 'B';
			++num_bad_layer1;
		}
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)phys_offset,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			hammer_off_t zone_offset = phys_offset + block_offset;
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

			xerr = aerr = ferr = ' ';	/* good */
			if (layer2->entry_crc !=
			    crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
				xerr = 'B';
				++num_bad_layer2;
			}
			if (layer2->append_off > HAMMER_BIGBLOCK_SIZE) {
				aerr = 'A';
				++num_bad_layer2;
			}
			if (layer2->bytes_free < 0 ||
			    layer2->bytes_free > HAMMER_BIGBLOCK_SIZE) {
				ferr = 'F';
				++num_bad_layer2;
			}

			if (VerboseOpt < 2 &&
			    xerr == ' ' && aerr == ' ' && ferr == ' ' &&
			    layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
				break;
			}
			printf("%c%c%c %016jx zone=%-2d ",
				xerr, aerr, ferr, (uintmax_t)zone_offset,
				layer2->zone);
			if (VerboseOpt) {
				printf("vol=%-3d L1#=%-6d L2#=%-6d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset));
			}
			printf("app=%-7d free=%-7d",
				layer2->append_off,
				layer2->bytes_free);
			if (VerboseOpt) {
				double bytes_used = HAMMER_BIGBLOCK_SIZE -
					layer2->bytes_free;
				printf(" fill=%-5.1lf crc=%08x-%08x\n",
					bytes_used * 100 / HAMMER_BIGBLOCK_SIZE,
					layer1->layer1_crc,
					layer2->entry_crc);
			} else {
				printf("\n");
			}

			if (VerboseOpt)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_layer1 || VerboseOpt) {
		printf("%d bad layer1\n", num_bad_layer1);
	}
	if (num_bad_layer2 || VerboseOpt) {
		printf("%d bad layer2\n", num_bad_layer2);
	}
}

void
hammer_cmd_checkmap(void)
{
	struct volume_info *volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_root_volume();
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	print_blockmap(volume);

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
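
	/*
	 * dump_collect_table() also empties CollectTree and frees each
	 * collect, so no separate cleanup is needed here.
	 */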
}

static void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	struct buffer_info *buffer1 = NULL;
	hammer_blockmap_layer1_t layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}

static void
check_btree_node(hammer_off_t node_offset, int depth)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc = ' ';	/* good */
	char badm = ' ';	/* good */

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_node(node_offset, &buffer);

	if (node == NULL) {
		badc = 'B';
		badm = 'I';
	} else if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) != node->crc) {
		badc = 'B';
	}

	if (badm != ' ' || badc != ' ') {	/* not good */
		++num_bad_node;
		printf("%c%c   NODE %016jx ",
			badc, badm, (uintmax_t)node_offset);
		if (node == NULL) {
			printf("(IO ERROR)\n");
			rel_buffer(buffer);
			return;
		} else {
			printf("cnt=%02d p=%016jx type=%c depth=%d mirror=%016jx\n",
				node->count,
				(uintmax_t)node->parent,
				(node->type ? node->type : '?'),
				depth,
				(uintmax_t)node->mirror_tid);
		}
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
					depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
	}
	rel_buffer(buffer);
}

static void
check_undo(hammer_blockmap_t undomap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		    head->hdr_size == 0 ||
		    head->hdr_size > HAMMER_UNDO_ALIGN -
			((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = (scan_offset + HAMMER_UNDO_MASK) &
					~HAMMER_UNDO_MASK64;
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}

static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
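	 *
	 * hammer_xlate_to_zoneX() re-encodes the raw zone-2 physical
	 * offset with the freemap zone number so that collect_blockmap()
	 * can push it through the normal blockmap lookup and its zone
	 * assertions.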
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_freemap_layer2(hammer_blockmap_layer1_t layer1)
{
	/*
	 * This translation is necessary to do checkmap properly
	 * as zone4 is really just zone2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),	/* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		zone = hammer_data_zone_index(elm->leaf.data_len);
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
	collect_blockmap(elm->leaf.data_offset,
		(elm->leaf.data_len + 15) & ~15, zone);
}

static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}

static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	hammer_blockmap_layer2_t track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup(offset, &layer1, &layer2, &error);
	if (DebugOpt) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(HAMMER_ZONE_DECODE(result_offset) ==
			HAMMER_ZONE_RAW_BUFFER_INDEX);
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset);	/* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}

static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(1, sizeof(*collect));
	collect->track2 = malloc(HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->layer2 = malloc(HAMMER_BIGBLOCK_SIZE);	/* 1<<23 bytes */
	collect->offsets = malloc(sizeof(hammer_off_t) * HAMMER_BLOCKMAP_RADIX2);
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);
	bzero(collect->track2, HAMMER_BIGBLOCK_SIZE);
	bzero(collect->layer2, HAMMER_BIGBLOCK_SIZE);

	return (collect);
}

static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}

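/*
 * Look up (and on first touch initialize) the tracking entry for the
 * big-block containing offset.  A snapshot of the on-media layer2
 * entry is saved for later comparison, the tracked bytes_free starts
 * at a full big-block, and entry_crc is reused as a simple "loaded"
 * flag rather than as a CRC.
 */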
static
hammer_blockmap_layer2_t
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
	hammer_blockmap_layer2_t layer2)
{
	hammer_blockmap_layer2_t track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return (track2);
}

static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	struct zone_stat *stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_node || VerboseOpt) {
		printf("%d bad nodes\n", num_bad_node);
	}
	if (error || VerboseOpt) {
		printf("%d errors\n", error);
	}
}

static
void
dump_collect(collect_t collect, struct zone_stat *stats)
{
	hammer_blockmap_layer2_t track2;
	hammer_blockmap_layer2_t layer2;
	hammer_off_t offset;
	int i, zone;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Check big-blocks referenced by freemap, data,
		 * B-Tree nodes and UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		zone = layer2->zone;
		if (DebugOpt) {
			assert((zone == HAMMER_ZONE_UNDO_INDEX) ||
				(zone == HAMMER_ZONE_FREEMAP_INDEX) ||
				hammer_is_zone2_mapped_index(zone));
		}
		if (VerboseOpt)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(uintmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(uintmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}