/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"

/*
 * Each collect covers the 1<<(19+23) bytes of address space mapped by a
 * single layer1 entry, and also holds a 1<<23 byte in-memory copy of that
 * entry's layer2 entries.
 */
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;  /* layer2 address pointed to by layer1 */
	hammer_off_t	*offsets;  /* big-block offset for layer2[i] */
	hammer_blockmap_layer2_t track2;  /* tracking copy of layer2 entries */
	hammer_blockmap_layer2_t layer2;  /* 1<<19 x 16 byte entries */
	int error;  /* # of inconsistencies */
} *collect_t;
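
/*
 * Worked sizing example (a sketch; the constants are the usual HAMMER
 * on-disk values and should be read as assumptions here): one layer1
 * entry maps HAMMER_BLOCKMAP_RADIX2 (1<<19) layer2 entries, each
 * describing one HAMMER_BIGBLOCK_SIZE (1<<23 byte, 8MB) big-block:
 *
 *	(1 << 19) * (1 << 23) = 1 << 42 bytes of address space per collect
 *
 * which is why dump_blockmap() below strides phys_offset by
 * HAMMER_BLOCKMAP_LAYER2, the byte range covered by one layer1 entry.
 */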

static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);
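
/*
 * Minimal usage sketch for the tree above, mirroring collect_get()
 * further down (hedged: the RB_LOOKUP()/RB_INSERT() semantics are those
 * of the BSD <sys/tree.h> macros, with RB_GENERATE2 keying the lookup
 * fast path on the phys_offset field):
 *
 *	collect_t c = RB_LOOKUP(collect_rb_tree, &CollectTree, off);
 *	if (c == NULL) {
 *		c = calloc(1, sizeof(*c));
 *		c->phys_offset = off;
 *		RB_INSERT(collect_rb_tree, &CollectTree, c);
 *	}
 */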

static void dump_blockmap(int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(hammer_blockmap_layer1_t layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static hammer_blockmap_layer2_t collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	hammer_blockmap_layer2_t layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, struct zone_stat *stats);

static int num_bad_layer1 = 0;
static int num_bad_layer2 = 0;
static int num_bad_node = 0;

void
hammer_cmd_blockmap(void)
{
	dump_blockmap(HAMMER_ZONE_FREEMAP_INDEX);
}

static
void
dump_blockmap(int zone)
{
	struct volume_info *root_volume;
	hammer_blockmap_t rootmap;
	hammer_blockmap_layer1_t layer1;
	hammer_blockmap_layer2_t layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	struct zone_stat *stats = NULL;
	int xerr, aerr, ferr;

	root_volume = get_root_volume();
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	print_blockmap(root_volume);

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
	     phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		xerr = ' ';  /* good */
		if (!hammer_crc_test_layer1(HammerVersion, layer1)) {
			xerr = 'B';
			++num_bad_layer1;
		}
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)phys_offset,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			hammer_off_t zone_offset = phys_offset + block_offset;
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

			xerr = aerr = ferr = ' ';  /* good */
			if (!hammer_crc_test_layer2(HammerVersion, layer2)) {
				xerr = 'B';
				++num_bad_layer2;
			}
			if (layer2->append_off > HAMMER_BIGBLOCK_SIZE) {
				aerr = 'A';
				++num_bad_layer2;
			}
			if (layer2->bytes_free < 0 ||
			    layer2->bytes_free > HAMMER_BIGBLOCK_SIZE) {
				ferr = 'F';
				++num_bad_layer2;
			}

			if (VerboseOpt < 2 &&
			    xerr == ' ' && aerr == ' ' && ferr == ' ' &&
			    layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
				break;
			}
			printf("%c%c%c     %016jx zone=%-2d ",
				xerr, aerr, ferr, (uintmax_t)zone_offset, layer2->zone);
			if (VerboseOpt) {
				printf("vol=%-3d L1#=%-6d L2#=%-6d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset));
			}
			printf("app=%-7d free=%-7d",
				layer2->append_off,
				layer2->bytes_free);
			if (VerboseOpt) {
				double bytes_used = HAMMER_BIGBLOCK_SIZE -
					layer2->bytes_free;
				printf(" fill=%-5.1lf crc=%08x-%08x\n",
					bytes_used * 100 / HAMMER_BIGBLOCK_SIZE,
					layer1->layer1_crc,
					layer2->entry_crc);
			} else {
				printf("\n");
			}

			if (stats)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);

	if (stats) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_layer1 || VerboseOpt)
		printf("%d bad layer1\n", num_bad_layer1);
	if (num_bad_layer2 || VerboseOpt)
		printf("%d bad layer2\n", num_bad_layer2);
}
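
/*
 * Illustrative offset decomposition behind the verbose columns printed
 * above (a sketch; the bit positions are assumptions based on the usual
 * HAMMER encoding of 4 zone bits, 8 volume bits and a 52 bit offset):
 *
 *	vol# = HAMMER_VOL_DECODE(off)             - bits 52..59
 *	L1#  = HAMMER_BLOCKMAP_LAYER1_INDEX(off)  - off / (1 << 42)
 *	L2#  = HAMMER_BLOCKMAP_LAYER2_INDEX(off)  - (off >> 23) & ((1 << 19) - 1)
 *	L1   = L1# * sizeof(struct hammer_blockmap_layer1)
 *	L2   = L2# * sizeof(struct hammer_blockmap_layer2)
 */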

void
hammer_cmd_checkmap(void)
{
	struct volume_info *volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_root_volume();
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	print_blockmap(volume);

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
}

static void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	struct buffer_info *buffer1 = NULL;
	hammer_blockmap_layer1_t layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}
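
/*
 * Sizing note (hedged; assumes 32 byte layer1 entries and the usual
 * HAMMER constants): the loop in check_freemap() walks
 * HAMMER_BLOCKMAP_RADIX1 (1<<18) entries of sizeof(*layer1) (1<<5)
 * bytes each, i.e.
 *
 *	(1 << 18) * (1 << 5) = 1 << 23 bytes = exactly one big-block,
 *
 * which is why collect_freemap_layer1() accounts a single
 * HAMMER_BIGBLOCK_SIZE allocation for the whole layer1 array.
 */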

static void
check_btree_node(hammer_off_t node_offset, int depth)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc = ' ';  /* good */
	char badm = ' ';  /* good */

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_buffer_data(node_offset, &buffer, 0);

	if (node == NULL) {
		badc = 'B';
		badm = 'I';
	} else if (!hammer_crc_test_btree(HammerVersion, node)) {
		badc = 'B';
	}

	if (badm != ' ' || badc != ' ') {  /* not good */
		++num_bad_node;
		printf("%c%c   NODE %016jx ",
			badc, badm, (uintmax_t)node_offset);
		if (node == NULL) {
			printf("(IO ERROR)\n");
			rel_buffer(buffer);
			return;
		} else {
			printf("cnt=%02d p=%016jx type=%c depth=%d mirror=%016jx\n",
			       node->count,
			       (uintmax_t)node->parent,
			       (node->type ? node->type : '?'),
			       depth,
			       (uintmax_t)node->mirror_tid);
		}
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
						 depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
	}
	rel_buffer(buffer);
}

static void
check_undo(hammer_blockmap_t undomap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ENCODE_UNDO(0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		     head->hdr_size == 0 ||
		     head->hdr_size > HAMMER_UNDO_ALIGN -
			((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = HAMMER_UNDO_DOALIGN(scan_offset);
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}
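
/*
 * The size sanity check above rejects a FIFO record when (a sketch;
 * the concrete alignment value assumes the usual HAMMER_HEAD_ALIGN of
 * 32 bytes):
 *
 *	hdr_size & HAMMER_HEAD_ALIGN_MASK  - not a multiple of the head
 *	                                     alignment
 *	hdr_size == 0                      - the scan could never advance
 *	hdr_size > space left to boundary  - the record would straddle an
 *	                                     UNDO alignment boundary
 *
 * and in each case resynchronizes by rounding scan_offset up with
 * HAMMER_UNDO_DOALIGN() rather than trusting the bad header.
 */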

static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary for checkmap to work properly:
	 * zone 4 is really just an alias for zone 2 address space, and
	 * collect_blockmap() expects an offset encoded with its owning
	 * zone.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_freemap_layer2(hammer_blockmap_layer1_t layer1)
{
	/*
	 * Same translation as in collect_freemap_layer1(): zone 4 is
	 * really just an alias for zone 2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),  /* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),  /* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		zone = hammer_data_zone_index(elm->leaf.data_len);
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
	collect_blockmap(elm->leaf.data_offset,
		HAMMER_DATA_DOALIGN(elm->leaf.data_len), zone);
}
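
/*
 * Worked example for the leaf accounting above (hedged; assumes HAMMER's
 * usual 16 byte data alignment in HAMMER_DATA_DOALIGN() and a small/large
 * threshold inside hammer_data_zone_index()): a 1000 byte
 * HAMMER_RECTYPE_DATA record charges
 *
 *	HAMMER_DATA_DOALIGN(1000) = (1000 + 15) & ~15 = 1008 bytes
 *
 * against the big-block backing elm->leaf.data_offset, in whichever data
 * zone hammer_data_zone_index(1000) selects.
 */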

static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}

static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	hammer_blockmap_layer2_t track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup_save(offset, &layer1, &layer2, &error);
	if (DebugOpt) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(hammer_is_zone_raw_buffer(result_offset));
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset); /* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}
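
/*
 * Flow sketch for collect_blockmap() (hedged; the helper names are the
 * ones used above, their exact semantics inferred from this file):
 *
 *	zone-X offset --blockmap_lookup_save()--> zone-2 raw-buffer offset
 *	layer1.phys_offset ----collect_get()----> per-layer1 collect node
 *	result_offset ---collect_get_track()----> in-memory layer2 shadow
 *
 * Each reference then debits `length' from the shadow entry's bytes_free;
 * dump_collect_table() later diffs the shadow against the on-disk layer2
 * to flag mis-zoned or mis-counted big-blocks.
 */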

static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(1, sizeof(*collect));
	collect->track2 = calloc(1, HAMMER_BIGBLOCK_SIZE);  /* 1<<23 bytes */
	collect->layer2 = calloc(1, HAMMER_BIGBLOCK_SIZE);  /* 1<<23 bytes */
	collect->offsets = calloc(HAMMER_BLOCKMAP_RADIX2, sizeof(hammer_off_t));
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);

	return (collect);
}

static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}

static
hammer_blockmap_layer2_t
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
		  hammer_blockmap_layer2_t layer2)
{
	hammer_blockmap_layer2_t track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return (track2);
}

static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	struct zone_stat *stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (stats) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_node || VerboseOpt)
		printf("%d bad nodes\n", num_bad_node);
	if (error || VerboseOpt)
		printf("%d errors\n", error);
}

static
void
dump_collect(collect_t collect, struct zone_stat *stats)
{
	hammer_blockmap_layer2_t track2;
	hammer_blockmap_layer2_t layer2;
	hammer_off_t offset;
	int i;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Check big-blocks referenced by freemap, data,
		 * B-Tree nodes and UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		if (DebugOpt) {
			assert((layer2->zone == HAMMER_ZONE_UNDO_INDEX) ||
				(layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) ||
				hammer_is_index_record(layer2->zone));
		}
		if (stats)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(intmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}
605