/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/cmd_blockmap.c,v 1.4 2008/07/19 18:48:14 dillon Exp $
 */

#include "hammer.h"

/*
 * Each collect structure covers the 1<<(19+23) byte address space
 * represented by one layer-1 entry, plus an in-memory copy of the
 * 1<<23 bytes of layer-2 entries that the layer-1 entry points to.
 */
typedef struct collect {
	RB_ENTRY(collect) entry;
	hammer_off_t	phys_offset;  /* layer2 address pointed to by layer1 */
	hammer_off_t	*offsets;  /* big-block offset for layer2[i] */
	struct hammer_blockmap_layer2 *track2;  /* computed (tracked) layer2 entries */
	struct hammer_blockmap_layer2 *layer2;  /* copy of on-disk layer2 (1<<19 x 16 byte entries) */
	int error;  /* # of inconsistencies */
} *collect_t;

static int
collect_compare(struct collect *c1, struct collect *c2)
{
	if (c1->phys_offset < c2->phys_offset)
		return(-1);
	if (c1->phys_offset > c2->phys_offset)
		return(1);
	return(0);
}

RB_HEAD(collect_rb_tree, collect) CollectTree = RB_INITIALIZER(&CollectTree);
RB_PROTOTYPE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t);
RB_GENERATE2(collect_rb_tree, collect, entry, collect_compare, hammer_off_t,
	phys_offset);

static void dump_blockmap(const char *label, int zone);
static void check_freemap(hammer_blockmap_t freemap);
static void check_btree_node(hammer_off_t node_offset, int depth);
static void check_undo(hammer_blockmap_t undomap);
static __inline void collect_btree_root(hammer_off_t node_offset);
static __inline void collect_btree_internal(hammer_btree_elm_t elm);
static __inline void collect_btree_leaf(hammer_btree_elm_t elm);
static __inline void collect_freemap_layer1(hammer_blockmap_t freemap);
static __inline void collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1);
static __inline void collect_undo(hammer_off_t scan_offset,
	hammer_fifo_head_t head);
static void collect_blockmap(hammer_off_t offset, int32_t length, int zone);
static struct hammer_blockmap_layer2 *collect_get_track(
	collect_t collect, hammer_off_t offset, int zone,
	struct hammer_blockmap_layer2 *layer2);
static collect_t collect_get(hammer_off_t phys_offset);
static void dump_collect_table(void);
static void dump_collect(collect_t collect, struct zone_stat *stats);

static int num_bad_layer1 = 0;
static int num_bad_layer2 = 0;
static int num_bad_node = 0;

void
hammer_cmd_blockmap(void)
{
	dump_blockmap("freemap", HAMMER_ZONE_FREEMAP_INDEX);
}

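/*
 * Dump the layer1/layer2 blockmap for the given zone.  Each layer1
 * entry is CRC checked and printed, followed by one line per layer2
 * entry (big-block).  The leading flag characters mean:
 *
 *	B	bad CRC (layer1 or layer2)
 *	A	layer2 append_off out of range
 *	F	layer2 bytes_free out of range
 */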
static
void
dump_blockmap(const char *label, int zone)
{
	struct volume_info *root_volume;
	hammer_blockmap_t rootmap;
	hammer_blockmap_t blockmap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	struct zone_stat *stats = NULL;
	int xerr, aerr, ferr;
	int i;

	root_volume = get_root_volume();
	rootmap = &root_volume->ondisk->vol0_blockmap[zone];
	assert(rootmap->phys_offset != 0);

	printf("                   "
	       "phys             first            next             alloc\n");
	for (i = 0; i < HAMMER_MAX_ZONES; i++) {
		blockmap = &root_volume->ondisk->vol0_blockmap[i];
		if (VerboseOpt || i == zone) {
			printf("zone %-2d %-10s %016jx %016jx %016jx %016jx\n",
				i, (i == zone ? label : ""),
				(uintmax_t)blockmap->phys_offset,
				(uintmax_t)blockmap->first_offset,
				(uintmax_t)blockmap->next_offset,
				(uintmax_t)blockmap->alloc_offset);
		}
	}

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	for (phys_offset = HAMMER_ZONE_ENCODE(zone, 0);
	     phys_offset < HAMMER_ZONE_ENCODE(zone, HAMMER_OFF_LONG_MASK);
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		/*
		 * Dive layer 1.
		 */
		layer1_offset = rootmap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		xerr = ' ';  /* good */
		if (layer1->layer1_crc !=
		    crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
			xerr = 'B';
			++num_bad_layer1;
		}
		if (xerr == ' ' &&
		    layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			continue;
		}
		printf("%c layer1 %016jx @%016jx blocks-free %jd\n",
			xerr,
			(uintmax_t)phys_offset,
			(uintmax_t)layer1->phys_offset,
			(intmax_t)layer1->blocks_free);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			hammer_off_t zone_offset = phys_offset + block_offset;
			/*
			 * Dive layer 2, each entry represents a big-block.
			 */
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

			xerr = aerr = ferr = ' ';  /* good */
			if (layer2->entry_crc !=
			    crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
				xerr = 'B';
				++num_bad_layer2;
			}
			if (layer2->append_off > HAMMER_BIGBLOCK_SIZE) {
				aerr = 'A';
				++num_bad_layer2;
			}
			if (layer2->bytes_free < 0 ||
			    layer2->bytes_free > HAMMER_BIGBLOCK_SIZE) {
				ferr = 'F';
				++num_bad_layer2;
			}

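			/*
			 * Unless extra verbosity (-vv) was requested, stop
			 * at the first error-free UNAVAIL entry; presumably
			 * the remainder of this layer1 range is equally
			 * uninteresting.
			 */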
			if (VerboseOpt < 2 &&
			    xerr == ' ' && aerr == ' ' && ferr == ' ' &&
			    layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
				break;
			}
			printf("%c%c%c     %016jx zone=%-2d ",
				xerr, aerr, ferr, (uintmax_t)zone_offset, layer2->zone);
			if (VerboseOpt) {
				printf("vol=%-3d L1#=%-6d L2#=%-6d L1=%-7lu L2=%-7lu ",
					HAMMER_VOL_DECODE(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_INDEX(zone_offset),
					HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset),
					HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset));
			}
			printf("app=%-7d free=%-7d",
				layer2->append_off,
				layer2->bytes_free);
			if (VerboseOpt) {
				double bytes_used = HAMMER_BIGBLOCK_SIZE -
					layer2->bytes_free;
				printf(" fill=%-5.1lf crc=%08x-%08x\n",
					bytes_used * 100 / HAMMER_BIGBLOCK_SIZE,
					layer1->layer1_crc,
					layer2->entry_crc);
			} else {
				printf("\n");
			}

			if (VerboseOpt)
				hammer_add_zone_stat_layer2(stats, layer2);
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_layer1 || VerboseOpt) {
		printf("%d bad layer1\n", num_bad_layer1);
	}
	if (num_bad_layer2 || VerboseOpt) {
		printf("%d bad layer2\n", num_bad_layer2);
	}
}

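/*
 * Cross-check the freemap against the actual allocations found by
 * scanning the freemap itself, the B-Tree, and the UNDO/REDO FIFO,
 * then report any mismatches.
 */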
void
hammer_cmd_checkmap(void)
{
	struct volume_info *volume;
	hammer_blockmap_t freemap;
	hammer_blockmap_t undomap;
	hammer_off_t node_offset;

	volume = get_root_volume();
	node_offset = volume->ondisk->vol0_btree_root;
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	undomap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	printf("Volume header\tnext_tid=%016jx\n",
	       (uintmax_t)volume->ondisk->vol0_next_tid);
	printf("\t\tbufoffset=%016jx\n",
	       (uintmax_t)volume->ondisk->vol_buf_beg);
	printf("\t\tundosize=%jdMB\n",
	       (intmax_t)((undomap->alloc_offset & HAMMER_OFF_LONG_MASK)
		/ (1024 * 1024)));

	printf("Collecting allocation info from freemap: ");
	fflush(stdout);
	check_freemap(freemap);
	printf("done\n");

	printf("Collecting allocation info from B-Tree: ");
	fflush(stdout);
	check_btree_node(node_offset, 0);
	printf("done\n");

	printf("Collecting allocation info from UNDO: ");
	fflush(stdout);
	check_undo(undomap);
	printf("done\n");

	dump_collect_table();
}

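/*
 * Walk the freemap's layer1 entries and collect the big-blocks that
 * the freemap itself occupies (the layer1 array plus each in-use
 * layer2 array).
 */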
static void
check_freemap(hammer_blockmap_t freemap)
{
	hammer_off_t offset;
	struct buffer_info *buffer1 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	int i;

	collect_freemap_layer1(freemap);

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX1; ++i) {
		offset = freemap->phys_offset + i * sizeof(*layer1);
		layer1 = get_buffer_data(offset, &buffer1, 0);
		if (layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL)
			collect_freemap_layer2(layer1);
	}
	rel_buffer(buffer1);
}

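/*
 * Recursively walk the B-Tree starting at the root node, CRC checking
 * each node and collecting the storage referenced by internal and leaf
 * elements.  Bad nodes are flagged 'B' (bad CRC) and/or 'I' (I/O error).
 */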
static void
check_btree_node(hammer_off_t node_offset, int depth)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_btree_elm_t elm;
	int i;
	char badc = ' ';  /* good */
	char badm = ' ';  /* good */

	if (depth == 0)
		collect_btree_root(node_offset);
	node = get_node(node_offset, &buffer);

	if (node == NULL) {
		badc = 'B';
		badm = 'I';
	} else if (crc32(&node->crc + 1, HAMMER_BTREE_CRCSIZE) != node->crc) {
		badc = 'B';
	}

	if (badm != ' ' || badc != ' ') {  /* not good */
		++num_bad_node;
		printf("%c%c   NODE %016jx ",
			badc, badm, (uintmax_t)node_offset);
		if (node == NULL) {
			printf("(IO ERROR)\n");
			rel_buffer(buffer);
			return;
		} else {
			printf("cnt=%02d p=%016jx type=%c depth=%d mirror=%016jx\n",
			       node->count,
			       (uintmax_t)node->parent,
			       (node->type ? node->type : '?'),
			       depth,
			       (uintmax_t)node->mirror_tid);
		}
	}

	for (i = 0; i < node->count; ++i) {
		elm = &node->elms[i];

		switch(node->type) {
		case HAMMER_BTREE_TYPE_INTERNAL:
			if (elm->internal.subtree_offset) {
				collect_btree_internal(elm);
				check_btree_node(elm->internal.subtree_offset,
						 depth + 1);
			}
			break;
		case HAMMER_BTREE_TYPE_LEAF:
			if (elm->leaf.data_offset)
				collect_btree_leaf(elm);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
	}
	rel_buffer(buffer);
}

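/*
 * Scan the UNDO/REDO FIFO from the start of the undo zone up to its
 * allocation limit and collect the space covered by each record.
 * Records with an illegal size cause the scan to skip ahead to the
 * next HAMMER_UNDO_ALIGN boundary.
 */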
static void
check_undo(hammer_blockmap_t undomap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t scan_offset;
	hammer_fifo_head_t head;

	scan_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	while (scan_offset < undomap->alloc_offset) {
		head = get_buffer_data(scan_offset, &buffer, 0);
		switch (head->hdr_type) {
		case HAMMER_HEAD_TYPE_PAD:
		case HAMMER_HEAD_TYPE_DUMMY:
		case HAMMER_HEAD_TYPE_UNDO:
		case HAMMER_HEAD_TYPE_REDO:
			collect_undo(scan_offset, head);
			break;
		default:
			assert(!DebugOpt);
			break;
		}
		if ((head->hdr_size & HAMMER_HEAD_ALIGN_MASK) ||
		     head->hdr_size == 0 ||
		     head->hdr_size > HAMMER_UNDO_ALIGN -
			((u_int)scan_offset & HAMMER_UNDO_MASK)) {
			printf("Illegal size, skipping to next boundary\n");
			scan_offset = (scan_offset + HAMMER_UNDO_MASK) &
					~HAMMER_UNDO_MASK64;
		} else {
			scan_offset += head->hdr_size;
		}
	}
	rel_buffer(buffer);
}

static __inline
void
collect_freemap_layer1(hammer_blockmap_t freemap)
{
	/*
	 * This translation is necessary for checkmap to work properly,
	 * as zone-4 (freemap) offsets are really just zone-2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, freemap->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_freemap_layer2(struct hammer_blockmap_layer1 *layer1)
{
	/*
	 * This translation is necessary for checkmap to work properly,
	 * as zone-4 (freemap) offsets are really just zone-2 address space.
	 */
	hammer_off_t zone4_offset = hammer_xlate_to_zoneX(
		HAMMER_ZONE_FREEMAP_INDEX, layer1->phys_offset);
	collect_blockmap(zone4_offset, HAMMER_BIGBLOCK_SIZE,
		HAMMER_ZONE_FREEMAP_INDEX);
}

static __inline
void
collect_btree_root(hammer_off_t node_offset)
{
	collect_blockmap(node_offset,
		sizeof(struct hammer_node_ondisk),  /* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

static __inline
void
collect_btree_internal(hammer_btree_elm_t elm)
{
	collect_blockmap(elm->internal.subtree_offset,
		sizeof(struct hammer_node_ondisk),  /* 4KB */
		HAMMER_ZONE_BTREE_INDEX);
}

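/*
 * Collect the data referenced by a leaf element.  The zone the data
 * should have been allocated from is derived from the record type
 * (meta-data zone for inodes, directory entries, etc., small or large
 * data zone for user data), and the length is rounded up to a 16-byte
 * boundary to match the allocator's granularity.
 */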
static __inline
void
collect_btree_leaf(hammer_btree_elm_t elm)
{
	int zone;

	switch (elm->base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_DIRENTRY:
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		zone = HAMMER_ZONE_META_INDEX;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		zone = hammer_data_zone_index(elm->leaf.data_len);
		break;
	default:
		zone = HAMMER_ZONE_UNAVAIL_INDEX;
		break;
	}
	collect_blockmap(elm->leaf.data_offset,
		(elm->leaf.data_len + 15) & ~15, zone);
}

static __inline
void
collect_undo(hammer_off_t scan_offset, hammer_fifo_head_t head)
{
	collect_blockmap(scan_offset, head->hdr_size,
		HAMMER_ZONE_UNDO_INDEX);
}

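/*
 * Record an allocation of 'length' bytes at the given zone-X offset.
 * The offset is translated to its zone-2 (raw buffer) equivalent via
 * blockmap_lookup() and the corresponding per-big-block tracking
 * entry is debited.
 */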
static
void
collect_blockmap(hammer_off_t offset, int32_t length, int zone)
{
	struct hammer_blockmap_layer1 layer1;
	struct hammer_blockmap_layer2 layer2;
	struct hammer_blockmap_layer2 *track2;
	hammer_off_t result_offset;
	collect_t collect;
	int error;

	result_offset = blockmap_lookup(offset, &layer1, &layer2, &error);
	if (DebugOpt) {
		assert(HAMMER_ZONE_DECODE(offset) == zone);
		assert(HAMMER_ZONE_DECODE(result_offset) ==
			HAMMER_ZONE_RAW_BUFFER_INDEX);
		assert(error == 0);
	}
	collect = collect_get(layer1.phys_offset); /* layer2 address */
	track2 = collect_get_track(collect, result_offset, zone, &layer2);
	track2->bytes_free -= length;
}

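/*
 * Look up (or lazily create) the collect structure for the layer2
 * array at the given physical offset.  Each structure tracks one
 * layer1 entry's worth of big-blocks and is kept in an RB tree keyed
 * by that offset.
 */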
static
collect_t
collect_get(hammer_off_t phys_offset)
{
	collect_t collect;

	collect = RB_LOOKUP(collect_rb_tree, &CollectTree, phys_offset);
	if (collect)
		return(collect);

	collect = calloc(1, sizeof(*collect));
	collect->track2 = malloc(HAMMER_BIGBLOCK_SIZE);  /* 1<<23 bytes */
	collect->layer2 = malloc(HAMMER_BIGBLOCK_SIZE);  /* 1<<23 bytes */
	collect->offsets = malloc(sizeof(hammer_off_t) * HAMMER_BLOCKMAP_RADIX2);
	collect->phys_offset = phys_offset;
	RB_INSERT(collect_rb_tree, &CollectTree, collect);
	bzero(collect->track2, HAMMER_BIGBLOCK_SIZE);
	bzero(collect->layer2, HAMMER_BIGBLOCK_SIZE);

	return (collect);
}

static
void
collect_rel(collect_t collect)
{
	free(collect->offsets);
	free(collect->layer2);
	free(collect->track2);
	free(collect);
}

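/*
 * Return the in-memory tracking entry for the big-block containing
 * 'offset', initializing it from the on-disk layer2 entry on first
 * use.  The entry_crc field is repurposed as a "loaded" flag.
 */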
static
struct hammer_blockmap_layer2 *
collect_get_track(collect_t collect, hammer_off_t offset, int zone,
		  struct hammer_blockmap_layer2 *layer2)
{
	struct hammer_blockmap_layer2 *track2;
	size_t i;

	i = HAMMER_BLOCKMAP_LAYER2_INDEX(offset);
	track2 = &collect->track2[i];
	if (track2->entry_crc == 0) {
		collect->layer2[i] = *layer2;
		collect->offsets[i] = offset & ~HAMMER_BIGBLOCK_MASK64;
		track2->zone = zone;
		track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
		track2->entry_crc = 1;	/* steal field to tag track load */
	}
	return (track2);
}

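/*
 * Compare the tracked (computed) layer2 information against the
 * on-disk copies collected earlier, report any mismatches, and then
 * tear down the collect tree.
 */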
static
void
dump_collect_table(void)
{
	collect_t collect;
	int error = 0;
	struct zone_stat *stats = NULL;

	if (VerboseOpt)
		stats = hammer_init_zone_stat();

	RB_FOREACH(collect, collect_rb_tree, &CollectTree) {
		dump_collect(collect, stats);
		error += collect->error;
	}

	while ((collect = RB_ROOT(&CollectTree)) != NULL) {
		RB_REMOVE(collect_rb_tree, &CollectTree, collect);
		collect_rel(collect);
	}
	assert(RB_EMPTY(&CollectTree));

	if (VerboseOpt) {
		hammer_print_zone_stat(stats);
		hammer_cleanup_zone_stat(stats);
	}

	if (num_bad_node || VerboseOpt) {
		printf("%d bad nodes\n", num_bad_node);
	}
	if (error || VerboseOpt) {
		printf("%d errors\n", error);
	}
}

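/*
 * Report one collect structure: every big-block that was touched is
 * checked for a zone mismatch ("BZ") or a free-byte-count mismatch
 * ("BM") between the computed and on-disk layer2 entries.
 */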
static
void
dump_collect(collect_t collect, struct zone_stat *stats)
{
	struct hammer_blockmap_layer2 *track2;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t offset;
	int i, zone;

	for (i = 0; i < HAMMER_BLOCKMAP_RADIX2; ++i) {
		track2 = &collect->track2[i];
		layer2 = &collect->layer2[i];
		offset = collect->offsets[i];

		/*
		 * Check big-blocks referenced by freemap, data,
		 * B-Tree nodes and UNDO fifo.
		 */
		if (track2->entry_crc == 0)
			continue;

		zone = layer2->zone;
		if (DebugOpt) {
			assert((zone == HAMMER_ZONE_UNDO_INDEX) ||
				(zone == HAMMER_ZONE_FREEMAP_INDEX) ||
				hammer_is_zone2_mapped_index(zone));
		}
		if (VerboseOpt)
			hammer_add_zone_stat_layer2(stats, layer2);

		if (track2->zone != layer2->zone) {
			printf("BZ\tblock=%016jx calc zone=%-2d, got zone=%-2d\n",
				(intmax_t)offset,
				track2->zone,
				layer2->zone);
			collect->error++;
		} else if (track2->bytes_free != layer2->bytes_free) {
			printf("BM\tblock=%016jx zone=%-2d calc %d free, got %d\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free,
				layer2->bytes_free);
			collect->error++;
		} else if (VerboseOpt) {
			printf("\tblock=%016jx zone=%-2d %d free (correct)\n",
				(intmax_t)offset,
				layer2->zone,
				track2->bytes_free);
		}
	}
}
636