xref: /spdk/lib/ftl/ftl_p2l.c (revision 8afdeef3becfe9409cc9e7372bd0bc10e8b7d46d)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright 2023 Solidigm All Rights Reserved
 *   All rights reserved.
 */

#include "spdk/bdev_module.h"
#include "spdk/crc32.h"

#include "ftl_internal.h"
#include "ftl_band.h"
#include "ftl_core.h"
#include "ftl_layout.h"
#include "ftl_nv_cache_io.h"
#include "ftl_writer.h"
#include "mngt/ftl_mngt.h"

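/*
 * Runtime state of a single P2L (physical-to-logical) checkpoint region. Each open
 * band is paired with one checkpoint; its metadata buffer (md) holds the checkpoint
 * pages, with pages_per_xfer pages covering each xfer_size-sized transfer. In DEBUG
 * builds a bitmap additionally tracks which pages have been written, for validation.
 */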
struct ftl_p2l_ckpt {
	TAILQ_ENTRY(ftl_p2l_ckpt)	link;
	union ftl_md_vss		*vss_md_page;
	struct ftl_md			*md;
	struct ftl_layout_region	*layout_region;
	uint64_t			num_pages;
	uint64_t			pages_per_xfer;

#if defined(DEBUG)
	uint64_t			dbg_bmp_sz;
	void				*dbg_bmp;
	struct ftl_bitmap		*bmp;
#endif
};

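/* Allocate a checkpoint and bind it to the given P2L checkpoint layout region. */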
static struct ftl_p2l_ckpt *
ftl_p2l_ckpt_new(struct spdk_ftl_dev *dev, int region_type)
{
	struct ftl_p2l_ckpt *ckpt;
	struct ftl_layout_region *region = ftl_layout_region_get(dev, region_type);

	ckpt = calloc(1, sizeof(struct ftl_p2l_ckpt));
	if (!ckpt) {
		return NULL;
	}

	ckpt->layout_region = region;
	ckpt->md = dev->layout.md[region_type];
	ckpt->pages_per_xfer = dev->layout.p2l.pages_per_xfer;
	ckpt->num_pages = dev->layout.p2l.ckpt_pages;
	if (dev->nv_cache.md_size) {
		ckpt->vss_md_page = ftl_md_vss_buf_alloc(region, region->num_entries);
		if (!ckpt->vss_md_page) {
			free(ckpt);
			return NULL;
		}
	}

#if defined(DEBUG)
	/* One bit per checkpoint page - round the bitmap buffer size up to whole bytes */
	ckpt->dbg_bmp_sz = spdk_divide_round_up(ckpt->num_pages, 8);

	ckpt->dbg_bmp = calloc(1, ckpt->dbg_bmp_sz);
	assert(ckpt->dbg_bmp);
	ckpt->bmp = ftl_bitmap_create(ckpt->dbg_bmp, ckpt->dbg_bmp_sz);
	assert(ckpt->bmp);
#endif

	return ckpt;
}

static void
ftl_p2l_ckpt_destroy(struct ftl_p2l_ckpt *ckpt)
{
#if defined(DEBUG)
	ftl_bitmap_destroy(ckpt->bmp);
	free(ckpt->dbg_bmp);
#endif
	spdk_dma_free(ckpt->vss_md_page);
	free(ckpt);
}

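/*
 * Create one checkpoint per P2L checkpoint layout region and park them all on the
 * free list; open bands take a checkpoint from this list for as long as they are
 * being written.
 */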
int
ftl_p2l_ckpt_init(struct spdk_ftl_dev *dev)
{
	int region_type;
	struct ftl_p2l_ckpt *ckpt;

	TAILQ_INIT(&dev->p2l_ckpt.free);
	TAILQ_INIT(&dev->p2l_ckpt.inuse);
	for (region_type = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
	     region_type <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX;
	     region_type++) {
		ckpt = ftl_p2l_ckpt_new(dev, region_type);
		if (!ckpt) {
			return -1;
		}
		TAILQ_INSERT_TAIL(&dev->p2l_ckpt.free, ckpt, link);
	}
	return 0;
}

void
ftl_p2l_ckpt_deinit(struct spdk_ftl_dev *dev)
{
	struct ftl_p2l_ckpt *ckpt, *ckpt_next;

	TAILQ_FOREACH_SAFE(ckpt, &dev->p2l_ckpt.free, link, ckpt_next) {
		TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
		ftl_p2l_ckpt_destroy(ckpt);
	}

	TAILQ_FOREACH_SAFE(ckpt, &dev->p2l_ckpt.inuse, link, ckpt_next) {
		TAILQ_REMOVE(&dev->p2l_ckpt.inuse, ckpt, link);
		ftl_p2l_ckpt_destroy(ckpt);
	}
}

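/* Take any free checkpoint and move it to the in-use list; the caller (an open
 * band) owns it until ftl_p2l_ckpt_release() is called.
 */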
struct ftl_p2l_ckpt *
ftl_p2l_ckpt_acquire(struct spdk_ftl_dev *dev)
{
	struct ftl_p2l_ckpt *ckpt;

	ckpt = TAILQ_FIRST(&dev->p2l_ckpt.free);
	assert(ckpt);
	TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
	TAILQ_INSERT_TAIL(&dev->p2l_ckpt.inuse, ckpt, link);
	return ckpt;
}

void
ftl_p2l_ckpt_release(struct spdk_ftl_dev *dev, struct ftl_p2l_ckpt *ckpt)
{
	assert(ckpt);
#if defined(DEBUG)
	memset(ckpt->dbg_bmp, 0, ckpt->dbg_bmp_sz);
#endif
	TAILQ_REMOVE(&dev->p2l_ckpt.inuse, ckpt, link);
	TAILQ_INSERT_TAIL(&dev->p2l_ckpt.free, ckpt, link);
}

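/* Completion of a checkpoint persist issued from ftl_p2l_ckpt_issue(): on error
 * either retry the metadata write (if compiled with SPDK_FTL_RETRY_ON_ERROR) or
 * abort; on success drop the band's outstanding-I/O count and return the rq to
 * its owner.
 */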
static void
ftl_p2l_ckpt_issue_end(int status, void *arg)
{
	struct ftl_rq *rq = arg;
	assert(rq);

	if (status) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* retry */
		ftl_md_persist_entry_retry(&rq->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	assert(rq->io.band->queue_depth > 0);
	rq->io.band->queue_depth--;

	rq->owner.cb(rq);
}

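/*
 * Checkpoint the P2L entries of a single transfer (xfer_size blocks) just written
 * to a band: record each block's LBA and sequence id in the corresponding
 * checkpoint page(s) and persist them, so that a dirty shutdown can rebuild the
 * band's P2L map up to the last completed transfer.
 */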
void
ftl_p2l_ckpt_issue(struct ftl_rq *rq)
{
	struct ftl_rq_entry *iter = rq->entries;
	struct spdk_ftl_dev *dev = rq->dev;
	ftl_addr addr = rq->io.addr;
	struct ftl_p2l_ckpt *ckpt = NULL;
	struct ftl_p2l_ckpt_page_no_vss *map_page;
	struct ftl_band *band;
	uint64_t band_offs, p2l_map_page_no, cur_page, i, j;

	assert(rq);
	band = rq->io.band;
	ckpt = band->p2l_map.p2l_ckpt;
	assert(ckpt);
	assert(rq->num_blocks == dev->xfer_size);

	/* Derive the P2L map page no */
	band_offs = ftl_band_block_offset_from_addr(band, rq->io.addr);
	p2l_map_page_no = band_offs / dev->xfer_size * ckpt->pages_per_xfer;
	assert(p2l_map_page_no < ckpt->num_pages);
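	/* For example (hypothetical geometry, not actual driver defaults): with
	 * xfer_size = 256 blocks and FTL_NUM_P2L_ENTRIES_NO_VSS = 250, pages_per_xfer
	 * would be 2, so a request at band block offset 1024 is transfer no. 4 and its
	 * entries start at checkpoint page 8.
	 */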

	/* Get the corresponding P2L map page. The underlying stored data has the same
	 * entries as the band's tail P2L metadata (ftl_p2l_map_entry); however, whole
	 * pages (4KiB each) of content are checkpointed here, each page carrying its
	 * own additional metadata.
	 */
	map_page = ftl_md_get_buffer(ckpt->md);
	assert(map_page);
	map_page += p2l_map_page_no;
	i = 0;
	for (cur_page = 0; cur_page < ckpt->pages_per_xfer; cur_page++) {
		struct ftl_p2l_ckpt_page_no_vss *page = map_page + cur_page;
		/* Update the band P2L map */
		for (j = 0; i < rq->num_blocks && j < FTL_NUM_P2L_ENTRIES_NO_VSS; i++, iter++, j++) {
			if (iter->lba != FTL_LBA_INVALID) {
				/* This is compaction or reloc */
				assert(!ftl_addr_in_nvc(rq->dev, addr));
				ftl_band_set_p2l(band, iter->lba, addr, iter->seq_id);
			}
			page->map[j].lba = iter->lba;
			page->map[j].seq_id = iter->seq_id;
			addr = ftl_band_next_addr(band, addr, 1);
		}

		/* Set up the md */
		page->metadata.p2l_ckpt.seq_id = band->md->seq;
		page->metadata.p2l_ckpt.count = j;

#if defined(DEBUG)
		ftl_bitmap_set(ckpt->bmp, p2l_map_page_no + cur_page);
#endif
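		/* The checksum covers the page's full entry capacity, not just 'count'
		 * entries; the restore path recomputes it over the same fixed range.
		 */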
		page->metadata.p2l_ckpt.p2l_checksum = spdk_crc32c_update(page->map,
						       FTL_NUM_P2L_ENTRIES_NO_VSS * sizeof(struct ftl_p2l_map_entry), 0);
	}
	/* Persist the checkpoint pages backing this transfer */
	ftl_md_persist_entries(ckpt->md, p2l_map_page_no, ckpt->pages_per_xfer, map_page, NULL,
			       ftl_p2l_ckpt_issue_end, rq, &rq->md_persist_entry_ctx);
}

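/*
 * DEBUG-only validation: the per-checkpoint bitmap set in ftl_p2l_ckpt_issue() is
 * checked once a band has been fully written - every data page must have been
 * checkpointed and no tail-metadata page may have been.
 */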
#if defined(DEBUG)
static void
ftl_p2l_validate_pages(struct ftl_band *band, struct ftl_p2l_ckpt *ckpt,
		       uint64_t page_begin, uint64_t page_end, bool val)
{
	uint64_t page_no;

	for (page_no = page_begin; page_no < page_end; page_no++) {
		assert(ftl_bitmap_get(ckpt->bmp, page_no) == val);
	}
}

void
ftl_p2l_validate_ckpt(struct ftl_band *band)
{
	struct ftl_p2l_ckpt *ckpt = band->p2l_map.p2l_ckpt;
	uint64_t num_blks_tail_md = ftl_tail_md_num_blocks(band->dev);
	uint64_t num_pages_tail_md;

	if (!ckpt) {
		return;
	}

	num_pages_tail_md = num_blks_tail_md / band->dev->xfer_size * ckpt->pages_per_xfer;

	assert(num_blks_tail_md % band->dev->xfer_size == 0);

	/* all data pages written */
	ftl_p2l_validate_pages(band, ckpt,
			       0, ckpt->num_pages - num_pages_tail_md, true);

	/* tail md pages not written */
	ftl_p2l_validate_pages(band, ckpt, ckpt->num_pages - num_pages_tail_md,
			       ckpt->num_pages, false);
}
#endif

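/* Find the open or full band (if any) whose P2L checkpoint is stored in the given
 * metadata region; used by the shutdown path below to persist that band's map.
 */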
static struct ftl_band *
ftl_get_band_from_region(struct spdk_ftl_dev *dev, enum ftl_layout_region_type type)
{
	struct ftl_band *band = NULL;
	uint64_t i;

	assert(type >= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN);
	assert(type <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX);

	for (i = 0; i < ftl_get_num_bands(dev); i++) {
		band = &dev->bands[i];
		if ((band->md->state == FTL_BAND_STATE_OPEN ||
		     band->md->state == FTL_BAND_STATE_FULL) &&
		    band->md->p2l_md_region == type) {
			return band;
		}
	}

	return NULL;
}

static void ftl_mngt_persist_band_p2l(struct ftl_mngt_process *mngt, struct ftl_p2l_sync_ctx *ctx);

static void
ftl_p2l_ckpt_persist_end(int status, void *arg)
{
	struct ftl_mngt_process *mngt = arg;
	struct ftl_p2l_sync_ctx *ctx;

	assert(mngt);

	if (status) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	ctx = ftl_mngt_get_step_ctx(mngt);
	ctx->xfer_start++;

	if (ctx->xfer_start == ctx->xfer_end) {
		ctx->md_region++;
		ftl_mngt_continue_step(mngt);
	} else {
		ftl_mngt_persist_band_p2l(mngt, ctx);
	}
}

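/*
 * Persist one transfer's worth of the band's in-memory P2L map into its checkpoint
 * region. Re-invoked from ftl_p2l_ckpt_persist_end() until xfer_end is reached,
 * after which the step moves on to the next metadata region.
 */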
static void
ftl_mngt_persist_band_p2l(struct ftl_mngt_process *mngt, struct ftl_p2l_sync_ctx *ctx)
{
	struct ftl_band *band = ctx->band;
	struct ftl_p2l_ckpt_page_no_vss *map_page;
	struct ftl_p2l_map_entry *band_entries;
	struct ftl_p2l_ckpt *ckpt;
	struct spdk_ftl_dev *dev = band->dev;
	uint64_t cur_page;
	uint64_t lbas_synced = 0;

	ckpt = band->p2l_map.p2l_ckpt;

	map_page = ftl_md_get_buffer(ckpt->md);
	assert(map_page);

	map_page += ctx->xfer_start * ckpt->pages_per_xfer;

	for (cur_page = 0; cur_page < ckpt->pages_per_xfer; cur_page++) {
		struct ftl_p2l_ckpt_page_no_vss *page = map_page + cur_page;
		uint64_t lbas_to_copy = spdk_min(FTL_NUM_P2L_ENTRIES_NO_VSS, dev->xfer_size - lbas_synced);

		band_entries = band->p2l_map.band_map + ctx->xfer_start * dev->xfer_size + lbas_synced;
		memcpy(page->map, band_entries, lbas_to_copy * sizeof(struct ftl_p2l_map_entry));

		page->metadata.p2l_ckpt.seq_id = band->md->seq;
		page->metadata.p2l_ckpt.p2l_checksum = spdk_crc32c_update(page->map,
						       FTL_NUM_P2L_ENTRIES_NO_VSS * sizeof(struct ftl_p2l_map_entry), 0);
		page->metadata.p2l_ckpt.count = lbas_to_copy;
		lbas_synced += lbas_to_copy;
	}

	assert(lbas_synced == dev->xfer_size);
	/* Persist the checkpoint pages for this transfer */
	ftl_md_persist_entries(ckpt->md, ctx->xfer_start * ckpt->pages_per_xfer, ckpt->pages_per_xfer,
			       map_page, NULL,
			       ftl_p2l_ckpt_persist_end, mngt, &band->md_persist_entry_ctx);
}

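/*
 * Management step run at shutdown: walk all P2L checkpoint regions and, for each
 * region assigned to an open or full band, persist that band's P2L map up to the
 * band's current write position.
 */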
void
ftl_mngt_persist_bands_p2l(struct ftl_mngt_process *mngt)
{
	struct ftl_p2l_sync_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
	struct ftl_band *band;
	uint64_t band_offs, num_xfers;

	if (ctx->md_region > FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX) {
		ftl_mngt_next_step(mngt);
		return;
	}

	band = ftl_get_band_from_region(ftl_mngt_get_dev(mngt), ctx->md_region);

	/* No band has the md region assigned (shutdown happened before next_band was assigned) */
	if (!band) {
		ctx->xfer_start = 0;
		ctx->xfer_end = 0;
		ctx->md_region++;
		ftl_mngt_continue_step(mngt);
		return;
	}

	band_offs = ftl_band_block_offset_from_addr(band, band->md->iter.addr);
	num_xfers = band_offs / band->dev->xfer_size;

	ctx->xfer_start = 0;
	ctx->xfer_end = num_xfers;
	ctx->band = band;

	/* Band wasn't written to - no need to sync its P2L */
	if (ctx->xfer_end == 0) {
		ctx->md_region++;
		ftl_mngt_continue_step(mngt);
		return;
	}

	ftl_mngt_persist_band_p2l(mngt, ctx);
}

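/* Scan a checkpoint region and return the highest sequence id stamped into its
 * pages; used during recovery to identify the band that last used the region.
 */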
uint64_t
ftl_mngt_p2l_ckpt_get_seq_id(struct spdk_ftl_dev *dev, int md_region)
{
	struct ftl_layout *layout = &dev->layout;
	struct ftl_md *md = layout->md[md_region];
	struct ftl_p2l_ckpt_page_no_vss *page = ftl_md_get_buffer(md);
	uint64_t page_no, seq_id = 0;

	for (page_no = 0; page_no < layout->p2l.ckpt_pages; page_no++, page++) {
		if (seq_id < page->metadata.p2l_ckpt.seq_id) {
			seq_id = page->metadata.p2l_ckpt.seq_id;
		}
	}
	return seq_id;
}

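/*
 * Dirty-shutdown restore: rebuild the band's P2L map from its checkpoint region.
 * Only pages stamped with the band's sequence id are applied, each page's CRC is
 * verified before its entries are copied back, and the band's write iterator is
 * finally positioned just past the last checkpointed transfer.
 */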
int
ftl_mngt_p2l_ckpt_restore(struct ftl_band *band, uint32_t md_region, uint64_t seq_id)
{
	struct ftl_layout *layout = &band->dev->layout;
	struct ftl_md *md = layout->md[md_region];
	struct ftl_p2l_ckpt_page_no_vss *page = ftl_md_get_buffer(md);
	struct ftl_p2l_map_entry *band_entries;
	struct spdk_ftl_dev *dev = band->dev;
	uint64_t page_no, page_max = 0, xfer_count, lbas_synced;
	uint64_t pages_per_xfer = spdk_divide_round_up(dev->xfer_size, FTL_NUM_P2L_ENTRIES_NO_VSS);
	bool page_found = false;

	assert(band->md->p2l_md_region == md_region);
	if (band->md->p2l_md_region != md_region) {
		return -EINVAL;
	}

	assert(band->md->seq == seq_id);
	if (band->md->seq != seq_id) {
		return -EINVAL;
	}

	for (page_no = 0; page_no < layout->p2l.ckpt_pages; page_no++, page++) {
		if (page->metadata.p2l_ckpt.seq_id != seq_id) {
			continue;
		}

		page_max = page_no;
		page_found = true;

		if (page->metadata.p2l_ckpt.p2l_checksum &&
		    page->metadata.p2l_ckpt.p2l_checksum != spdk_crc32c_update(page->map,
				    FTL_NUM_P2L_ENTRIES_NO_VSS * sizeof(struct ftl_p2l_map_entry), 0)) {
			ftl_stats_crc_error(band->dev, FTL_STATS_TYPE_MD_NV_CACHE);
			return -EINVAL;
		}

		xfer_count = page_no / pages_per_xfer;
		lbas_synced = (page_no % pages_per_xfer) * FTL_NUM_P2L_ENTRIES_NO_VSS;

		/* Restore the page from P2L checkpoint */
		band_entries = band->p2l_map.band_map + xfer_count * dev->xfer_size + lbas_synced;

		memcpy(band_entries, page->map, page->metadata.p2l_ckpt.count * sizeof(struct ftl_p2l_map_entry));
	}

	assert(page_found);
	if (!page_found) {
		return -EINVAL;
	}

	/* Restore checkpoint in band P2L map */
	band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(
					 band->dev, md_region);

	/* Align page_max to xfer_size aligned pages */
	if ((page_max + 1) % pages_per_xfer != 0) {
		page_max += (pages_per_xfer - page_max % pages_per_xfer - 1);
	}
#ifdef DEBUG
	/* Set checkpoint valid map for validation */
	struct ftl_p2l_ckpt *ckpt = band->p2l_map.p2l_ckpt;
	for (uint64_t i = 0; i <= page_max; i++) {
		ftl_bitmap_set(ckpt->bmp, i);
	}
#endif

	ftl_band_iter_init(band);
	/* Align page max to xfer size and set iter */
	ftl_band_iter_set(band, (page_max / band->p2l_map.p2l_ckpt->pages_per_xfer + 1) * dev->xfer_size);

	return 0;
}

enum ftl_layout_region_type
ftl_p2l_ckpt_region_type(const struct ftl_p2l_ckpt *ckpt) {
	return ckpt->layout_region->type;
}

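/* Like ftl_p2l_ckpt_acquire(), but pick the free checkpoint bound to a specific
 * layout region; the restore paths use this to reattach a band to the region it
 * owned before shutdown.
 */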
struct ftl_p2l_ckpt *
ftl_p2l_ckpt_acquire_region_type(struct spdk_ftl_dev *dev, uint32_t region_type)
{
	struct ftl_p2l_ckpt *ckpt = NULL;

	TAILQ_FOREACH(ckpt, &dev->p2l_ckpt.free, link) {
		if (ckpt->layout_region->type == region_type) {
			break;
		}
	}

	assert(ckpt);

	TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
	TAILQ_INSERT_TAIL(&dev->p2l_ckpt.inuse, ckpt, link);

	return ckpt;
}

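/*
 * Clean-shutdown restore: the checkpoint pages were persisted in order up to the
 * band's saved write offset, so the P2L map can be rebuilt by copying 'count'
 * entries from each written page, without the sequence-id scan and CRC
 * verification of the dirty path.
 */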
int
ftl_mngt_p2l_ckpt_restore_clean(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_layout *layout = &dev->layout;
	struct ftl_p2l_ckpt_page_no_vss *page;
	enum ftl_layout_region_type md_region = band->md->p2l_md_region;
	struct ftl_p2l_ckpt *ckpt;
	uint64_t page_no;
	uint64_t num_written_pages, lbas_synced;

	if (md_region < FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN ||
	    md_region > FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX) {
		return -EINVAL;
	}

	assert(band->md->iter.offset % dev->xfer_size == 0);

	/* Associate band with md region before shutdown */
	if (!band->p2l_map.p2l_ckpt) {
		band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(dev, md_region);
	}

	/* Band was opened but no data was written */
	if (band->md->iter.offset == 0) {
		return 0;
	}

	ckpt = band->p2l_map.p2l_ckpt;
	num_written_pages = band->md->iter.offset / dev->xfer_size * ckpt->pages_per_xfer;

	page_no = 0;
	lbas_synced = 0;

	/* Restore P2L map up to last written page */
	page = ftl_md_get_buffer(layout->md[md_region]);

	for (; page_no < num_written_pages; page_no++, page++) {
		assert(page->metadata.p2l_ckpt.seq_id == band->md->seq);
		/* Restore the page from P2L checkpoint */
		memcpy(band->p2l_map.band_map + lbas_synced, page->map,
		       page->metadata.p2l_ckpt.count * sizeof(struct ftl_p2l_map_entry));

		lbas_synced += page->metadata.p2l_ckpt.count;

#if defined(DEBUG)
		assert(ftl_bitmap_get(band->p2l_map.p2l_ckpt->bmp, page_no) == false);
		ftl_bitmap_set(band->p2l_map.p2l_ckpt->bmp, page_no);
#endif
	}

	assert(lbas_synced % dev->xfer_size == 0);

	assert(page->metadata.p2l_ckpt.seq_id < band->md->seq);

	return 0;
}

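/*
 * Fast (shared-memory) variant of the clean restore: the band's P2L map is assumed
 * to have survived in SHM, so only the checkpoint association is re-established;
 * DEBUG builds also replay the valid-page bitmap for the pages written before
 * shutdown.
 */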
void
ftl_mngt_p2l_ckpt_restore_shm_clean(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	enum ftl_layout_region_type md_region = band->md->p2l_md_region;

	/* Associate band with md region before shutdown */
	if (!band->p2l_map.p2l_ckpt) {
		band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(dev, md_region);
	}

#if defined(DEBUG)
	uint64_t page_no;
	uint64_t num_written_pages;

	assert(band->md->iter.offset % dev->xfer_size == 0);
	num_written_pages = band->md->iter.offset / dev->xfer_size * band->p2l_map.p2l_ckpt->pages_per_xfer;

	/* Band was opened but no data was written */
	if (band->md->iter.offset == 0) {
		return;
	}

	/* Start at the first data page - the checkpoint does not cover the head md */
	page_no = 0;

	for (; page_no < num_written_pages; page_no++) {
		assert(ftl_bitmap_get(band->p2l_map.p2l_ckpt->bmp, page_no) == false);
		ftl_bitmap_set(band->p2l_map.p2l_ckpt->bmp, page_no);
	}
#endif
}
587