/* xref: /spdk/lib/ftl/ftl_p2l.c (revision 28fa2c2f668e229a1b215535d2a8ea5039c28deb) */
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright 2023 Solidigm All Rights Reserved
 *   All rights reserved.
 */

#include "spdk/bdev_module.h"
#include "spdk/crc32.h"

#include "ftl_internal.h"
#include "ftl_band.h"
#include "ftl_core.h"
#include "ftl_layout.h"
#include "ftl_nv_cache_io.h"
#include "ftl_writer.h"
#include "mngt/ftl_mngt.h"

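/*
 * A single P2L (physical-to-logical) checkpoint. Each checkpoint shadows the
 * P2L map of one open band in a dedicated metadata region and is persisted
 * incrementally, one transfer at a time, so the mapping can be recovered after
 * a dirty shutdown. The DEBUG-only bitmap tracks which checkpoint pages have
 * been written and is used purely for validation.
 */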
struct ftl_p2l_ckpt {
	TAILQ_ENTRY(ftl_p2l_ckpt)	link;
	union ftl_md_vss		*vss_md_page;
	struct ftl_md			*md;
	struct ftl_layout_region	*layout_region;
	uint64_t			num_pages;
	uint64_t			pages_per_xfer;

#if defined(DEBUG)
	uint64_t			dbg_bmp_sz;
	void				*dbg_bmp;
	struct ftl_bitmap		*bmp;
#endif
};

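/* Allocate a checkpoint backed by the given P2L metadata region. A VSS buffer
 * is needed only when the NV cache carries per-block metadata (md_size != 0).
 */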
static struct ftl_p2l_ckpt *
ftl_p2l_ckpt_new(struct spdk_ftl_dev *dev, int region_type)
{
	struct ftl_p2l_ckpt *ckpt;
	struct ftl_layout_region *region = ftl_layout_region_get(dev, region_type);

	ckpt = calloc(1, sizeof(struct ftl_p2l_ckpt));
	if (!ckpt) {
		return NULL;
	}

	ckpt->layout_region = region;
	ckpt->md = dev->layout.md[region_type];
	ckpt->pages_per_xfer = dev->layout.p2l.pages_per_xfer;
	ckpt->num_pages = dev->layout.p2l.ckpt_pages;
	if (dev->nv_cache.md_size) {
		ckpt->vss_md_page = ftl_md_vss_buf_alloc(region, region->num_entries);
		if (!ckpt->vss_md_page) {
			free(ckpt);
			return NULL;
		}
	}

#if defined(DEBUG)
	/* One bit per checkpoint page; the bitmap buffer size must be a multiple
	 * of the word size (8B), so round the page count up to whole words.
	 */
	ckpt->dbg_bmp_sz = spdk_divide_round_up(ckpt->num_pages, 64) * 8;

	ckpt->dbg_bmp = calloc(1, ckpt->dbg_bmp_sz);
	assert(ckpt->dbg_bmp);
	ckpt->bmp = ftl_bitmap_create(ckpt->dbg_bmp, ckpt->dbg_bmp_sz);
	assert(ckpt->bmp);
#endif

	return ckpt;
}

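/* Free a checkpoint along with its VSS buffer and DEBUG bitmap. */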
static void
ftl_p2l_ckpt_destroy(struct ftl_p2l_ckpt *ckpt)
{
#if defined(DEBUG)
	ftl_bitmap_destroy(ckpt->bmp);
	free(ckpt->dbg_bmp);
#endif
	spdk_dma_free(ckpt->vss_md_page);
	free(ckpt);
}

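/* Create one checkpoint per P2L checkpoint region and park them all on the free list. */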
int
ftl_p2l_ckpt_init(struct spdk_ftl_dev *dev)
{
	int region_type;
	struct ftl_p2l_ckpt *ckpt;

	TAILQ_INIT(&dev->p2l_ckpt.free);
	TAILQ_INIT(&dev->p2l_ckpt.inuse);
	for (region_type = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
	     region_type <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX;
	     region_type++) {
		ckpt = ftl_p2l_ckpt_new(dev, region_type);
		if (!ckpt) {
			return -1;
		}
		TAILQ_INSERT_TAIL(&dev->p2l_ckpt.free, ckpt, link);
	}
	return 0;
}

void
ftl_p2l_ckpt_deinit(struct spdk_ftl_dev *dev)
{
	struct ftl_p2l_ckpt *ckpt, *ckpt_next;

	TAILQ_FOREACH_SAFE(ckpt, &dev->p2l_ckpt.free, link, ckpt_next) {
		TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
		ftl_p2l_ckpt_destroy(ckpt);
	}

	TAILQ_FOREACH_SAFE(ckpt, &dev->p2l_ckpt.inuse, link, ckpt_next) {
		TAILQ_REMOVE(&dev->p2l_ckpt.inuse, ckpt, link);
		ftl_p2l_ckpt_destroy(ckpt);
	}
}

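/* Move any free checkpoint to the in-use list, for a band being opened for writing. */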
struct ftl_p2l_ckpt *
ftl_p2l_ckpt_acquire(struct spdk_ftl_dev *dev)
{
	struct ftl_p2l_ckpt *ckpt;

	ckpt = TAILQ_FIRST(&dev->p2l_ckpt.free);
	assert(ckpt);
	TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
	TAILQ_INSERT_TAIL(&dev->p2l_ckpt.inuse, ckpt, link);
	return ckpt;
}

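/* Return a checkpoint to the free list, clearing the DEBUG bitmap for reuse. */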
void
ftl_p2l_ckpt_release(struct spdk_ftl_dev *dev, struct ftl_p2l_ckpt *ckpt)
{
	assert(ckpt);
#if defined(DEBUG)
	memset(ckpt->dbg_bmp, 0, ckpt->dbg_bmp_sz);
#endif
	TAILQ_REMOVE(&dev->p2l_ckpt.inuse, ckpt, link);
	TAILQ_INSERT_TAIL(&dev->p2l_ckpt.free, ckpt, link);
}

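/* Completion of a checkpoint write issued from ftl_p2l_ckpt_issue(): retry on
 * error (or abort, depending on SPDK_FTL_RETRY_ON_ERROR), otherwise drop the
 * band's queue depth and hand the request back to its owner.
 */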
static void
ftl_p2l_ckpt_issue_end(int status, void *arg)
{
	struct ftl_rq *rq = arg;

	assert(rq);

	if (status) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* Retry the failed persist */
		ftl_md_persist_entry_retry(&rq->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	assert(rq->io.band->queue_depth > 0);
	rq->io.band->queue_depth--;

	rq->owner.cb(rq);
}

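/*
 * Checkpoint the P2L entries of one full transfer (rq->num_blocks == xfer_size):
 * update the band's in-memory P2L map, fill the corresponding checkpoint
 * page(s) with the same entries plus per-page metadata (seq ID, entry count,
 * CRC32C of the map), and persist them through the metadata layer.
 */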
void
ftl_p2l_ckpt_issue(struct ftl_rq *rq)
{
	struct ftl_rq_entry *iter = rq->entries;
	struct spdk_ftl_dev *dev = rq->dev;
	ftl_addr addr = rq->io.addr;
	struct ftl_p2l_ckpt *ckpt = NULL;
	struct ftl_p2l_ckpt_page_no_vss *map_page;
	struct ftl_band *band;
	uint64_t band_offs, p2l_map_page_no, cur_page, i, j;

	assert(rq);
	band = rq->io.band;
	ckpt = band->p2l_map.p2l_ckpt;
	assert(ckpt);
	assert(rq->num_blocks == dev->xfer_size);

	/* Derive the P2L map page number from the band offset of this transfer */
	band_offs = ftl_band_block_offset_from_addr(band, rq->io.addr);
	p2l_map_page_no = band_offs / dev->xfer_size * ckpt->pages_per_xfer;
	assert(p2l_map_page_no < ckpt->num_pages);

	/* Get the first checkpoint page for this transfer. The stored data holds the
	 * same entries as the band's tail P2L metadata (ftl_p2l_map_entry), but here
	 * whole 4KiB pages are filled and persisted together with per-page metadata.
	 */
	map_page = ftl_md_get_buffer(ckpt->md);
	assert(map_page);
	map_page += p2l_map_page_no;
	i = 0;
	for (cur_page = 0; cur_page < ckpt->pages_per_xfer; cur_page++) {
		struct ftl_p2l_ckpt_page_no_vss *page = map_page + cur_page;
		/* Update the band P2L map */
		for (j = 0; i < rq->num_blocks && j < FTL_NUM_P2L_ENTRIES_NO_VSS; i++, iter++, j++) {
			if (iter->lba != FTL_LBA_INVALID) {
				/* This is compaction or reloc */
				assert(!ftl_addr_in_nvc(rq->dev, addr));
				ftl_band_set_p2l(band, iter->lba, addr, iter->seq_id);
			}
			page->map[j].lba = iter->lba;
			page->map[j].seq_id = iter->seq_id;
			addr = ftl_band_next_addr(band, addr, 1);
		}

		/* Set up the per-page metadata */
		page->metadata.p2l_ckpt.seq_id = band->md->seq;
		page->metadata.p2l_ckpt.count = j;

#if defined(DEBUG)
		ftl_bitmap_set(ckpt->bmp, p2l_map_page_no + cur_page);
#endif
		page->metadata.p2l_ckpt.p2l_checksum = spdk_crc32c_update(page->map,
						       FTL_NUM_P2L_ENTRIES_NO_VSS * sizeof(struct ftl_p2l_map_entry), 0);
	}

	/* Persist the checkpoint pages for this transfer */
	ftl_md_persist_entries(ckpt->md, p2l_map_page_no, ckpt->pages_per_xfer, map_page, NULL,
			       ftl_p2l_ckpt_issue_end, rq, &rq->md_persist_entry_ctx);
}

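/*
 * DEBUG-only consistency check: once a band has been fully written, every data
 * page of its checkpoint must be marked in the bitmap and the tail-metadata
 * pages must not be.
 */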
#if defined(DEBUG)
static void
ftl_p2l_validate_pages(struct ftl_band *band, struct ftl_p2l_ckpt *ckpt,
		       uint64_t page_begin, uint64_t page_end, bool val)
{
	uint64_t page_no;

	for (page_no = page_begin; page_no < page_end; page_no++) {
		assert(ftl_bitmap_get(ckpt->bmp, page_no) == val);
	}
}

void
ftl_p2l_validate_ckpt(struct ftl_band *band)
{
	struct ftl_p2l_ckpt *ckpt = band->p2l_map.p2l_ckpt;
	uint64_t num_blks_tail_md, num_pages_tail_md;

	if (!ckpt) {
		return;
	}

	num_blks_tail_md = ftl_tail_md_num_blocks(band->dev);
	num_pages_tail_md = num_blks_tail_md / band->dev->xfer_size * ckpt->pages_per_xfer;

	assert(num_blks_tail_md % band->dev->xfer_size == 0);

	/* all data pages written */
	ftl_p2l_validate_pages(band, ckpt,
			       0, ckpt->num_pages - num_pages_tail_md, true);

	/* tail md pages not written */
	ftl_p2l_validate_pages(band, ckpt, ckpt->num_pages - num_pages_tail_md,
			       ckpt->num_pages, false);
}
#endif

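/* Find the open or full band currently assigned to the given P2L metadata region, if any. */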
static struct ftl_band *
ftl_get_band_from_region(struct spdk_ftl_dev *dev, enum ftl_layout_region_type type)
{
	struct ftl_band *band = NULL;
	uint64_t i;

	assert(type >= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN);
	assert(type <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX);

	for (i = 0; i < ftl_get_num_bands(dev); i++) {
		band = &dev->bands[i];
		if ((band->md->state == FTL_BAND_STATE_OPEN ||
		     band->md->state == FTL_BAND_STATE_FULL) &&
		    band->md->p2l_md_region == type) {
			return band;
		}
	}

	return NULL;
}

static void ftl_mngt_persist_band_p2l(struct ftl_mngt_process *mngt, struct ftl_p2l_sync_ctx *ctx);

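/*
 * Persisting a band's P2L on clean shutdown is a chain of asynchronous steps:
 * ftl_mngt_persist_band_p2l() writes the pages of one transfer, and this
 * completion moves on to the next transfer, or to the next metadata region
 * once the whole band is covered.
 */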
static void
ftl_p2l_ckpt_persist_end(int status, void *arg)
{
	struct ftl_mngt_process *mngt = arg;
	struct ftl_p2l_sync_ctx *ctx;

	assert(mngt);

	if (status) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	ctx = ftl_mngt_get_step_ctx(mngt);
	ctx->xfer_start++;

	if (ctx->xfer_start == ctx->xfer_end) {
		ctx->md_region++;
		ftl_mngt_continue_step(mngt);
	} else {
		ftl_mngt_persist_band_p2l(mngt, ctx);
	}
}

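/* Copy one transfer's worth of the band P2L map into checkpoint pages and persist them. */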
static void
ftl_mngt_persist_band_p2l(struct ftl_mngt_process *mngt, struct ftl_p2l_sync_ctx *ctx)
{
	struct ftl_band *band = ctx->band;
	struct ftl_p2l_ckpt_page_no_vss *map_page;
	struct ftl_p2l_map_entry *band_entries;
	struct ftl_p2l_ckpt *ckpt;
	struct spdk_ftl_dev *dev = band->dev;
	uint64_t cur_page;
	uint64_t lbas_synced = 0;

	ckpt = band->p2l_map.p2l_ckpt;

	map_page = ftl_md_get_buffer(ckpt->md);
	assert(map_page);

	map_page += ctx->xfer_start * ckpt->pages_per_xfer;

	for (cur_page = 0; cur_page < ckpt->pages_per_xfer; cur_page++) {
		struct ftl_p2l_ckpt_page_no_vss *page = map_page + cur_page;
		uint64_t lbas_to_copy = spdk_min(FTL_NUM_P2L_ENTRIES_NO_VSS, dev->xfer_size - lbas_synced);

		band_entries = band->p2l_map.band_map + ctx->xfer_start * dev->xfer_size + lbas_synced;
		memcpy(page->map, band_entries, lbas_to_copy * sizeof(struct ftl_p2l_map_entry));

		page->metadata.p2l_ckpt.seq_id = band->md->seq;
		page->metadata.p2l_ckpt.p2l_checksum = spdk_crc32c_update(page->map,
						       FTL_NUM_P2L_ENTRIES_NO_VSS * sizeof(struct ftl_p2l_map_entry), 0);
		page->metadata.p2l_ckpt.count = lbas_to_copy;
		lbas_synced += lbas_to_copy;
	}

	assert(lbas_synced == dev->xfer_size);

	/* Persist the checkpoint pages for this transfer */
	ftl_md_persist_entries(ckpt->md, ctx->xfer_start * ckpt->pages_per_xfer, ckpt->pages_per_xfer,
			       map_page, NULL,
			       ftl_p2l_ckpt_persist_end, mngt, &band->md_persist_entry_ctx);
}

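/*
 * Management step: for every P2L checkpoint region, persist the P2L map of the
 * band that owns it, up to the band's current write pointer. The step context
 * tracks the region and transfer being processed; the step re-enters via
 * ftl_mngt_continue_step() until all regions are done.
 */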
void
ftl_mngt_persist_bands_p2l(struct ftl_mngt_process *mngt)
{
	struct ftl_p2l_sync_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
	struct ftl_band *band;
	uint64_t band_offs, num_xfers;

	if (ctx->md_region > FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX) {
		ftl_mngt_next_step(mngt);
		return;
	}

	band = ftl_get_band_from_region(ftl_mngt_get_dev(mngt), ctx->md_region);

	/* No band has the md region assigned (shutdown happened before next_band was assigned) */
	if (!band) {
		ctx->xfer_start = 0;
		ctx->xfer_end = 0;
		ctx->md_region++;
		ftl_mngt_continue_step(mngt);
		return;
	}

	band_offs = ftl_band_block_offset_from_addr(band, band->md->iter.addr);
	num_xfers = band_offs / band->dev->xfer_size;

	ctx->xfer_start = 0;
	ctx->xfer_end = num_xfers;
	ctx->band = band;

	/* Band wasn't written to - no need to sync its P2L */
	if (ctx->xfer_end == 0) {
		ctx->md_region++;
		ftl_mngt_continue_step(mngt);
		return;
	}

	ftl_mngt_persist_band_p2l(mngt, ctx);
}

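/* Scan a checkpoint region and return the highest band sequence ID it holds. */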
uint64_t
ftl_mngt_p2l_ckpt_get_seq_id(struct spdk_ftl_dev *dev, int md_region)
{
	struct ftl_layout *layout = &dev->layout;
	struct ftl_md *md = layout->md[md_region];
	struct ftl_p2l_ckpt_page_no_vss *page = ftl_md_get_buffer(md);
	uint64_t page_no, seq_id = 0;

	for (page_no = 0; page_no < layout->p2l.ckpt_pages; page_no++, page++) {
		if (seq_id < page->metadata.p2l_ckpt.seq_id) {
			seq_id = page->metadata.p2l_ckpt.seq_id;
		}
	}
	return seq_id;
}

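/*
 * Restore a band's P2L map from its checkpoint after a dirty shutdown: copy
 * back every page stamped with the band's seq ID (verifying non-zero CRC32C
 * checksums), then rewind the band's write iterator to the first transfer
 * past the last checkpointed page.
 */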
int
ftl_mngt_p2l_ckpt_restore(struct ftl_band *band, uint32_t md_region, uint64_t seq_id)
{
	struct ftl_layout *layout = &band->dev->layout;
	struct ftl_md *md = layout->md[md_region];
	struct ftl_p2l_ckpt_page_no_vss *page = ftl_md_get_buffer(md);
	struct ftl_p2l_map_entry *band_entries;
	struct spdk_ftl_dev *dev = band->dev;
	uint64_t page_no, page_max = 0, xfer_count, lbas_synced;
	uint64_t pages_per_xfer = spdk_divide_round_up(dev->xfer_size, FTL_NUM_P2L_ENTRIES_NO_VSS);
	bool page_found = false;

	assert(band->md->p2l_md_region == md_region);
	if (band->md->p2l_md_region != md_region) {
		return -EINVAL;
	}

	assert(band->md->seq == seq_id);
	if (band->md->seq != seq_id) {
		return -EINVAL;
	}

	for (page_no = 0; page_no < layout->p2l.ckpt_pages; page_no++, page++) {
		if (page->metadata.p2l_ckpt.seq_id != seq_id) {
			continue;
		}

		page_max = page_no;
		page_found = true;

		if (page->metadata.p2l_ckpt.p2l_checksum &&
		    page->metadata.p2l_ckpt.p2l_checksum != spdk_crc32c_update(page->map,
				    FTL_NUM_P2L_ENTRIES_NO_VSS * sizeof(struct ftl_p2l_map_entry), 0)) {
			ftl_stats_crc_error(band->dev, FTL_STATS_TYPE_MD_NV_CACHE);
			return -EINVAL;
		}

		xfer_count = page_no / pages_per_xfer;
		lbas_synced = (page_no % pages_per_xfer) * FTL_NUM_P2L_ENTRIES_NO_VSS;

		/* Restore this page into the band P2L map */
		band_entries = band->p2l_map.band_map + xfer_count * dev->xfer_size + lbas_synced;

		memcpy(band_entries, page->map, page->metadata.p2l_ckpt.count * sizeof(struct ftl_p2l_map_entry));
	}

	assert(page_found);
	if (!page_found) {
		return -EINVAL;
	}

	/* Reattach the checkpoint region to the band's P2L map */
	band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(
					 band->dev, md_region);

	/* Round page_max up to the last page of its transfer */
	if ((page_max + 1) % pages_per_xfer != 0) {
		page_max += (pages_per_xfer - page_max % pages_per_xfer - 1);
	}
#ifdef DEBUG
	/* Mark the restored pages in the checkpoint valid map for validation */
	struct ftl_p2l_ckpt *ckpt = band->p2l_map.p2l_ckpt;
	for (uint64_t i = 0; i <= page_max; i++) {
		ftl_bitmap_set(ckpt->bmp, i);
	}
#endif

	ftl_band_iter_init(band);
	/* Resume writing at the first transfer past the restored pages */
	ftl_band_iter_set(band, (page_max / band->p2l_map.p2l_ckpt->pages_per_xfer + 1) * dev->xfer_size);

	return 0;
}

enum ftl_layout_region_type
ftl_p2l_ckpt_region_type(const struct ftl_p2l_ckpt *ckpt) {
	return ckpt->layout_region->type;
}

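/* Take the checkpoint backed by a specific metadata region off the free list;
 * used by the recovery paths to reattach a band to its pre-shutdown region.
 */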
struct ftl_p2l_ckpt *
ftl_p2l_ckpt_acquire_region_type(struct spdk_ftl_dev *dev, uint32_t region_type)
{
	struct ftl_p2l_ckpt *ckpt = NULL;

	TAILQ_FOREACH(ckpt, &dev->p2l_ckpt.free, link) {
		if (ckpt->layout_region->type == region_type) {
			break;
		}
	}

	assert(ckpt);

	TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
	TAILQ_INSERT_TAIL(&dev->p2l_ckpt.inuse, ckpt, link);

	return ckpt;
}

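/*
 * Clean-shutdown variant of the restore path: the band metadata is trusted,
 * so pages up to the band's write offset are copied back; seq IDs are only
 * asserted rather than CRC-verified.
 */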
int
ftl_mngt_p2l_ckpt_restore_clean(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_layout *layout = &dev->layout;
	struct ftl_p2l_ckpt_page_no_vss *page;
	enum ftl_layout_region_type md_region = band->md->p2l_md_region;
	struct ftl_p2l_ckpt *ckpt;
	uint64_t page_no;
	uint64_t num_written_pages, lbas_synced;

	if (md_region < FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN ||
	    md_region > FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX) {
		return -EINVAL;
	}

	assert(band->md->iter.offset % dev->xfer_size == 0);

	/* Associate band with the md region it held before shutdown */
	if (!band->p2l_map.p2l_ckpt) {
		band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(dev, md_region);
	}

	/* Band was opened but no data was written */
	if (band->md->iter.offset == 0) {
		return 0;
	}

	ckpt = band->p2l_map.p2l_ckpt;
	num_written_pages = band->md->iter.offset / dev->xfer_size * ckpt->pages_per_xfer;

	page_no = 0;
	lbas_synced = 0;

	/* Restore the P2L map up to the last written page */
	page = ftl_md_get_buffer(layout->md[md_region]);

	for (; page_no < num_written_pages; page_no++, page++) {
		assert(page->metadata.p2l_ckpt.seq_id == band->md->seq);

		/* Restore this page from the P2L checkpoint */
		memcpy(band->p2l_map.band_map + lbas_synced, page->map,
		       page->metadata.p2l_ckpt.count * sizeof(struct ftl_p2l_map_entry));

		lbas_synced += page->metadata.p2l_ckpt.count;

#if defined(DEBUG)
		assert(ftl_bitmap_get(band->p2l_map.p2l_ckpt->bmp, page_no) == false);
		ftl_bitmap_set(band->p2l_map.p2l_ckpt->bmp, page_no);
#endif
	}

	assert(lbas_synced % dev->xfer_size == 0);

	/* The page past the last written one must not carry this band's seq ID */
	assert(page->metadata.p2l_ckpt.seq_id < band->md->seq);

	return 0;
}

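/*
 * Shared-memory fast-start variant: the band P2L map already lives in SHM, so
 * only the band-to-checkpoint association is re-established; under DEBUG the
 * bitmap of written checkpoint pages is rebuilt for validation.
 */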
void
ftl_mngt_p2l_ckpt_restore_shm_clean(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	enum ftl_layout_region_type md_region = band->md->p2l_md_region;

	/* Associate band with the md region it held before shutdown */
	if (!band->p2l_map.p2l_ckpt) {
		band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(dev, md_region);
	}

#if defined(DEBUG)
	uint64_t page_no;
	uint64_t num_written_pages;

	assert(band->md->iter.offset % dev->xfer_size == 0);
	num_written_pages = band->md->iter.offset / dev->xfer_size * band->p2l_map.p2l_ckpt->pages_per_xfer;

	/* Band was opened but no data was written */
	if (band->md->iter.offset == 0) {
		return;
	}

	/* Mark every written checkpoint page in the debug bitmap */
	page_no = 0;

	for (; page_no < num_written_pages; page_no++) {
		assert(ftl_bitmap_get(band->p2l_map.p2l_ckpt->bmp, page_no) == false);
		ftl_bitmap_set(band->p2l_map.p2l_ckpt->bmp, page_no);
	}
#endif
}