xref: /spdk/test/unit/lib/ftl/ftl_p2l.c/ftl_p2l_ut.c (revision 95d6c9fac17572b107042103439aafd696d60b0e)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright 2023 Solidigm All Rights Reserved
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk_internal/cunit.h"
9 #include "common/lib/test_env.c"
10 
11 #include "ftl/ftl_core.c"
12 #include "ftl/ftl_p2l.c"
13 
/* Global fixtures shared by every test case; (re)initialized by
 * dev_setup()/band_setup() at the start of each scenario.
 */
struct spdk_ftl_dev g_dev;
struct ftl_band g_band;
/* Backing storage handed out by the ftl_md_get_buffer() mock below;
 * allocated in band_setup() and released in band_free().
 */
void *md_buffer;

/* Stubs for FTL/bdev dependencies pulled in by ftl_core.c/ftl_p2l.c but not
 * exercised by the P2L checkpoint tests.
 */
DEFINE_STUB(ftl_bitmap_create, struct ftl_bitmap *, (void *buf, size_t size), (void *)1);
DEFINE_STUB_V(ftl_bitmap_destroy, (struct ftl_bitmap *bitmap));
DEFINE_STUB_V(ftl_bitmap_set, (struct ftl_bitmap *bitmap, uint64_t bit));
DEFINE_STUB(ftl_bitmap_get, bool, (const struct ftl_bitmap *bitmap, uint64_t bit), false);
DEFINE_STUB_V(ftl_bitmap_clear, (struct ftl_bitmap *bitmap, uint64_t bit));
DEFINE_STUB(ftl_md_vss_buf_alloc, union ftl_md_vss *, (struct ftl_layout_region *region,
		uint32_t count), NULL);
DEFINE_STUB_V(ftl_band_set_p2l, (struct ftl_band *band, uint64_t lba, ftl_addr addr,
				 uint64_t seq_id));
DEFINE_STUB_V(ftl_md_persist, (struct ftl_md *md));
DEFINE_STUB_V(ftl_md_persist_entries, (struct ftl_md *md, uint64_t start_entry,
				       uint64_t num_entries, void *buffer,
				       void *vss_buffer, ftl_md_io_entry_cb cb, void *cb_arg,
				       struct ftl_md_io_entry_ctx *ctx));
DEFINE_STUB(ftl_mngt_get_step_ctx, void *, (struct ftl_mngt_process *mngt), NULL);
DEFINE_STUB_V(ftl_mngt_continue_step, (struct ftl_mngt_process *mngt));
DEFINE_STUB_V(ftl_mngt_next_step, (struct ftl_mngt_process *mngt));
DEFINE_STUB_V(ftl_mngt_fail_step, (struct ftl_mngt_process *mngt));
DEFINE_STUB(ftl_band_from_addr, struct ftl_band *, (struct spdk_ftl_dev *dev, ftl_addr addr), NULL);
DEFINE_STUB(ftl_io_init, int, (struct spdk_io_channel *_ioch, struct ftl_io *io, uint64_t lba,
			       size_t num_blocks,
			       struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_ctx, int type), 0);
DEFINE_STUB_V(ftl_io_inc_req, (struct ftl_io *io));
DEFINE_STUB_V(ftl_io_dec_req, (struct ftl_io *io));
DEFINE_STUB(ftl_io_iovec_addr, void *, (struct ftl_io *io), NULL);
DEFINE_STUB(ftl_io_iovec_len_left, size_t, (struct ftl_io *io), 0);
DEFINE_STUB_V(ftl_io_advance, (struct ftl_io *io, size_t num_blocks));
DEFINE_STUB(ftl_io_current_lba, uint64_t, (const struct ftl_io *io), 0);
DEFINE_STUB(ftl_io_get_lba, uint64_t, (const struct ftl_io *io, size_t offset), 0);
DEFINE_STUB(ftl_io_channel_get_ctx, struct ftl_io_channel *, (struct spdk_io_channel *ioch), NULL);
DEFINE_STUB(ftl_iovec_num_blocks, size_t, (struct iovec *iov, size_t iov_cnt), 0);
DEFINE_STUB_V(ftl_io_complete, (struct ftl_io *io));
DEFINE_STUB(ftl_mngt_trim, int, (struct spdk_ftl_dev *dev, uint64_t lba, uint64_t num_blocks,
				 spdk_ftl_fn cb, void *cb_cntx), 0);
DEFINE_STUB(ftl_md_get_vss_buffer, union ftl_md_vss *, (struct ftl_md *md), NULL);
DEFINE_STUB_V(ftl_writer_run, (struct ftl_writer *writer));
DEFINE_STUB_V(ftl_reloc, (struct ftl_reloc *reloc));
DEFINE_STUB_V(ftl_l2p_process, (struct spdk_ftl_dev *dev));
DEFINE_STUB_V(ftl_nv_cache_process, (struct spdk_ftl_dev *dev));
DEFINE_STUB(ftl_reloc_is_halted, bool, (const struct ftl_reloc *reloc), false);
DEFINE_STUB(ftl_writer_is_halted, bool, (struct ftl_writer *writer), true);
DEFINE_STUB(ftl_nv_cache_is_halted, bool, (struct ftl_nv_cache *nvc), true);
DEFINE_STUB(ftl_l2p_is_halted, bool, (struct spdk_ftl_dev *dev), true);
DEFINE_STUB_V(ftl_reloc_halt, (struct ftl_reloc *reloc));
DEFINE_STUB_V(ftl_nv_cache_halt, (struct ftl_nv_cache *nvc));
DEFINE_STUB_V(ftl_l2p_halt, (struct spdk_ftl_dev *dev));
DEFINE_STUB(ftl_nv_cache_chunks_busy, int, (struct ftl_nv_cache *nvc), true);
DEFINE_STUB(ftl_nv_cache_throttle, bool, (struct spdk_ftl_dev *dev), true);
DEFINE_STUB(ftl_nv_cache_write, bool, (struct ftl_io *io), true);
DEFINE_STUB_V(ftl_band_set_state, (struct ftl_band *band, enum ftl_band_state state));
DEFINE_STUB_V(spdk_bdev_io_get_nvme_status, (const struct spdk_bdev_io *bdev_io, uint32_t *cdw0,
		int *sct, int *sc));
DEFINE_STUB(ftl_mngt_get_dev, struct spdk_ftl_dev *, (struct ftl_mngt_process *mngt), NULL);
DEFINE_STUB_V(ftl_l2p_pin, (struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count,
			    ftl_l2p_pin_cb cb, void *cb_ctx,
			    struct ftl_l2p_pin_ctx *pin_ctx));
DEFINE_STUB_V(ftl_l2p_pin_skip, (struct spdk_ftl_dev *dev, ftl_l2p_pin_cb cb, void *cb_ctx,
				 struct ftl_l2p_pin_ctx *pin_ctx));
DEFINE_STUB(ftl_l2p_get, ftl_addr, (struct spdk_ftl_dev *dev, uint64_t lba), 0);
DEFINE_STUB(ftl_nv_cache_acquire_trim_seq_id, uint64_t, (struct ftl_nv_cache *nv_cache), 0);
DEFINE_STUB(ftl_nv_cache_read, int, (struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
				     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_read_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(ftl_mempool_get, void *, (struct ftl_mempool *mpool), NULL);
DEFINE_STUB(ftl_layout_upgrade_drop_regions, int, (struct spdk_ftl_dev *dev), 0);

/* Trace hooks are compiled into the FTL core only in DEBUG builds. */
#if defined(DEBUG)
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     ftl_addr addr, size_t addr_cnt));
DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     enum ftl_trace_completion completion));
#endif
97 
98 struct ftl_layout_region *
99 ftl_layout_region_get(struct spdk_ftl_dev *dev, enum ftl_layout_region_type reg_type)
100 {
101 	assert(reg_type < FTL_LAYOUT_REGION_TYPE_MAX);
102 	return &g_dev.layout.region[reg_type];
103 }
104 
105 uint64_t
106 ftl_band_block_offset_from_addr(struct ftl_band *band, ftl_addr addr)
107 {
108 	return addr - band->start_addr;
109 }
110 
111 ftl_addr
112 ftl_band_addr_from_block_offset(struct ftl_band *band, uint64_t block_off)
113 {
114 	ftl_addr addr;
115 
116 	addr = block_off + band->start_addr;
117 	return addr;
118 }
119 
120 ftl_addr
121 ftl_band_next_addr(struct ftl_band *band, ftl_addr addr, size_t offset)
122 {
123 	uint64_t block_off = ftl_band_block_offset_from_addr(band, addr);
124 
125 	return ftl_band_addr_from_block_offset(band, block_off + offset);
126 }
127 
/* Mock: every metadata object resolves to the single global buffer allocated
 * in band_setup(), regardless of which md is asked for.
 */
void *
ftl_md_get_buffer(struct ftl_md *md)
{
	return md_buffer;
}
133 
134 ftl_addr
135 ftl_band_next_xfer_addr(struct ftl_band *band, ftl_addr addr, size_t num_blocks)
136 {
137 	CU_ASSERT_EQUAL(num_blocks % g_dev.xfer_size, 0);
138 	return addr += num_blocks;
139 }
140 
/* Initialize the global mock device for the given geometry (both arguments
 * are in FTL blocks) and recompute the derived P2L checkpoint layout:
 * pages needed per transfer unit and total checkpoint pages per band.
 */
static void
dev_setup(uint64_t xfer_size, uint64_t band_size)
{
	/* Write unit size in blocks (varies per test case, e.g. 512 KiB / 4 KiB). */
	g_dev.xfer_size = xfer_size;
	/* Band capacity in blocks (e.g. 1 GiB / 4 KiB). */
	g_dev.num_blocks_in_band = band_size;
	g_dev.nv_cache.md_size = 0;
	g_dev.bands = &g_band;
	/* Large fixed base-device size, big enough for any geometry used here. */
	g_dev.layout.base.total_blocks = (uint64_t)100 * 1024 * 1024 * 1024;
	g_dev.layout.p2l.pages_per_xfer = spdk_divide_round_up(xfer_size, FTL_NUM_P2L_ENTRIES_NO_VSS);
	g_dev.layout.p2l.ckpt_pages = spdk_divide_round_up(band_size,
				      xfer_size) * g_dev.layout.p2l.pages_per_xfer;
	g_dev.layout.region[FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC].type = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC;
	g_dev.layout.region[FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC].mirror_type =
		FTL_LAYOUT_REGION_TYPE_INVALID;
	TAILQ_INIT(&g_dev.p2l_ckpt.free);
	TAILQ_INIT(&g_dev.p2l_ckpt.inuse);
}
160 
161 static void
162 band_setup(struct ftl_p2l_ckpt *ckpt, uint64_t xfer_size)
163 {
164 	g_band.p2l_map.p2l_ckpt = ckpt;
165 	g_band.dev = &g_dev;
166 	g_band.md = calloc(1, sizeof(struct ftl_band_md));
167 	g_band.md->seq = 0xDEADBEEF;
168 	g_band.md->p2l_md_region = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC;
169 	g_band.p2l_map.band_map = calloc(g_dev.num_blocks_in_band, sizeof(struct ftl_p2l_map_entry));
170 	md_buffer = calloc(1, 1024 * 1024 * 1024);
171 }
172 
173 static void
174 band_free(struct ftl_band *band)
175 {
176 	free(md_buffer);
177 	free(band->md);
178 	free(band->p2l_map.band_map);
179 }
180 
181 static void
182 test_p2l_num_pages(void)
183 {
184 	struct ftl_p2l_ckpt *ckpt;
185 	uint64_t xfer_size, band_size;
186 
187 	/* 1GiB band size, xfer size 512KiB, each write unit needs 1 page */
188 	band_size = 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
189 	xfer_size = 512 * 1024 / FTL_BLOCK_SIZE;
190 	dev_setup(xfer_size, band_size);
191 	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
192 	CU_ASSERT_EQUAL(ckpt->num_pages, 2048);
193 	ftl_p2l_ckpt_destroy(ckpt);
194 
195 	/* 1GiB band size, xfer size 256KiB, each write unit needs 1 page */
196 	xfer_size = 256 * 1024 / FTL_BLOCK_SIZE;
197 	dev_setup(xfer_size, band_size);
198 	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
199 	CU_ASSERT_EQUAL(ckpt->num_pages, 4096);
200 	ftl_p2l_ckpt_destroy(ckpt);
201 
202 	/* 1GiB band size, xfer size 4KiB, each write unit needs 1 page */
203 	xfer_size = 1;
204 	dev_setup(xfer_size, band_size);
205 	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
206 	CU_ASSERT_EQUAL(ckpt->num_pages, 262144);
207 	ftl_p2l_ckpt_destroy(ckpt);
208 
209 	/* 1GiB band size, xfer size 1MiB, each write unit needs 2 pages */
210 	xfer_size = 1024 * 1024 / FTL_BLOCK_SIZE;
211 	dev_setup(xfer_size, band_size);
212 	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
213 	CU_ASSERT_EQUAL(ckpt->num_pages, 2048);
214 	ftl_p2l_ckpt_destroy(ckpt);
215 
216 	/* 1GiB band size, xfer size 2MiB, each write unit needs 3 pages */
217 	xfer_size = 2 * 1024 * 1024 / FTL_BLOCK_SIZE;
218 	dev_setup(xfer_size, band_size);
219 	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
220 	CU_ASSERT_EQUAL(ckpt->num_pages, 1536);
221 	ftl_p2l_ckpt_destroy(ckpt);
222 
223 	/* 1GiB band size, xfer size 8MiB, each write unit needs 9 pages */
224 	xfer_size = 8 * 1024 * 1024 / FTL_BLOCK_SIZE;
225 	dev_setup(xfer_size, band_size);
226 	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
227 	CU_ASSERT_EQUAL(ckpt->num_pages, 1152);
228 	ftl_p2l_ckpt_destroy(ckpt);
229 
230 	/* 3GiB band size, xfer size 1.5MiB, each write unit needs 2 pages */
231 	band_size = (uint64_t)3 * 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
232 	xfer_size = 3 * 512 * 1024 / FTL_BLOCK_SIZE;
233 	dev_setup(xfer_size, band_size);
234 	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
235 	CU_ASSERT_EQUAL(ckpt->num_pages, 4096);
236 	ftl_p2l_ckpt_destroy(ckpt);
237 
238 	/* 3GiB band size, xfer size 0.75MiB, each write unit needs 1 page */
239 	xfer_size = 3 * 256 * 1024 / FTL_BLOCK_SIZE;
240 	dev_setup(xfer_size, band_size);
241 	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
242 	CU_ASSERT_EQUAL(ckpt->num_pages, 4096);
243 	ftl_p2l_ckpt_destroy(ckpt);
244 }
245 
246 static struct ftl_rq *
247 setup_rq(uint64_t xfer_size, uint64_t start_lba)
248 {
249 	struct ftl_rq *rq;
250 
251 	rq = calloc(1, xfer_size * sizeof(struct ftl_rq_entry) + sizeof(struct ftl_rq));
252 	rq->dev = &g_dev;
253 	rq->io.band = &g_band;
254 	rq->io.addr = start_lba;
255 	rq->num_blocks = xfer_size;
256 
257 	for (uint64_t i = 0; i < xfer_size; i++) {
258 		rq->entries[i].lba = start_lba + i;
259 		rq->entries[i].seq_id = 1;
260 	}
261 
262 	return rq;
263 }
264 
/* Release a request created by setup_rq(). */
static void
free_rq(struct ftl_rq *rq)
{
	free(rq);
}
270 
/* Verify checkpoint pages in md_buffer, starting at start_page, against the
 * identity mapping written by the fixtures: num_lbas consecutive entries
 * holding LBAs [start_lba, start_lba + num_lbas) with per-entry seq_id 1 and
 * band seq_id 0xDEADBEEF in every page header. Each fully used page must
 * report FTL_NUM_P2L_ENTRIES_NO_VSS entries; the final page the remainder.
 * NOTE(review): the trailing count check uses num_lbas modulo the page
 * capacity, which would expect 0 if num_lbas were an exact multiple of
 * FTL_NUM_P2L_ENTRIES_NO_VSS; current callers never hit that case.
 */
static void
verify_p2l(uint64_t start_page, uint64_t start_lba, uint64_t num_lbas)
{
	struct ftl_p2l_ckpt_page_no_vss *map_page, *first_page = md_buffer;
	uint64_t entry_idx = 0;

	map_page = first_page + start_page;

	for (uint64_t i = start_lba; i < start_lba + num_lbas; i++, entry_idx++) {
		if (entry_idx == FTL_NUM_P2L_ENTRIES_NO_VSS) {
			/* Previous page filled up completely - it must report a full count. */
			CU_ASSERT_EQUAL(map_page->metadata.p2l_ckpt.count, FTL_NUM_P2L_ENTRIES_NO_VSS);
			entry_idx = 0;
			map_page++;
		}
		CU_ASSERT_EQUAL(map_page->metadata.p2l_ckpt.seq_id, 0xDEADBEEF);

		CU_ASSERT_EQUAL(map_page->map[entry_idx].lba, i);
		CU_ASSERT_EQUAL(map_page->map[entry_idx].seq_id, 1);
	}

	/* The last (partially filled) page reports only the leftover entries. */
	CU_ASSERT_EQUAL(map_page->metadata.p2l_ckpt.count, num_lbas % FTL_NUM_P2L_ENTRIES_NO_VSS);
}
293 
/* Verify that ftl_p2l_ckpt_issue() records request entries into the expected
 * checkpoint pages of md_buffer, for geometries needing one and two P2L
 * pages per write unit.
 */
static void
test_ckpt_issue(void)
{
	struct ftl_p2l_ckpt *ckpt;
	struct ftl_rq *rq;
	uint64_t xfer_size, band_size;

	/* 1GiB band size, xfer size 512KiB, each write unit needs 1 page */
	band_size = 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
	xfer_size = 512 * 1024 / FTL_BLOCK_SIZE;
	dev_setup(xfer_size, band_size);
	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
	band_setup(ckpt, xfer_size);

	/* Issue 2x 512 KiB writes */
	rq = setup_rq(xfer_size, 0);
	ftl_p2l_ckpt_issue(rq);
	free_rq(rq);

	rq = setup_rq(xfer_size, xfer_size);
	ftl_p2l_ckpt_issue(rq);
	free_rq(rq);

	/* Check contents of two expected P2L pages (one per write) */
	verify_p2l(0, 0, xfer_size);
	verify_p2l(1, xfer_size, xfer_size);

	ftl_p2l_ckpt_destroy(ckpt);
	band_free(&g_band);

	/* 1GiB band size, xfer size 1MiB, each write unit needs 2 pages */
	xfer_size = 1024 * 1024 / FTL_BLOCK_SIZE;
	dev_setup(xfer_size, band_size);
	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
	band_setup(ckpt, xfer_size);

	/* Issue 2x 1 MiB writes */
	rq = setup_rq(xfer_size, 0);
	ftl_p2l_ckpt_issue(rq);
	free_rq(rq);

	rq = setup_rq(xfer_size, xfer_size);
	ftl_p2l_ckpt_issue(rq);
	free_rq(rq);

	/* Check contents of four expected P2L pages (two per write) */
	verify_p2l(0, 0, xfer_size);
	verify_p2l(2, xfer_size, xfer_size);

	ftl_p2l_ckpt_destroy(ckpt);
	band_free(&g_band);
}
346 
/* Point a persist context at the global band and the given write unit index. */
static void
setup_sync_ctx(struct ftl_p2l_sync_ctx *ctx, uint64_t xfer_start)
{
	ctx->band = &g_band;
	ctx->xfer_start = xfer_start;
}
353 
354 static void
355 fill_band_p2l(struct ftl_band *band, uint64_t start_lba)
356 {
357 	for (uint64_t i = 0; i < g_dev.num_blocks_in_band; i++) {
358 		band->p2l_map.band_map[i].lba = start_lba + i;
359 		band->p2l_map.band_map[i].seq_id = 1;
360 	}
361 }
362 
/* Verify that ftl_mngt_persist_band_p2l() serializes the band's in-memory
 * P2L map into the expected checkpoint pages, for geometries needing one
 * and two pages per write unit.
 */
static void
test_persist_band_p2l(void)
{
	struct ftl_p2l_sync_ctx ctx;
	struct ftl_p2l_ckpt *ckpt;
	uint64_t xfer_size, band_size;

	/* 1GiB band size, xfer size 512KiB, each write unit needs 1 page */
	band_size = 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
	xfer_size = 512 * 1024 / FTL_BLOCK_SIZE;
	dev_setup(xfer_size, band_size);
	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
	band_setup(ckpt, xfer_size);
	fill_band_p2l(&g_band, 0);

	/* Persist the first two write units of the band */
	setup_sync_ctx(&ctx, 0);
	ftl_mngt_persist_band_p2l(NULL, &ctx);

	setup_sync_ctx(&ctx, 1);
	ftl_mngt_persist_band_p2l(NULL, &ctx);

	/* Check contents of two expected P2L pages (one per write unit) */
	verify_p2l(0, 0, xfer_size);
	verify_p2l(1, xfer_size, xfer_size);

	ftl_p2l_ckpt_destroy(ckpt);
	band_free(&g_band);


	/* 1GiB band size, xfer size 1MiB, each write unit needs 2 pages */
	band_size = 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
	xfer_size = 1024 * 1024 / FTL_BLOCK_SIZE;
	dev_setup(xfer_size, band_size);
	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
	band_setup(ckpt, xfer_size);
	fill_band_p2l(&g_band, 0);

	/* Persist the first two write units of the band */
	setup_sync_ctx(&ctx, 0);
	ftl_mngt_persist_band_p2l(NULL, &ctx);

	setup_sync_ctx(&ctx, 1);
	ftl_mngt_persist_band_p2l(NULL, &ctx);

	/* Check contents of four expected P2L pages (two per write unit) */
	verify_p2l(0, 0, xfer_size);
	verify_p2l(2, xfer_size, xfer_size);

	ftl_p2l_ckpt_destroy(ckpt);
	band_free(&g_band);
}
415 
/* Populate checkpoint pages in md_buffer the way an interrupted (running)
 * write would have left them: starting at starting_page, store num_lbas
 * entries mapping consecutive page slots to LBAs
 * [starting_lba, starting_lba + num_lbas), with per-entry seq_id 1, band
 * seq_id 0xDEADBEEF and an accurate running entry count in each page header.
 */
static void
fill_running_p2l(uint64_t starting_page, uint64_t starting_lba, uint64_t num_lbas)
{
	struct ftl_p2l_ckpt_page_no_vss *map_page = md_buffer;
	uint64_t page_counter = 0;

	map_page += starting_page;
	map_page->metadata.p2l_ckpt.count = 0;

	for (uint64_t i = 0; i < num_lbas; i++, page_counter++) {
		if (page_counter == FTL_NUM_P2L_ENTRIES_NO_VSS) {
			/* Current page is full - start filling the next one. */
			page_counter = 0;
			map_page++;
			map_page->metadata.p2l_ckpt.count = 0;
		}
		map_page->metadata.p2l_ckpt.seq_id = 0xDEADBEEF;
		map_page->metadata.p2l_ckpt.count++;
		map_page->map[page_counter].lba = starting_lba + i;
		map_page->map[page_counter].seq_id = 1;
	}
}
437 
/* Verify that restored band P2L entries hold the identity mapping
 * (lba == block index) with seq_id 1.
 * NOTE(review): the loop bound looks wrong - `i < num_entries` treats
 * num_entries as an END INDEX, while every caller passes it as a COUNT.
 * Any call with start_entry >= num_entries (e.g.
 * verify_band_p2l(&g_band, xfer_size, xfer_size) in test_dirty_restore_p2l)
 * iterates zero times and verifies nothing. The likely intent is
 * `i < start_entry + num_entries`, but the partial-restore fixtures in
 * test_dirty_restore_p2l appear to rely on this masking - confirm the
 * restore code's page-to-block-offset mapping before tightening the bound.
 */
static void
verify_band_p2l(struct ftl_band *band, uint64_t start_entry, uint64_t num_entries)
{
	for (uint64_t i = start_entry; i < num_entries; i++) {
		CU_ASSERT_EQUAL(band->p2l_map.band_map[i].seq_id, 1);
		CU_ASSERT_EQUAL(band->p2l_map.band_map[i].lba, i);
	}
}
446 
/* Verify ftl_mngt_p2l_ckpt_restore_clean(): checkpoint pages written up to
 * the band's iterator offset are read back into the in-memory band P2L map.
 */
static void
test_clean_restore_p2l(void)
{
	struct ftl_p2l_ckpt *ckpt;
	uint64_t xfer_size, band_size;

	/* 1GiB band size, xfer size 512KiB, each write unit needs 1 page */
	band_size = 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
	xfer_size = 512 * 1024 / FTL_BLOCK_SIZE;
	dev_setup(xfer_size, band_size);
	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
	band_setup(ckpt, xfer_size);

	fill_running_p2l(0, 0, xfer_size);
	fill_running_p2l(1, xfer_size, xfer_size);
	/* Sanity-check the fixture before restoring from it. */
	verify_p2l(0, 0, xfer_size);
	verify_p2l(1, xfer_size, xfer_size);
	g_band.md->iter.offset = 2 * xfer_size;

	ftl_mngt_p2l_ckpt_restore_clean(&g_band);
	verify_band_p2l(&g_band, 0, 2 * xfer_size);

	ftl_p2l_ckpt_destroy(ckpt);
	band_free(&g_band);


	/* 1GiB band size, xfer size 1MiB, each write unit needs 2 pages */
	band_size = 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
	xfer_size = 1024 * 1024 / FTL_BLOCK_SIZE;
	dev_setup(xfer_size, band_size);
	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
	band_setup(ckpt, xfer_size);

	fill_running_p2l(0, 0, xfer_size);
	fill_running_p2l(2, xfer_size, xfer_size);
	/* Sanity-check the fixture before restoring from it. */
	verify_p2l(0, 0, xfer_size);
	verify_p2l(2, xfer_size, xfer_size);
	g_band.md->iter.offset = 2 * xfer_size;

	ftl_mngt_p2l_ckpt_restore_clean(&g_band);
	verify_band_p2l(&g_band, 0, 2 * xfer_size);

	ftl_p2l_ckpt_destroy(ckpt);
	band_free(&g_band);
}
492 
/* Verify ftl_mngt_p2l_ckpt_restore() after a dirty shutdown: pages whose
 * header seq_id matches the band's are restored into the band P2L map, and
 * the band iterator offset is advanced past the last written write unit.
 * NOTE(review): in the last three scenarios the verify_band_p2l() calls pass
 * start_entry >= num_entries, so with the helper's current loop bound they
 * check zero entries - only the iter.offset assertions are effective there.
 */
static void
test_dirty_restore_p2l(void)
{
	struct ftl_p2l_ckpt *ckpt;
	uint64_t xfer_size, band_size;

	/* 1GiB band size, xfer size 512KiB, each write unit needs 1 page */
	band_size = 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
	xfer_size = 512 * 1024 / FTL_BLOCK_SIZE;
	dev_setup(xfer_size, band_size);
	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
	/* Restore acquires its checkpoint from the device's free list. */
	TAILQ_INSERT_TAIL(&g_dev.p2l_ckpt.free, ckpt, link);
	band_setup(ckpt, xfer_size);

	/* Running P2L are fully filled */
	fill_running_p2l(0, 0, xfer_size);
	fill_running_p2l(1, xfer_size, xfer_size);

	ftl_mngt_p2l_ckpt_restore(&g_band, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC, 0xDEADBEEF);
	verify_band_p2l(&g_band, 0, 2 * xfer_size);
	CU_ASSERT_EQUAL(g_band.md->iter.offset, 2 * xfer_size);

	TAILQ_REMOVE(&g_dev.p2l_ckpt.inuse, ckpt, link);
	ftl_p2l_ckpt_destroy(ckpt);
	band_free(&g_band);


	/* 1GiB band size, xfer size 1MiB, each write unit needs 2 pages */
	band_size = 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
	xfer_size = 1024 * 1024 / FTL_BLOCK_SIZE;
	dev_setup(xfer_size, band_size);
	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
	TAILQ_INSERT_TAIL(&g_dev.p2l_ckpt.free, ckpt, link);
	band_setup(ckpt, xfer_size);

	/* Running P2L are fully filled */
	fill_running_p2l(0, 0, xfer_size);
	fill_running_p2l(2, xfer_size, xfer_size);

	ftl_mngt_p2l_ckpt_restore(&g_band, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC, 0xDEADBEEF);
	verify_band_p2l(&g_band, 0, 2 * xfer_size);

	CU_ASSERT_EQUAL(g_band.md->iter.offset, 2 * xfer_size);

	TAILQ_REMOVE(&g_dev.p2l_ckpt.inuse, ckpt, link);
	ftl_p2l_ckpt_destroy(ckpt);
	band_free(&g_band);

	/* 1GiB band size, xfer size 1MiB, each write unit needs 2 pages */
	band_size = 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
	xfer_size = 1024 * 1024 / FTL_BLOCK_SIZE;
	dev_setup(xfer_size, band_size);
	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
	TAILQ_INSERT_TAIL(&g_dev.p2l_ckpt.free, ckpt, link);
	band_setup(ckpt, xfer_size);

	/* Running P2L are fully filled, only second xfer_size was written */
	fill_running_p2l(2, xfer_size, xfer_size);

	ftl_mngt_p2l_ckpt_restore(&g_band, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC, 0xDEADBEEF);
	/* NOTE(review): start >= count here, so this verifies zero entries. */
	verify_band_p2l(&g_band, xfer_size, xfer_size);

	CU_ASSERT_EQUAL(g_band.md->iter.offset, 2 * xfer_size);

	TAILQ_REMOVE(&g_dev.p2l_ckpt.inuse, ckpt, link);
	ftl_p2l_ckpt_destroy(ckpt);
	band_free(&g_band);

	/* 1GiB band size, xfer size 1MiB, each write unit needs 2 pages */
	band_size = 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
	xfer_size = 1024 * 1024 / FTL_BLOCK_SIZE;
	dev_setup(xfer_size, band_size);
	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
	TAILQ_INSERT_TAIL(&g_dev.p2l_ckpt.free, ckpt, link);
	band_setup(ckpt, xfer_size);

	/* Running P2L is partially filled, only first part of second xfer_size was written */
	fill_running_p2l(2, xfer_size, FTL_NUM_P2L_ENTRIES_NO_VSS);

	ftl_mngt_p2l_ckpt_restore(&g_band, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC, 0xDEADBEEF);
	/* NOTE(review): start >= count here, so this verifies zero entries. */
	verify_band_p2l(&g_band, xfer_size, FTL_NUM_P2L_ENTRIES_NO_VSS);

	CU_ASSERT_EQUAL(g_band.md->iter.offset, 2 * xfer_size);

	TAILQ_REMOVE(&g_dev.p2l_ckpt.inuse, ckpt, link);
	ftl_p2l_ckpt_destroy(ckpt);
	band_free(&g_band);

	/* 1GiB band size, xfer size 1MiB, each write unit needs 2 pages */
	band_size = 1024 * 1024 * 1024 / FTL_BLOCK_SIZE;
	xfer_size = 1024 * 1024 / FTL_BLOCK_SIZE;
	dev_setup(xfer_size, band_size);
	ckpt = ftl_p2l_ckpt_new(&g_dev, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC);
	TAILQ_INSERT_TAIL(&g_dev.p2l_ckpt.free, ckpt, link);
	band_setup(ckpt, xfer_size);

	/* Running P2L is partially filled, only second part of second xfer_size was written */
	fill_running_p2l(3, xfer_size, xfer_size - FTL_NUM_P2L_ENTRIES_NO_VSS);

	ftl_mngt_p2l_ckpt_restore(&g_band, FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC, 0xDEADBEEF);
	/* NOTE(review): start >= count here, so this verifies zero entries. */
	verify_band_p2l(&g_band, xfer_size + xfer_size - FTL_NUM_P2L_ENTRIES_NO_VSS,
			xfer_size - FTL_NUM_P2L_ENTRIES_NO_VSS);

	CU_ASSERT_EQUAL(g_band.md->iter.offset, 2 * xfer_size);

	TAILQ_REMOVE(&g_dev.p2l_ckpt.inuse, ckpt, link);
	ftl_p2l_ckpt_destroy(ckpt);
	band_free(&g_band);
}
602 
/* Register and run the P2L checkpoint suite; the process exit code is the
 * number of failed CUnit assertions (0 on success).
 */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("ftl_p2l_suite", NULL, NULL);

	CU_ADD_TEST(suite, test_p2l_num_pages);
	CU_ADD_TEST(suite, test_ckpt_issue);
	CU_ADD_TEST(suite, test_persist_band_p2l);
	CU_ADD_TEST(suite, test_clean_restore_p2l);
	CU_ADD_TEST(suite, test_dirty_restore_p2l);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}
627