1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   Copyright 2023 Solidigm All Rights Reserved
4  *   All rights reserved.
5  */
6 
7 
8 #include "spdk/bdev.h"
9 #include "spdk/bdev_module.h"
10 #include "spdk/ftl.h"
11 #include "spdk/string.h"
12 
13 #include "ftl_nv_cache.h"
14 #include "ftl_nv_cache_io.h"
15 #include "ftl_core.h"
16 #include "ftl_band.h"
17 #include "utils/ftl_addr_utils.h"
18 #include "mngt/ftl_mngt.h"
19 
20 static inline uint64_t nvc_data_blocks(struct ftl_nv_cache *nv_cache) __attribute__((unused));
21 static struct ftl_nv_cache_compactor *compactor_alloc(struct spdk_ftl_dev *dev);
22 static void compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor);
23 static void compaction_process_ftl_done(struct ftl_rq *rq);
24 static void compaction_process_read_entry(void *arg);
25 static void ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev,
26 					const struct ftl_property *property,
27 					struct spdk_json_write_ctx *w);
28 
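/*
 * Sanity-check that a chunk metadata entry lies fully within the NV cache
 * metadata buffer; aborts the device on an out-of-bounds pointer.
 */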
29 static inline void
30 nvc_validate_md(struct ftl_nv_cache *nv_cache,
31 		struct ftl_nv_cache_chunk_md *chunk_md)
32 {
33 	struct ftl_md *md = nv_cache->md;
34 	void *buffer = ftl_md_get_buffer(md);
35 	uint64_t size = ftl_md_get_buffer_size(md);
36 	void *ptr = chunk_md;
37 
38 	if (ptr < buffer) {
39 		ftl_abort();
40 	}
41 
42 	ptr += sizeof(*chunk_md);
43 	if (ptr > buffer + size) {
44 		ftl_abort();
45 	}
46 }
47 
48 static inline uint64_t
49 nvc_data_offset(struct ftl_nv_cache *nv_cache)
50 {
51 	return 0;
52 }
53 
54 static inline uint64_t
55 nvc_data_blocks(struct ftl_nv_cache *nv_cache)
56 {
57 	return nv_cache->chunk_blocks * nv_cache->chunk_count;
58 }
59 
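/*
 * Number of blocks needed to hold a chunk's tail metadata (its P2L map):
 * one L2P address entry per data block in the chunk, rounded up to whole
 * FTL blocks.
 */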
60 size_t
61 ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache)
62 {
63 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache,
64 				    struct spdk_ftl_dev, nv_cache);
65 	return spdk_divide_round_up(dev->layout.nvc.chunk_data_blocks * dev->layout.l2p.addr_size,
66 				    FTL_BLOCK_SIZE);
67 }
68 
69 static size_t
70 nv_cache_p2l_map_pool_elem_size(const struct ftl_nv_cache *nv_cache)
71 {
72 	/* Map pool element holds the whole tail md */
73 	return nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE;
74 }
75 
76 static uint64_t
77 get_chunk_idx(struct ftl_nv_cache_chunk *chunk)
78 {
79 	struct ftl_nv_cache_chunk *first_chunk = chunk->nv_cache->chunks;
80 
81 	return (chunk->offset - first_chunk->offset) / chunk->nv_cache->chunk_blocks;
82 }
83 
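/*
 * Derive the compaction and throttling limits from the number of usable
 * (active) chunks. Illustrative example (hypothetical values): with 100
 * usable chunks and chunk_compaction_threshold = 80, compaction kicks in
 * once 80 chunks are full; with chunk_free_target = 5, the throttler aims
 * to keep ceil(100 * 5 / 100) = 5 chunks free.
 */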
84 static void
85 ftl_nv_cache_init_update_limits(struct spdk_ftl_dev *dev)
86 {
87 	struct ftl_nv_cache *nvc = &dev->nv_cache;
88 	uint64_t usable_chunks = nvc->chunk_count - nvc->chunk_inactive_count;
89 
90 	/* Start compaction when the number of full chunks exceeds the given % of all active chunks */
91 	nvc->chunk_compaction_threshold = usable_chunks *
92 					  dev->conf.nv_cache.chunk_compaction_threshold /
93 					  100;
94 
95 	nvc->throttle.interval_tsc = FTL_NV_CACHE_THROTTLE_INTERVAL_MS *
96 				     (spdk_get_ticks_hz() / 1000);
97 
98 	nvc->chunk_free_target = spdk_divide_round_up(usable_chunks *
99 				 dev->conf.nv_cache.chunk_free_target,
100 				 100);
101 }
102 
103 struct nvc_scrub_ctx {
104 	uint64_t chunk_no;
105 	nvc_scrub_cb cb;
106 	void *cb_ctx;
107 
108 	struct ftl_layout_region reg_chunk;
109 	struct ftl_md *md_chunk;
110 };
111 
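/*
 * Advance the scrub context to the next active chunk, skipping inactive
 * ones. Returns 0 when an active chunk was found, -ENOENT when the end of
 * the cache was reached.
 */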
112 static int
113 nvc_scrub_find_next_chunk(struct spdk_ftl_dev *dev, struct nvc_scrub_ctx *scrub_ctx)
114 {
115 	while (scrub_ctx->chunk_no < dev->layout.nvc.chunk_count) {
116 		if (dev->nv_cache.nvc_type->ops.is_chunk_active(dev, scrub_ctx->reg_chunk.current.offset)) {
117 			return 0;
118 		}
119 
120 		/* Advance the dummy region along to the next chunk */
121 		scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;
122 		scrub_ctx->chunk_no++;
123 	}
124 	return -ENOENT;
125 }
126 
127 static void
128 nvc_scrub_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
129 {
130 	struct nvc_scrub_ctx *scrub_ctx = md->owner.cb_ctx;
131 	union ftl_md_vss vss;
132 
133 	/* Move to the next chunk */
134 	scrub_ctx->chunk_no++;
135 	scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;
136 
137 	FTL_DEBUGLOG(dev, "Scrub progress: %"PRIu64"/%"PRIu64" chunks\n",
138 		     scrub_ctx->chunk_no, dev->layout.nvc.chunk_count);
139 
140 	if (status || nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
141 		/* IO error or no more active chunks found. Scrubbing finished. */
142 		scrub_ctx->cb(dev, scrub_ctx->cb_ctx, status);
143 		ftl_md_destroy(scrub_ctx->md_chunk, 0);
144 		free(scrub_ctx);
145 		return;
146 	}
147 
148 	/* Scrub the next chunk */
149 	vss.version.md_version = 0;
150 	vss.nv_cache.lba = FTL_ADDR_INVALID;
151 
152 	scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
153 	scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;
154 
155 	ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
156 }
157 
158 void
159 ftl_nv_cache_scrub(struct spdk_ftl_dev *dev, nvc_scrub_cb cb, void *cb_ctx)
160 {
161 	struct nvc_scrub_ctx *scrub_ctx = calloc(1, sizeof(*scrub_ctx));
162 	union ftl_md_vss vss;
163 
164 	if (!scrub_ctx) {
165 		cb(dev, cb_ctx, -ENOMEM);
166 		return;
167 	}
168 
169 	scrub_ctx->cb = cb;
170 	scrub_ctx->cb_ctx = cb_ctx;
171 
172 	/* Setup a dummy region for the first chunk */
173 	scrub_ctx->reg_chunk.name = ftl_md_region_name(FTL_LAYOUT_REGION_TYPE_DATA_NVC);
174 	scrub_ctx->reg_chunk.type = FTL_LAYOUT_REGION_TYPE_DATA_NVC;
175 	scrub_ctx->reg_chunk.mirror_type = FTL_LAYOUT_REGION_TYPE_INVALID;
176 	scrub_ctx->reg_chunk.current.version = 0;
177 	scrub_ctx->reg_chunk.current.offset = 0;
178 	scrub_ctx->reg_chunk.current.blocks = dev->layout.nvc.chunk_data_blocks;
179 	scrub_ctx->reg_chunk.entry_size = FTL_BLOCK_SIZE;
180 	scrub_ctx->reg_chunk.num_entries = dev->layout.nvc.chunk_data_blocks;
181 	scrub_ctx->reg_chunk.vss_blksz = dev->nv_cache.md_size;
182 	scrub_ctx->reg_chunk.bdev_desc = dev->nv_cache.bdev_desc;
183 	scrub_ctx->reg_chunk.ioch = dev->nv_cache.cache_ioch;
184 
185 	/* Setup an MD object for the region */
186 	scrub_ctx->md_chunk = ftl_md_create(dev, scrub_ctx->reg_chunk.current.blocks,
187 					    scrub_ctx->reg_chunk.vss_blksz, scrub_ctx->reg_chunk.name, FTL_MD_CREATE_NO_MEM,
188 					    &scrub_ctx->reg_chunk);
189 
190 	if (!scrub_ctx->md_chunk) {
191 		free(scrub_ctx);
192 		cb(dev, cb_ctx, -ENOMEM);
193 		return;
194 	}
195 
196 	if (nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
197 		/* No active chunks found */
198 		ftl_md_destroy(scrub_ctx->md_chunk, 0);
199 		free(scrub_ctx);
200 		cb(dev, cb_ctx, -ENOENT);
201 		return;
202 	}
203 
204 	/* Scrub the first chunk */
205 	vss.version.md_version = 0;
206 	vss.nv_cache.lba = FTL_ADDR_INVALID;
207 
208 	scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
209 	scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;
210 
211 	ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
212 	return;
213 }
214 
215 int
216 ftl_nv_cache_init(struct spdk_ftl_dev *dev)
217 {
218 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
219 	struct ftl_nv_cache_chunk *chunk;
220 	struct ftl_nv_cache_chunk_md *md;
221 	struct ftl_nv_cache_compactor *compactor;
222 	uint64_t i, offset;
223 
224 	nv_cache->halt = true;
225 
226 	nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
227 	if (!nv_cache->md) {
228 		FTL_ERRLOG(dev, "No NV cache metadata object\n");
229 		return -1;
230 	}
231 
232 	nv_cache->md_pool = ftl_mempool_create(dev->conf.user_io_pool_size,
233 					       nv_cache->md_size * dev->xfer_size,
234 					       FTL_BLOCK_SIZE, SPDK_ENV_SOCKET_ID_ANY);
235 	if (!nv_cache->md_pool) {
236 		FTL_ERRLOG(dev, "Failed to initialize NV cache metadata pool\n");
237 		return -1;
238 	}
239 
240 	/*
241 	 * Initialize chunk info
242 	 */
243 	nv_cache->chunk_blocks = dev->layout.nvc.chunk_data_blocks;
244 	nv_cache->chunk_count = dev->layout.nvc.chunk_count;
245 	nv_cache->tail_md_chunk_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache);
246 
247 	/* Allocate chunks */
248 	nv_cache->chunks = calloc(nv_cache->chunk_count,
249 				  sizeof(nv_cache->chunks[0]));
250 	if (!nv_cache->chunks) {
251 		FTL_ERRLOG(dev, "Failed to initialize NV cache chunks\n");
252 		return -1;
253 	}
254 
255 	TAILQ_INIT(&nv_cache->chunk_free_list);
256 	TAILQ_INIT(&nv_cache->chunk_open_list);
257 	TAILQ_INIT(&nv_cache->chunk_full_list);
258 	TAILQ_INIT(&nv_cache->chunk_comp_list);
259 	TAILQ_INIT(&nv_cache->chunk_inactive_list);
260 	TAILQ_INIT(&nv_cache->needs_free_persist_list);
261 
262 	/* First chunk metadata */
263 	md = ftl_md_get_buffer(nv_cache->md);
264 	if (!md) {
265 		FTL_ERRLOG(dev, "No NV cache metadata\n");
266 		return -1;
267 	}
268 
269 	chunk = nv_cache->chunks;
270 	offset = nvc_data_offset(nv_cache);
271 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
272 		chunk->nv_cache = nv_cache;
273 		chunk->md = md;
274 		chunk->md->version = FTL_NVC_VERSION_CURRENT;
275 		nvc_validate_md(nv_cache, md);
276 		chunk->offset = offset;
277 		offset += nv_cache->chunk_blocks;
278 
279 		if (nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset)) {
280 			nv_cache->chunk_free_count++;
281 			TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
282 		} else {
283 			chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
284 			nv_cache->chunk_inactive_count++;
285 			TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
286 		}
287 	}
288 	assert(nv_cache->chunk_free_count + nv_cache->chunk_inactive_count == nv_cache->chunk_count);
289 	assert(offset <= nvc_data_offset(nv_cache) + nvc_data_blocks(nv_cache));
290 
291 	TAILQ_INIT(&nv_cache->compactor_list);
292 	for (i = 0; i < FTL_NV_CACHE_NUM_COMPACTORS; i++) {
293 		compactor = compactor_alloc(dev);
294 
295 		if (!compactor) {
296 			FTL_ERRLOG(dev, "Cannot allocate compaction process\n");
297 			return -1;
298 		}
299 
300 		TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
301 	}
302 
303 #define FTL_MAX_OPEN_CHUNKS 2
304 	nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
305 						nv_cache_p2l_map_pool_elem_size(nv_cache),
306 						FTL_BLOCK_SIZE,
307 						SPDK_ENV_SOCKET_ID_ANY);
308 	if (!nv_cache->p2l_pool) {
309 		return -ENOMEM;
310 	}
311 
312 	/* One entry per open chunk */
313 	nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
314 				  sizeof(struct ftl_nv_cache_chunk_md),
315 				  FTL_BLOCK_SIZE,
316 				  SPDK_ENV_SOCKET_ID_ANY);
317 	if (!nv_cache->chunk_md_pool) {
318 		return -ENOMEM;
319 	}
320 
321 	/* Each compactor can be reading a different chunk, which it must transition to the free state
322 	 * at the end, plus one spare entry per compactor for processing high-invalidity chunks (if there
323 	 * is a backlog of chunks with extremely low, even zero, validity, the compactors can process them
324 	 * quickly and trigger many free-state updates at once) */
325 	nv_cache->free_chunk_md_pool = ftl_mempool_create(2 * FTL_NV_CACHE_NUM_COMPACTORS,
326 				       sizeof(struct ftl_nv_cache_chunk_md),
327 				       FTL_BLOCK_SIZE,
328 				       SPDK_ENV_SOCKET_ID_ANY);
329 	if (!nv_cache->free_chunk_md_pool) {
330 		return -ENOMEM;
331 	}
332 
333 	ftl_nv_cache_init_update_limits(dev);
334 	ftl_property_register(dev, "cache_device", NULL, 0, NULL, NULL, ftl_property_dump_cache_dev, NULL,
335 			      NULL, true);
336 	return 0;
337 }
338 
339 void
340 ftl_nv_cache_deinit(struct spdk_ftl_dev *dev)
341 {
342 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
343 	struct ftl_nv_cache_compactor *compactor;
344 
345 	while (!TAILQ_EMPTY(&nv_cache->compactor_list)) {
346 		compactor = TAILQ_FIRST(&nv_cache->compactor_list);
347 		TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
348 
349 		compactor_free(dev, compactor);
350 	}
351 
352 	ftl_mempool_destroy(nv_cache->md_pool);
353 	ftl_mempool_destroy(nv_cache->p2l_pool);
354 	ftl_mempool_destroy(nv_cache->chunk_md_pool);
355 	ftl_mempool_destroy(nv_cache->free_chunk_md_pool);
356 	nv_cache->md_pool = NULL;
357 	nv_cache->p2l_pool = NULL;
358 	nv_cache->chunk_md_pool = NULL;
359 	nv_cache->free_chunk_md_pool = NULL;
360 
361 	free(nv_cache->chunks);
362 	nv_cache->chunks = NULL;
363 }
364 
365 static uint64_t
366 chunk_get_free_space(struct ftl_nv_cache *nv_cache,
367 		     struct ftl_nv_cache_chunk *chunk)
368 {
369 	assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <=
370 	       nv_cache->chunk_blocks);
371 	return nv_cache->chunk_blocks - chunk->md->write_pointer -
372 	       nv_cache->tail_md_chunk_blocks;
373 }
374 
375 static bool
376 chunk_is_closed(struct ftl_nv_cache_chunk *chunk)
377 {
378 	return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
379 }
380 
381 static void ftl_chunk_close(struct ftl_nv_cache_chunk *chunk);
382 
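/*
 * Reserve num_blocks of space in the currently open chunk and return its
 * cache-relative address, or FTL_LBA_INVALID when no open chunk can serve
 * the request. If the current chunk cannot fit the whole IO, the remaining
 * blocks are recorded as skipped, the chunk is closed once fully written,
 * and allocation continues from the next open chunk.
 */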
383 static uint64_t
384 ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io)
385 {
386 	uint64_t address = FTL_LBA_INVALID;
387 	uint64_t num_blocks = io->num_blocks;
388 	uint64_t free_space;
389 	struct ftl_nv_cache_chunk *chunk;
390 
391 	do {
392 		chunk = nv_cache->chunk_current;
393 		/* Chunk has been closed, so pick a new one */
394 		if (chunk && chunk_is_closed(chunk))  {
395 			chunk = NULL;
396 		}
397 
398 		if (!chunk) {
399 			chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
400 			if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
401 				TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
402 				nv_cache->chunk_current = chunk;
403 			} else {
404 				break;
405 			}
406 		}
407 
408 		free_space = chunk_get_free_space(nv_cache, chunk);
409 
410 		if (free_space >= num_blocks) {
411 			/* Enough space in chunk */
412 
413 			/* Calculate address in NV cache */
414 			address = chunk->offset + chunk->md->write_pointer;
415 
416 			/* Set chunk in IO */
417 			io->nv_cache_chunk = chunk;
418 
419 			/* Move write pointer */
420 			chunk->md->write_pointer += num_blocks;
421 			break;
422 		}
423 
424 		/* Not enough space in nv_cache_chunk */
425 		nv_cache->chunk_current = NULL;
426 
427 		if (0 == free_space) {
428 			continue;
429 		}
430 
431 		chunk->md->blocks_skipped = free_space;
432 		chunk->md->blocks_written += free_space;
433 		chunk->md->write_pointer += free_space;
434 
435 		if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
436 			ftl_chunk_close(chunk);
437 		}
438 	} while (1);
439 
440 	return address;
441 }
442 
443 void
444 ftl_nv_cache_fill_md(struct ftl_io *io)
445 {
446 	struct ftl_nv_cache_chunk *chunk = io->nv_cache_chunk;
447 	uint64_t i;
448 	union ftl_md_vss *metadata = io->md;
449 	uint64_t lba = ftl_io_get_lba(io, 0);
450 
451 	for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) {
452 		metadata->nv_cache.lba = lba;
453 		metadata->nv_cache.seq_id = chunk->md->seq_id;
454 	}
455 }
456 
457 uint64_t
458 chunk_tail_md_offset(struct ftl_nv_cache *nv_cache)
459 {
460 	return nv_cache->chunk_blocks - nv_cache->tail_md_chunk_blocks;
461 }
462 
463 static void
464 chunk_advance_blocks(struct ftl_nv_cache *nv_cache, struct ftl_nv_cache_chunk *chunk,
465 		     uint64_t advanced_blocks)
466 {
467 	chunk->md->blocks_written += advanced_blocks;
468 
469 	assert(chunk->md->blocks_written <= nv_cache->chunk_blocks);
470 
471 	if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
472 		ftl_chunk_close(chunk);
473 	}
474 }
475 
476 static uint64_t
477 chunk_user_blocks_written(struct ftl_nv_cache_chunk *chunk)
478 {
479 	return chunk->md->blocks_written - chunk->md->blocks_skipped -
480 	       chunk->nv_cache->tail_md_chunk_blocks;
481 }
482 
483 static bool
484 is_chunk_compacted(struct ftl_nv_cache_chunk *chunk)
485 {
486 	assert(chunk->md->blocks_written != 0);
487 
488 	if (chunk_user_blocks_written(chunk) == chunk->md->blocks_compacted) {
489 		return true;
490 	}
491 
492 	return false;
493 }
494 
495 static int
496 ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk)
497 {
498 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
499 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
500 
501 	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->chunk_md_pool);
502 
503 	if (!p2l_map->chunk_dma_md) {
504 		return -ENOMEM;
505 	}
506 
507 	ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
508 	return 0;
509 }
510 
511 static void
512 ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk)
513 {
514 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
515 
516 	ftl_mempool_put(chunk->nv_cache->chunk_md_pool, p2l_map->chunk_dma_md);
517 	p2l_map->chunk_dma_md = NULL;
518 }
519 
520 static void
521 ftl_chunk_free(struct ftl_nv_cache_chunk *chunk)
522 {
523 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
524 
525 	/* Reset chunk */
526 	ftl_nv_cache_chunk_md_initialize(chunk->md);
527 
528 	TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry);
529 	nv_cache->chunk_free_persist_count++;
530 }
531 
532 static int
533 ftl_chunk_alloc_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
534 {
535 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
536 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
537 
538 	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->free_chunk_md_pool);
539 	if (!p2l_map->chunk_dma_md) {
540 		return -ENOMEM;
541 	}
542 
543 	ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
544 	return 0;
545 }
546 
547 static void
548 ftl_chunk_free_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
549 {
550 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
551 
552 	ftl_mempool_put(chunk->nv_cache->free_chunk_md_pool, p2l_map->chunk_dma_md);
553 	p2l_map->chunk_dma_md = NULL;
554 }
555 
556 static void
557 chunk_free_cb(int status, void *ctx)
558 {
559 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
560 
561 	if (spdk_likely(!status)) {
562 		struct ftl_nv_cache *nv_cache = chunk->nv_cache;
563 
564 		nv_cache->chunk_free_persist_count--;
565 		TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
566 		nv_cache->chunk_free_count++;
567 		nv_cache->chunk_full_count--;
568 		chunk->md->state = FTL_CHUNK_STATE_FREE;
569 		chunk->md->close_seq_id = 0;
570 		ftl_chunk_free_chunk_free_entry(chunk);
571 	} else {
572 #ifdef SPDK_FTL_RETRY_ON_ERROR
573 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
574 #else
575 		ftl_abort();
576 #endif
577 	}
578 }
579 
580 static void
581 ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
582 {
583 	int rc;
584 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
585 	struct ftl_p2l_map *p2l_map;
586 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
587 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
588 	struct ftl_nv_cache_chunk *tchunk, *chunk = NULL;
589 
590 	TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) {
591 		p2l_map = &chunk->p2l_map;
592 		rc = ftl_chunk_alloc_chunk_free_entry(chunk);
593 		if (rc) {
594 			break;
595 		}
596 
597 		TAILQ_REMOVE(&nv_cache->needs_free_persist_list, chunk, entry);
598 
599 		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
600 		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_FREE;
601 		p2l_map->chunk_dma_md->close_seq_id = 0;
602 		p2l_map->chunk_dma_md->p2l_map_checksum = 0;
603 
604 		ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
605 				       chunk_free_cb, chunk, &chunk->md_persist_entry_ctx);
606 	}
607 }
608 
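/*
 * Update the compaction bandwidth simple moving average (SMA) over the last
 * FTL_NV_CACHE_COMPACTION_SMA_N samples, kept in a ring buffer. Each sample
 * is the chunk's compaction throughput in bytes per tick:
 *   sample = blocks_compacted * FTL_BLOCK_SIZE / compaction_length_tsc
 * Once the buffer is full, the oldest sample is subtracted from the running
 * sum before the newest one is added.
 */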
609 static void
610 compaction_stats_update(struct ftl_nv_cache_chunk *chunk)
611 {
612 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
613 	struct compaction_bw_stats *compaction_bw = &nv_cache->compaction_recent_bw;
614 	double *ptr;
615 
616 	if (spdk_unlikely(chunk->compaction_length_tsc == 0)) {
617 		return;
618 	}
619 
620 	if (spdk_likely(compaction_bw->count == FTL_NV_CACHE_COMPACTION_SMA_N)) {
621 		ptr = compaction_bw->buf + compaction_bw->first;
622 		compaction_bw->first++;
623 		if (compaction_bw->first == FTL_NV_CACHE_COMPACTION_SMA_N) {
624 			compaction_bw->first = 0;
625 		}
626 		compaction_bw->sum -= *ptr;
627 	} else {
628 		ptr = compaction_bw->buf + compaction_bw->count;
629 		compaction_bw->count++;
630 	}
631 
632 	*ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc;
633 	chunk->compaction_length_tsc = 0;
634 
635 	compaction_bw->sum += *ptr;
636 	nv_cache->compaction_sma = compaction_bw->sum / compaction_bw->count;
637 }
638 
639 static void
640 chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)
641 {
642 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
643 	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
644 
645 	chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
646 	chunk->compaction_start_tsc = tsc;
647 
648 	chunk->md->blocks_compacted += num_blocks;
649 	assert(chunk->md->blocks_compacted <= chunk_user_blocks_written(chunk));
650 	if (!is_chunk_compacted(chunk)) {
651 		return;
652 	}
653 
654 	/* Remove chunk from compacted list */
655 	TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry);
656 	nv_cache->chunk_comp_count--;
657 
658 	compaction_stats_update(chunk);
659 
660 	ftl_chunk_free(chunk);
661 }
662 
663 static bool
664 is_compaction_required_for_upgrade(struct ftl_nv_cache *nv_cache)
665 {
666 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
667 
668 	if (dev->conf.prep_upgrade_on_shutdown) {
669 		if (nv_cache->chunk_full_count || nv_cache->chunk_open_count) {
670 			return true;
671 		}
672 	}
673 
674 	return false;
675 }
676 
677 static bool
678 is_compaction_required(struct ftl_nv_cache *nv_cache)
679 {
680 	if (spdk_unlikely(nv_cache->halt)) {
681 		return is_compaction_required_for_upgrade(nv_cache);
682 	}
683 
684 	if (nv_cache->chunk_full_count >= nv_cache->chunk_compaction_threshold) {
685 		return true;
686 	}
687 
688 	return false;
689 }
690 
691 static void compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor);
692 static void compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp);
693 
694 static void
695 _compaction_process_pin_lba(void *_comp)
696 {
697 	struct ftl_nv_cache_compactor *comp = _comp;
698 
699 	compaction_process_pin_lba(comp);
700 }
701 
702 static void
703 compaction_process_pin_lba_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
704 {
705 	struct ftl_nv_cache_compactor *comp = pin_ctx->cb_ctx;
706 	struct ftl_rq *rq = comp->rq;
707 
708 	if (status) {
709 		rq->iter.status = status;
710 		pin_ctx->lba = FTL_LBA_INVALID;
711 	}
712 
713 	if (--rq->iter.remaining == 0) {
714 		if (rq->iter.status) {
715 			/* unpin and try again */
716 			ftl_rq_unpin(rq);
717 			spdk_thread_send_msg(spdk_get_thread(), _compaction_process_pin_lba, comp);
718 			return;
719 		}
720 
721 		compaction_process_finish_read(comp);
722 	}
723 }
724 
725 static void
726 compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp)
727 {
728 	struct ftl_rq *rq = comp->rq;
729 	struct spdk_ftl_dev *dev = rq->dev;
730 	struct ftl_rq_entry *entry;
731 
732 	assert(rq->iter.count);
733 	rq->iter.remaining = rq->iter.count;
734 	rq->iter.status = 0;
735 
736 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
737 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
738 		struct ftl_l2p_pin_ctx *pin_ctx = &entry->l2p_pin_ctx;
739 		union ftl_md_vss *md = entry->io_md;
740 
741 		if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
742 			ftl_l2p_pin_skip(dev, compaction_process_pin_lba_cb, comp, pin_ctx);
743 		} else {
744 			ftl_l2p_pin(dev, md->nv_cache.lba, 1, compaction_process_pin_lba_cb, comp, pin_ctx);
745 		}
746 	}
747 }
748 
749 static void
750 compaction_process_read_entry_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
751 {
752 	struct ftl_rq_entry *entry = arg;
753 	struct ftl_rq *rq = ftl_rq_from_entry(entry);
754 	struct spdk_ftl_dev *dev = rq->dev;
755 	struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
756 
757 	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_CMP, bdev_io);
758 
759 	spdk_bdev_free_io(bdev_io);
760 
761 	if (!success) {
762 		/* retry */
763 		spdk_thread_send_msg(spdk_get_thread(), compaction_process_read_entry, entry);
764 		return;
765 	}
766 
767 	assert(rq->iter.remaining >= entry->bdev_io.num_blocks);
768 	rq->iter.remaining -= entry->bdev_io.num_blocks;
769 	if (0 == rq->iter.remaining) {
770 		/* All IOs processed, go to the next phase - pinning */
771 		compaction_process_pin_lba(compactor);
772 	}
773 }
774 
775 static void
776 compaction_process_read_entry(void *arg)
777 {
778 	struct ftl_rq_entry *entry = arg;
779 	struct ftl_rq *rq = ftl_rq_from_entry(entry);
780 	struct spdk_ftl_dev *dev = rq->dev;
781 
782 	int rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, dev->nv_cache.bdev_desc,
783 			dev->nv_cache.cache_ioch, entry->io_payload, entry->io_md,
784 			entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
785 			compaction_process_read_entry_cb, entry);
786 
787 	if (spdk_unlikely(rc)) {
788 		if (rc == -ENOMEM) {
789 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
790 			entry->bdev_io.wait_entry.bdev = bdev;
791 			entry->bdev_io.wait_entry.cb_fn = compaction_process_read_entry;
792 			entry->bdev_io.wait_entry.cb_arg = entry;
793 			spdk_bdev_queue_io_wait(bdev, dev->nv_cache.cache_ioch, &entry->bdev_io.wait_entry);
794 		} else {
795 			ftl_abort();
796 		}
797 	}
798 
799 	dev->stats.io_activity_total += entry->bdev_io.num_blocks;
800 }
801 
802 static bool
803 is_chunk_to_read(struct ftl_nv_cache_chunk *chunk)
804 {
805 	assert(chunk->md->blocks_written != 0);
806 
807 	if (chunk_user_blocks_written(chunk) == chunk->md->read_pointer) {
808 		return false;
809 	}
810 
811 	return true;
812 }
813 
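/*
 * Pick a chunk to compact: prefer the chunk at the head of the compaction
 * list if it still has blocks to read; otherwise move the oldest full chunk
 * onto the compaction list.
 */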
814 static struct ftl_nv_cache_chunk *
815 get_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
816 {
817 	struct ftl_nv_cache_chunk *chunk = NULL;
818 
819 	if (!TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
820 		chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
821 		if (is_chunk_to_read(chunk)) {
822 			return chunk;
823 		}
824 	}
825 
826 	if (!TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
827 		chunk = TAILQ_FIRST(&nv_cache->chunk_full_list);
828 		TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry);
829 
830 		assert(chunk->md->write_pointer);
831 	} else {
832 		return NULL;
833 	}
834 
835 	if (spdk_likely(chunk)) {
836 		assert(chunk->md->write_pointer != 0);
837 		TAILQ_INSERT_HEAD(&nv_cache->chunk_comp_list, chunk, entry);
838 		nv_cache->chunk_comp_count++;
839 	}
840 
841 	return chunk;
842 }
843 
844 static uint64_t
845 chunk_blocks_to_read(struct ftl_nv_cache_chunk *chunk)
846 {
847 	uint64_t blocks_written;
848 	uint64_t blocks_to_read;
849 
850 	assert(chunk->md->blocks_written >= chunk->md->blocks_skipped);
851 	blocks_written = chunk_user_blocks_written(chunk);
852 
853 	assert(blocks_written >= chunk->md->read_pointer);
854 	blocks_to_read = blocks_written - chunk->md->read_pointer;
855 
856 	return blocks_to_read;
857 }
858 
859 static void
860 compactor_deactivate(struct ftl_nv_cache_compactor *compactor)
861 {
862 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
863 
864 	compactor->rq->iter.count = 0;
865 	assert(nv_cache->compaction_active_count);
866 	nv_cache->compaction_active_count--;
867 	TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
868 }
869 
870 static void
871 compaction_process_invalidate_entry(struct ftl_rq_entry *entry)
872 {
873 	entry->addr = FTL_ADDR_INVALID;
874 	entry->lba = FTL_LBA_INVALID;
875 	entry->seq_id = 0;
876 	entry->owner.priv = NULL;
877 }
878 
879 static void
880 compaction_process_pad(struct ftl_nv_cache_compactor *compactor, uint64_t idx)
881 {
882 	struct ftl_rq *rq = compactor->rq;
883 	struct ftl_rq_entry *entry;
884 
885 	assert(idx < rq->num_blocks);
886 	FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[idx], entry, rq->num_blocks) {
887 		compaction_process_invalidate_entry(entry);
888 	}
889 }
890 
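/*
 * Issue read IOs for all entries in the request, merging entries with
 * physically contiguous cache addresses into a single bdev read.
 */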
891 static void
892 compaction_process_read(struct ftl_nv_cache_compactor *compactor)
893 {
894 	struct ftl_rq *rq = compactor->rq;
895 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
896 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
897 	struct ftl_rq_entry *entry, *io;
898 
899 	assert(rq->iter.count);
900 	rq->iter.remaining = rq->iter.count;
901 
902 	io = rq->entries;
903 	io->bdev_io.num_blocks = 1;
904 	io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
905 	FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[1], entry, rq->iter.count) {
906 		if (entry->addr == io->addr + io->bdev_io.num_blocks) {
907 			io->bdev_io.num_blocks++;
908 		} else {
909 			compaction_process_read_entry(io);
910 			io = entry;
911 			io->bdev_io.num_blocks = 1;
912 			io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
913 		}
914 	}
915 	compaction_process_read_entry(io);
916 }
917 
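/*
 * Find the next valid block to compact in the chunk, using the valid map to
 * skip over invalidated blocks. Skipped ranges advance both the read pointer
 * and the compaction progress. Returns FTL_ADDR_INVALID when nothing valid
 * remains to be read from the chunk.
 */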
918 static ftl_addr
919 compaction_chunk_read_pos(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk)
920 {
921 	ftl_addr start, pos;
922 	uint64_t skip, to_read = chunk_blocks_to_read(chunk);
923 
924 	if (0 == to_read) {
925 		return FTL_ADDR_INVALID;
926 	}
927 
928 	start = ftl_addr_from_nvc_offset(dev, chunk->offset + chunk->md->read_pointer);
929 	pos = ftl_bitmap_find_first_set(dev->valid_map, start, start + to_read - 1);
930 
931 	if (pos == UINT64_MAX) {
932 		chunk->md->read_pointer += to_read;
933 		chunk_compaction_advance(chunk, to_read);
934 		return FTL_ADDR_INVALID;
935 	}
936 
937 	assert(pos >= start);
938 	skip = pos - start;
939 	if (skip) {
940 		chunk->md->read_pointer += skip;
941 		chunk_compaction_advance(chunk, skip);
942 	}
943 
944 	return pos;
945 }
946 
947 static bool
948 compaction_entry_read_pos(struct ftl_nv_cache *nv_cache, struct ftl_rq_entry *entry)
949 {
950 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
951 	struct ftl_nv_cache_chunk *chunk = NULL;
952 	ftl_addr addr = FTL_ADDR_INVALID;
953 
954 	while (!chunk) {
955 		/* Get currently handled chunk */
956 		chunk = get_chunk_for_compaction(nv_cache);
957 		if (!chunk) {
958 			return false;
959 		}
960 		chunk->compaction_start_tsc = spdk_thread_get_last_tsc(spdk_get_thread());
961 
962 		/* Get next read position in chunk */
963 		addr = compaction_chunk_read_pos(dev, chunk);
964 		if (FTL_ADDR_INVALID == addr) {
965 			chunk = NULL;
966 		}
967 	}
968 
969 	assert(FTL_ADDR_INVALID != addr);
970 
971 	/* Set entry address info and chunk */
972 	entry->addr = addr;
973 	entry->owner.priv = chunk;
974 
975 	/* Move read pointer in the chunk */
976 	chunk->md->read_pointer++;
977 
978 	return true;
979 }
980 
981 static void
982 compaction_process_start(struct ftl_nv_cache_compactor *compactor)
983 {
984 	struct ftl_rq *rq = compactor->rq;
985 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
986 	struct ftl_rq_entry *entry;
987 
988 	assert(0 == compactor->rq->iter.count);
989 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->num_blocks) {
990 		if (!compaction_entry_read_pos(nv_cache, entry)) {
991 			compaction_process_pad(compactor, entry->index);
992 			break;
993 		}
994 		rq->iter.count++;
995 	}
996 
997 	if (rq->iter.count) {
998 		/* Schedule Read IOs */
999 		compaction_process_read(compactor);
1000 	} else {
1001 		compactor_deactivate(compactor);
1002 	}
1003 }
1004 
1005 static void
1006 compaction_process(struct ftl_nv_cache *nv_cache)
1007 {
1008 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1009 	struct ftl_nv_cache_compactor *compactor;
1010 
1011 	if (!is_compaction_required(nv_cache)) {
1012 		return;
1013 	}
1014 
1015 	compactor = TAILQ_FIRST(&nv_cache->compactor_list);
1016 	if (!compactor) {
1017 		return;
1018 	}
1019 
1020 	TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
1021 	compactor->nv_cache->compaction_active_count++;
1022 	compaction_process_start(compactor);
1023 	ftl_add_io_activity(dev);
1024 }
1025 
1026 static void
1027 compaction_process_ftl_done(struct ftl_rq *rq)
1028 {
1029 	struct spdk_ftl_dev *dev = rq->dev;
1030 	struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
1031 	struct ftl_band *band = rq->io.band;
1032 	struct ftl_rq_entry *entry;
1033 	ftl_addr addr;
1034 
1035 	if (spdk_unlikely(false == rq->success)) {
1036 		/* IO error, retry writing */
1037 #ifdef SPDK_FTL_RETRY_ON_ERROR
1038 		ftl_writer_queue_rq(&dev->writer_user, rq);
1039 		return;
1040 #else
1041 		ftl_abort();
1042 #endif
1043 	}
1044 
1045 	assert(rq->iter.count);
1046 
1047 	/* Update L2P table */
1048 	addr = rq->io.addr;
1049 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
1050 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
1051 
1052 		if (entry->lba != FTL_LBA_INVALID) {
1053 			ftl_l2p_update_base(dev, entry->lba, addr, entry->addr);
1054 			ftl_l2p_unpin(dev, entry->lba, 1);
1055 			chunk_compaction_advance(chunk, 1);
1056 		} else {
1057 			assert(entry->addr == FTL_ADDR_INVALID);
1058 		}
1059 
1060 		addr = ftl_band_next_addr(band, addr, 1);
1061 		compaction_process_invalidate_entry(entry);
1062 	}
1063 
1064 	compactor_deactivate(compactor);
1065 }
1066 
1067 static void
1068 compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
1069 {
1070 	struct ftl_rq *rq = compactor->rq;
1071 	struct spdk_ftl_dev *dev = rq->dev;
1072 	struct ftl_rq_entry *entry;
1073 	ftl_addr current_addr;
1074 	uint64_t skip = 0;
1075 
1076 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
1077 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
1078 		union ftl_md_vss *md = entry->io_md;
1079 
1080 		if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
1081 			skip++;
1082 			compaction_process_invalidate_entry(entry);
1083 			chunk_compaction_advance(chunk, 1);
1084 			continue;
1085 		}
1086 
1087 		current_addr = ftl_l2p_get(dev, md->nv_cache.lba);
1088 		if (current_addr == entry->addr) {
1089 			entry->lba = md->nv_cache.lba;
1090 			entry->seq_id = chunk->md->seq_id;
1091 		} else {
1092 			/* This address has already been invalidated, just skip this block */
1093 			chunk_compaction_advance(chunk, 1);
1094 			ftl_l2p_unpin(dev, md->nv_cache.lba, 1);
1095 			compaction_process_invalidate_entry(entry);
1096 			skip++;
1097 		}
1098 	}
1099 
1100 	if (skip < rq->iter.count) {
1101 		/*
1102 		 * Request contains data to be placed on FTL, compact it
1103 		 */
1104 		ftl_writer_queue_rq(&dev->writer_user, rq);
1105 	} else {
1106 		compactor_deactivate(compactor);
1107 	}
1108 }
1109 
1110 static void
1111 compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor)
1112 {
1113 	if (!compactor) {
1114 		return;
1115 	}
1116 
1117 	ftl_rq_del(compactor->rq);
1118 	free(compactor);
1119 }
1120 
1121 static struct ftl_nv_cache_compactor *
1122 compactor_alloc(struct spdk_ftl_dev *dev)
1123 {
1124 	struct ftl_nv_cache_compactor *compactor;
1125 	struct ftl_rq_entry *entry;
1126 
1127 	compactor = calloc(1, sizeof(*compactor));
1128 	if (!compactor) {
1129 		goto error;
1130 	}
1131 
1132 	/* Allocate a helper request for reading */
1133 	compactor->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
1134 	if (!compactor->rq) {
1135 		goto error;
1136 	}
1137 
1138 	compactor->nv_cache = &dev->nv_cache;
1139 	compactor->rq->owner.priv = compactor;
1140 	compactor->rq->owner.cb = compaction_process_ftl_done;
1141 	compactor->rq->owner.compaction = true;
1142 
1143 	FTL_RQ_ENTRY_LOOP(compactor->rq, entry, compactor->rq->num_blocks) {
1144 		compaction_process_invalidate_entry(entry);
1145 	}
1146 
1147 	return compactor;
1148 
1149 error:
1150 	compactor_free(dev, compactor);
1151 	return NULL;
1152 }
1153 
1154 static void
1155 ftl_nv_cache_submit_cb_done(struct ftl_io *io)
1156 {
1157 	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1158 
1159 	chunk_advance_blocks(nv_cache, io->nv_cache_chunk, io->num_blocks);
1160 	io->nv_cache_chunk = NULL;
1161 
1162 	ftl_mempool_put(nv_cache->md_pool, io->md);
1163 	ftl_io_complete(io);
1164 }
1165 
1166 static void
1167 ftl_nv_cache_l2p_update(struct ftl_io *io)
1168 {
1169 	struct spdk_ftl_dev *dev = io->dev;
1170 	ftl_addr next_addr = io->addr;
1171 	size_t i;
1172 
1173 	for (i = 0; i < io->num_blocks; ++i, ++next_addr) {
1174 		ftl_l2p_update_cache(dev, ftl_io_get_lba(io, i), next_addr, io->map[i]);
1175 	}
1176 
1177 	ftl_l2p_unpin(dev, io->lba, io->num_blocks);
1178 	ftl_nv_cache_submit_cb_done(io);
1179 }
1180 
1181 static void
1182 ftl_nv_cache_submit_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1183 {
1184 	struct ftl_io *io = cb_arg;
1185 
1186 	ftl_stats_bdev_io_completed(io->dev, FTL_STATS_TYPE_USER, bdev_io);
1187 
1188 	spdk_bdev_free_io(bdev_io);
1189 
1190 	if (spdk_unlikely(!success)) {
1191 		FTL_ERRLOG(io->dev, "Non-volatile cache write failed at %"PRIx64"\n",
1192 			   io->addr);
1193 		io->status = -EIO;
1194 		ftl_l2p_unpin(io->dev, io->lba, io->num_blocks);
1195 		ftl_nv_cache_submit_cb_done(io);
1196 	} else {
1197 		ftl_nv_cache_l2p_update(io);
1198 	}
1199 }
1200 
1201 static void
1202 nv_cache_write(void *_io)
1203 {
1204 	struct ftl_io *io = _io;
1205 	struct spdk_ftl_dev *dev = io->dev;
1206 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1207 	int rc;
1208 
1209 	rc = spdk_bdev_writev_blocks_with_md(nv_cache->bdev_desc, nv_cache->cache_ioch,
1210 					     io->iov, io->iov_cnt, io->md,
1211 					     ftl_addr_to_nvc_offset(dev, io->addr), io->num_blocks,
1212 					     ftl_nv_cache_submit_cb, io);
1213 	if (spdk_unlikely(rc)) {
1214 		if (rc == -ENOMEM) {
1215 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
1216 			io->bdev_io_wait.bdev = bdev;
1217 			io->bdev_io_wait.cb_fn = nv_cache_write;
1218 			io->bdev_io_wait.cb_arg = io;
1219 			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &io->bdev_io_wait);
1220 		} else {
1221 			ftl_abort();
1222 		}
1223 	}
1224 }
1225 
1226 static void
1227 ftl_nv_cache_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
1228 {
1229 	struct ftl_io *io = pin_ctx->cb_ctx;
1230 	size_t i;
1231 
1232 	if (spdk_unlikely(status != 0)) {
1233 		/* Retry on the internal L2P fault */
1234 		FTL_ERRLOG(dev, "Cannot PIN LBA for NV cache write failed at %"PRIx64"\n",
1235 			   io->addr);
1236 		io->status = -EAGAIN;
1237 		ftl_nv_cache_submit_cb_done(io);
1238 		return;
1239 	}
1240 
1241 	/* Remember previous l2p mapping to resolve conflicts in case of outstanding write-after-write */
1242 	for (i = 0; i < io->num_blocks; ++i) {
1243 		io->map[i] = ftl_l2p_get(dev, ftl_io_get_lba(io, i));
1244 	}
1245 
1246 	assert(io->iov_pos == 0);
1247 
1248 	ftl_trace_submission(io->dev, io, io->addr, io->num_blocks);
1249 
1250 	nv_cache_write(io);
1251 }
1252 
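/*
 * Write a user IO to the NV cache: grab a VSS metadata buffer, reserve space
 * in an open chunk, fill the per-block metadata, then pin the L2P range
 * before submitting the actual bdev write. Returns false when resources are
 * temporarily unavailable and the request should be resubmitted.
 */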
1253 bool
1254 ftl_nv_cache_write(struct ftl_io *io)
1255 {
1256 	struct spdk_ftl_dev *dev = io->dev;
1257 	uint64_t cache_offset;
1258 
1259 	io->md = ftl_mempool_get(dev->nv_cache.md_pool);
1260 	if (spdk_unlikely(!io->md)) {
1261 		return false;
1262 	}
1263 
1264 	/* Reserve area on the write buffer cache */
1265 	cache_offset = ftl_nv_cache_get_wr_buffer(&dev->nv_cache, io);
1266 	if (cache_offset == FTL_LBA_INVALID) {
1267 		/* No free space in NV cache, resubmit request */
1268 		ftl_mempool_put(dev->nv_cache.md_pool, io->md);
1269 		return false;
1270 	}
1271 	io->addr = ftl_addr_from_nvc_offset(dev, cache_offset);
1272 	io->nv_cache_chunk = dev->nv_cache.chunk_current;
1273 
1274 	ftl_nv_cache_fill_md(io);
1275 	ftl_l2p_pin(io->dev, io->lba, io->num_blocks,
1276 		    ftl_nv_cache_pin_cb, io,
1277 		    &io->l2p_pin_ctx);
1278 
1279 	dev->nv_cache.throttle.blocks_submitted += io->num_blocks;
1280 
1281 	return true;
1282 }
1283 
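/*
 * Read num_blocks from the NV cache starting at addr (which must map into
 * the cache) into the IO's current iovec.
 *
 * Illustrative call sketch (hypothetical callback, not part of this file):
 *
 *   static void read_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
 *   {
 *           spdk_bdev_free_io(bdev_io);
 *           // complete the parent ftl_io based on success here
 *   }
 *
 *   if (ftl_nv_cache_read(io, io->addr, io->num_blocks, read_cb, io)) {
 *           // non-zero return means submission failed, e.g. -ENOMEM
 *   }
 */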
1284 int
1285 ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
1286 		  spdk_bdev_io_completion_cb cb, void *cb_arg)
1287 {
1288 	int rc;
1289 	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1290 
1291 	assert(ftl_addr_in_nvc(io->dev, addr));
1292 
1293 	rc = ftl_nv_cache_bdev_read_blocks_with_md(io->dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1294 			ftl_io_iovec_addr(io), NULL, ftl_addr_to_nvc_offset(io->dev, addr),
1295 			num_blocks, cb, cb_arg);
1296 
1297 	return rc;
1298 }
1299 
1300 bool
1301 ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache)
1302 {
1303 	if (nv_cache->compaction_active_count) {
1304 		return false;
1305 	}
1306 
1307 	if (nv_cache->chunk_open_count > 0) {
1308 		return false;
1309 	}
1310 
1311 	if (is_compaction_required_for_upgrade(nv_cache)) {
1312 		return false;
1313 	}
1314 
1315 	return true;
1316 }
1317 
1318 void
1319 ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
1320 		      uint64_t offset, uint64_t lba)
1321 {
1322 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1323 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1324 
1325 	ftl_lba_store(dev, p2l_map->chunk_map, offset, lba);
1326 }
1327 
1328 uint64_t
1329 ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset)
1330 {
1331 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1332 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1333 
1334 	return ftl_lba_load(dev, p2l_map->chunk_map, offset);
1335 }
1336 
1337 static void
1338 ftl_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
1339 {
1340 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1341 	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1342 	uint64_t offset;
1343 
1344 	offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
1345 	ftl_chunk_map_set_lba(chunk, offset, lba);
1346 }
1347 
1348 struct ftl_nv_cache_chunk *
1349 ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
1350 {
1351 	struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks;
1352 	uint64_t chunk_idx;
1353 	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1354 
1355 	assert(chunk != NULL);
1356 	chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks;
1357 	chunk += chunk_idx;
1358 
1359 	return chunk;
1360 }
1361 
1362 void
1363 ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
1364 {
1365 	struct ftl_nv_cache_chunk *chunk;
1366 
1367 	chunk = ftl_nv_cache_get_chunk_from_addr(dev, addr);
1368 
1369 	assert(lba != FTL_LBA_INVALID);
1370 
1371 	ftl_chunk_set_addr(chunk, lba, addr);
1372 	ftl_bitmap_set(dev->valid_map, addr);
1373 }
1374 
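/*
 * Proportional controller for user write throttling. The error term is the
 * distance from the free-chunk target, normalized to the chunk count:
 *   err = (chunk_free_count - chunk_free_target) / chunk_count
 *   modifier = clamp(KP * err, MODIFIER_MIN, MODIFIER_MAX)
 * The per-interval submission limit then tracks the measured compaction
 * bandwidth: compaction_sma * interval_tsc / FTL_BLOCK_SIZE, scaled by
 * (1 + modifier). With no compaction data the limit is left unbounded.
 */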
1375 static void
1376 ftl_nv_cache_throttle_update(struct ftl_nv_cache *nv_cache)
1377 {
1378 	double err;
1379 	double modifier;
1380 
1381 	err = ((double)nv_cache->chunk_free_count - nv_cache->chunk_free_target) / nv_cache->chunk_count;
1382 	modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP * err;
1383 
1384 	if (modifier < FTL_NV_CACHE_THROTTLE_MODIFIER_MIN) {
1385 		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MIN;
1386 	} else if (modifier > FTL_NV_CACHE_THROTTLE_MODIFIER_MAX) {
1387 		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MAX;
1388 	}
1389 
1390 	if (spdk_unlikely(nv_cache->compaction_sma == 0 || nv_cache->compaction_active_count == 0)) {
1391 		nv_cache->throttle.blocks_submitted_limit = UINT64_MAX;
1392 	} else {
1393 		double blocks_per_interval = nv_cache->compaction_sma * nv_cache->throttle.interval_tsc /
1394 					     FTL_BLOCK_SIZE;
1395 		nv_cache->throttle.blocks_submitted_limit = blocks_per_interval * (1.0 + modifier);
1396 	}
1397 }
1398 
1399 static void
1400 ftl_nv_cache_process_throttle(struct ftl_nv_cache *nv_cache)
1401 {
1402 	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
1403 
1404 	if (spdk_unlikely(!nv_cache->throttle.start_tsc)) {
1405 		nv_cache->throttle.start_tsc = tsc;
1406 	} else if (tsc - nv_cache->throttle.start_tsc >= nv_cache->throttle.interval_tsc) {
1407 		ftl_nv_cache_throttle_update(nv_cache);
1408 		nv_cache->throttle.start_tsc = tsc;
1409 		nv_cache->throttle.blocks_submitted = 0;
1410 	}
1411 }
1412 
1413 static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk);
1414 
1415 void
1416 ftl_nv_cache_process(struct spdk_ftl_dev *dev)
1417 {
1418 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1419 
1420 	assert(dev->nv_cache.bdev_desc);
1421 
1422 	if (nv_cache->chunk_open_count < FTL_MAX_OPEN_CHUNKS && spdk_likely(!nv_cache->halt) &&
1423 	    !TAILQ_EMPTY(&nv_cache->chunk_free_list)) {
1424 		struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);
1425 		TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
1426 		TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
1427 		nv_cache->chunk_free_count--;
1428 		chunk->md->seq_id = ftl_get_next_seq_id(dev);
1429 		ftl_chunk_open(chunk);
1430 		ftl_add_io_activity(dev);
1431 	}
1432 
1433 	compaction_process(nv_cache);
1434 	ftl_chunk_persist_free_state(nv_cache);
1435 	ftl_nv_cache_process_throttle(nv_cache);
1436 }
1437 
1438 static bool
1439 ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
1440 {
1441 	if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
1442 		return true;
1443 	} else {
1444 		return false;
1445 	}
1446 }
1447 
1448 bool
1449 ftl_nv_cache_throttle(struct spdk_ftl_dev *dev)
1450 {
1451 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1452 
1453 	if (dev->nv_cache.throttle.blocks_submitted >= nv_cache->throttle.blocks_submitted_limit ||
1454 	    ftl_nv_cache_full(nv_cache)) {
1455 		return true;
1456 	}
1457 
1458 	return false;
1459 }
1460 
1461 static void
1462 chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk)
1463 {
1464 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1465 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1466 
1467 	ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1468 	p2l_map->chunk_map = NULL;
1469 
1470 	ftl_chunk_free_md_entry(chunk);
1471 }
1472 
1473 int
1474 ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache)
1475 {
1476 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1477 	struct ftl_nv_cache_chunk *chunk;
1478 	int status = 0;
1479 	uint64_t i;
1480 
1481 	assert(nv_cache->chunk_open_count == 0);
1482 
1483 	if (nv_cache->compaction_active_count) {
1484 		FTL_ERRLOG(dev, "Cannot save NV cache state, compaction in progress\n");
1485 		return -EINVAL;
1486 	}
1487 
1488 	chunk = nv_cache->chunks;
1489 	if (!chunk) {
1490 		FTL_ERRLOG(dev, "Cannot save NV cache state, no NV cache metadata\n");
1491 		return -ENOMEM;
1492 	}
1493 
1494 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1495 		nvc_validate_md(nv_cache, chunk->md);
1496 
1497 		if (chunk->md->read_pointer)  {
1498 			/* Only full chunks can be compacted */
1499 			if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1500 				assert(0);
1501 				status = -EINVAL;
1502 				break;
1503 			}
1504 
1505 			/*
1506 			 * Chunk in the middle of compaction, start over after
1507 			 * load
1508 			 */
1509 			chunk->md->read_pointer = chunk->md->blocks_compacted = 0;
1510 		} else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
1511 			/* Full chunk */
1512 		} else if (0 == chunk->md->blocks_written) {
1513 			/* Empty chunk */
1514 		} else {
1515 			assert(0);
1516 			status = -EINVAL;
1517 			break;
1518 		}
1519 	}
1520 
1521 	if (status) {
1522 		FTL_ERRLOG(dev, "Cannot save NV cache state, inconsistent NV cache"
1523 			   "metadata\n");
1524 	}
1525 
1526 	return status;
1527 }
1528 
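/*
 * Compare chunks by open sequence ID. seq_id is 64-bit, so an explicit
 * three-way comparison is used instead of returning the difference, which
 * would be truncated to int.
 */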
1529 static int
1530 sort_chunks_cmp(const void *a, const void *b)
1531 {
1532 	struct ftl_nv_cache_chunk *a_chunk = *(struct ftl_nv_cache_chunk **)a;
1533 	struct ftl_nv_cache_chunk *b_chunk = *(struct ftl_nv_cache_chunk **)b;
1534 
1535 	return (a_chunk->md->seq_id > b_chunk->md->seq_id) - (a_chunk->md->seq_id < b_chunk->md->seq_id);
1536 }
1537 
1538 static int
1539 sort_chunks(struct ftl_nv_cache *nv_cache)
1540 {
1541 	struct ftl_nv_cache_chunk **chunks_list;
1542 	struct ftl_nv_cache_chunk *chunk;
1543 	uint32_t i;
1544 
1545 	if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
1546 		return 0;
1547 	}
1548 
1549 	chunks_list = calloc(nv_cache->chunk_full_count,
1550 			     sizeof(chunks_list[0]));
1551 	if (!chunks_list) {
1552 		return -ENOMEM;
1553 	}
1554 
1555 	i = 0;
1556 	TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) {
1557 		chunks_list[i] = chunk;
1558 		i++;
1559 	}
1560 	assert(i == nv_cache->chunk_full_count);
1561 
1562 	qsort(chunks_list, nv_cache->chunk_full_count, sizeof(chunks_list[0]),
1563 	      sort_chunks_cmp);
1564 
1565 	TAILQ_INIT(&nv_cache->chunk_full_list);
1566 	for (i = 0; i < nv_cache->chunk_full_count; i++) {
1567 		chunk = chunks_list[i];
1568 		TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1569 	}
1570 
1571 	free(chunks_list);
1572 	return 0;
1573 }
1574 
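/*
 * Allocate the chunk's P2L map and its DMA-able metadata entry from their
 * pools, and initialize every P2L entry to FTL_LBA_INVALID.
 */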
1575 static int
1576 chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk)
1577 {
1578 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1579 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1580 
1581 	assert(p2l_map->ref_cnt == 0);
1582 	assert(p2l_map->chunk_map == NULL);
1583 
1584 	p2l_map->chunk_map = ftl_mempool_get(nv_cache->p2l_pool);
1585 
1586 	if (!p2l_map->chunk_map) {
1587 		return -ENOMEM;
1588 	}
1589 
1590 	if (ftl_chunk_alloc_md_entry(chunk)) {
1591 		ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1592 		p2l_map->chunk_map = NULL;
1593 		return -ENOMEM;
1594 	}
1595 
1596 	/* Set the P2L to FTL_LBA_INVALID */
1597 	memset(p2l_map->chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
1598 
1599 	return 0;
1600 }
1601 
1602 int
1603 ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)
1604 {
1605 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1606 	struct ftl_nv_cache_chunk *chunk;
1607 	uint64_t chunks_number, offset, i;
1608 	int status = 0;
1609 	bool active;
1610 
1611 	nv_cache->chunk_current = NULL;
1612 	TAILQ_INIT(&nv_cache->chunk_free_list);
1613 	TAILQ_INIT(&nv_cache->chunk_full_list);
1614 	TAILQ_INIT(&nv_cache->chunk_inactive_list);
1615 	nv_cache->chunk_full_count = 0;
1616 	nv_cache->chunk_free_count = 0;
1617 	nv_cache->chunk_inactive_count = 0;
1618 
1619 	assert(nv_cache->chunk_open_count == 0);
1620 	offset = nvc_data_offset(nv_cache);
1621 	if (!nv_cache->chunks) {
1622 		FTL_ERRLOG(dev, "No NV cache metadata\n");
1623 		return -1;
1624 	}
1625 
1626 	if (dev->sb->upgrade_ready) {
1627 		/*
1628 		 * During upgrade some transitions are allowed:
1629 		 *
1630 		 * 1. FREE -> INACTIVE
1631 		 * 2. INACTIVE -> FREE
1632 		 */
1633 		chunk = nv_cache->chunks;
1634 		for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1635 			active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1636 
1637 			if (chunk->md->state == FTL_CHUNK_STATE_FREE) {
1638 				if (!active) {
1639 					chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
1640 				}
1641 			} else if (chunk->md->state == FTL_CHUNK_STATE_INACTIVE) {
1642 				if (active) {
1643 					chunk->md->state = FTL_CHUNK_STATE_FREE;
1644 				}
1645 			}
1646 		}
1647 	}
1648 
1649 	chunk = nv_cache->chunks;
1650 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1651 		chunk->nv_cache = nv_cache;
1652 		nvc_validate_md(nv_cache, chunk->md);
1653 
1654 		if (offset != chunk->offset) {
1655 			status = -EINVAL;
1656 			goto error;
1657 		}
1658 
1659 		if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
1660 			status = -EINVAL;
1661 			goto error;
1662 		}
1663 
1664 		active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1665 		if (false == active) {
1666 			if (chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
1667 				status = -EINVAL;
1668 				goto error;
1669 			}
1670 		}
1671 
1672 		switch (chunk->md->state) {
1673 		case FTL_CHUNK_STATE_FREE:
1674 			if (chunk->md->blocks_written || chunk->md->write_pointer) {
1675 				status = -EINVAL;
1676 				goto error;
1677 			}
1678 			/* Chunk is empty, move it to the free list */
1679 			TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
1680 			nv_cache->chunk_free_count++;
1681 			break;
1682 		case FTL_CHUNK_STATE_OPEN:
1683 			/* All chunks need to be closed at this point */
1684 			status = -EINVAL;
1685 			goto error;
1686 			break;
1687 		case FTL_CHUNK_STATE_CLOSED:
1688 			if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1689 				status = -EINVAL;
1690 				goto error;
1691 			}
1692 			/* Chunk is full, move it to the full list */
1693 			TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1694 			nv_cache->chunk_full_count++;
1695 			break;
1696 		case FTL_CHUNK_STATE_INACTIVE:
1697 			TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
1698 			nv_cache->chunk_inactive_count++;
1699 			break;
1700 		default:
1701 			status = -EINVAL;
1702 			FTL_ERRLOG(dev, "Invalid chunk state\n");
1703 			goto error;
1704 		}
1705 
1706 		offset += nv_cache->chunk_blocks;
1707 	}
1708 
1709 	chunks_number = nv_cache->chunk_free_count + nv_cache->chunk_full_count +
1710 			nv_cache->chunk_inactive_count;
1711 	assert(nv_cache->chunk_current == NULL);
1712 
1713 	if (chunks_number != nv_cache->chunk_count) {
1714 		FTL_ERRLOG(dev, "Inconsistent NV cache metadata\n");
1715 		status = -EINVAL;
1716 		goto error;
1717 	}
1718 
1719 	status = sort_chunks(nv_cache);
1720 	if (status) {
1721 		FTL_ERRLOG(dev, "FTL NV Cache: sorting chunks ERROR\n");
1722 	}
1723 
1724 	FTL_NOTICELOG(dev, "FTL NV Cache: full chunks = %lu, empty chunks = %lu\n",
1725 		      nv_cache->chunk_full_count, nv_cache->chunk_free_count);
1726 
1727 	if (0 == status) {
1728 		FTL_NOTICELOG(dev, "FTL NV Cache: state loaded successfully\n");
1729 	} else {
1730 		FTL_ERRLOG(dev, "FTL NV Cache: loading state ERROR\n");
1731 	}
1732 
1733 	/* The number of active/inactive chunks calculated at initialization can change at this point due to metadata
1734 	 * upgrade. Recalculate the thresholds that depend on active chunk count.
1735 	 */
1736 	ftl_nv_cache_init_update_limits(dev);
1737 error:
1738 	return status;
1739 }
1740 
1741 void
1742 ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
1743 			    uint64_t *close_seq_id)
1744 {
1745 	uint64_t i, o_seq_id = 0, c_seq_id = 0;
1746 	struct ftl_nv_cache_chunk *chunk;
1747 
1748 	chunk = nv_cache->chunks;
1749 	assert(chunk);
1750 
1751 	/* Iterate over chunks and get their max open and close seq id */
1752 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1753 		o_seq_id = spdk_max(o_seq_id, chunk->md->seq_id);
1754 		c_seq_id = spdk_max(c_seq_id, chunk->md->close_seq_id);
1755 	}
1756 
1757 	*open_seq_id = o_seq_id;
1758 	*close_seq_id = c_seq_id;
1759 }
1760 
1761 typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status);
1762 
1763 static void
1764 write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1765 {
1766 	struct ftl_basic_rq *brq = arg;
1767 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1768 
1769 	ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1770 
1771 	brq->success = success;
1772 	if (spdk_likely(success)) {
1773 		chunk_advance_blocks(chunk->nv_cache, chunk, brq->num_blocks);
1774 	}
1775 
1776 	spdk_bdev_free_io(bdev_io);
1777 	brq->owner.cb(brq);
1778 }
1779 
1780 static void
1781 _ftl_chunk_basic_rq_write(void *_brq)
1782 {
1783 	struct ftl_basic_rq *brq = _brq;
1784 	struct ftl_nv_cache *nv_cache = brq->io.chunk->nv_cache;
1785 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1786 	int rc;
1787 
1788 	rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1789 			brq->io_payload, NULL, brq->io.addr,
1790 			brq->num_blocks, write_brq_end, brq);
1791 	if (spdk_unlikely(rc)) {
1792 		if (rc == -ENOMEM) {
1793 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
1794 			brq->io.bdev_io_wait.bdev = bdev;
1795 			brq->io.bdev_io_wait.cb_fn = _ftl_chunk_basic_rq_write;
1796 			brq->io.bdev_io_wait.cb_arg = brq;
1797 			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &brq->io.bdev_io_wait);
1798 		} else {
1799 			ftl_abort();
1800 		}
1801 	}
1802 }
1803 
1804 static void
1805 ftl_chunk_basic_rq_write(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1806 {
1807 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1808 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1809 
1810 	brq->io.chunk = chunk;
1811 	brq->success = false;
1812 
1813 	_ftl_chunk_basic_rq_write(brq);
1814 
1815 	chunk->md->write_pointer += brq->num_blocks;
1816 	dev->stats.io_activity_total += brq->num_blocks;
1817 }
1818 
1819 static void
1820 read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1821 {
1822 	struct ftl_basic_rq *brq = arg;
1823 
1824 	ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1825 
1826 	brq->success = success;
1827 
1828 	brq->owner.cb(brq);
1829 	spdk_bdev_free_io(bdev_io);
1830 }
1831 
1832 static int
1833 ftl_chunk_basic_rq_read(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1834 {
1835 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1836 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1837 	int rc;
1838 
1839 	brq->io.chunk = chunk;
1840 	brq->success = false;
1841 
1842 	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1843 			brq->io_payload, NULL, brq->io.addr, brq->num_blocks, read_brq_end, brq);
1844 
1845 	if (spdk_likely(!rc)) {
1846 		dev->stats.io_activity_total += brq->num_blocks;
1847 	}
1848 
1849 	return rc;
1850 }
1851 
1852 static void
1853 chunk_open_cb(int status, void *ctx)
1854 {
1855 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1856 
1857 	if (spdk_unlikely(status)) {
1858 #ifdef SPDK_FTL_RETRY_ON_ERROR
1859 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1860 		return;
1861 #else
1862 		ftl_abort();
1863 #endif
1864 	}
1865 
1866 	chunk->md->state = FTL_CHUNK_STATE_OPEN;
1867 }
1868 
1869 static void
1870 ftl_chunk_open(struct ftl_nv_cache_chunk *chunk)
1871 {
1872 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1873 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1874 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
1875 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1876 
1877 	if (chunk_alloc_p2l_map(chunk)) {
1878 		assert(0);
		/*
		 * We control the number of open chunks and it must be consistent with the
		 * size of the chunk P2L map pool
		 */
1883 		ftl_abort();
1884 		return;
1885 	}
1886 
1887 	chunk->nv_cache->chunk_open_count++;
1888 
1889 	assert(chunk->md->write_pointer == 0);
1890 	assert(chunk->md->blocks_written == 0);
1891 
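	/* Stage the chunk metadata in the DMA-able buffer and persist the OPEN state;
	 * the in-core state flips to OPEN only in chunk_open_cb() once the write completes.
	 */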
1892 	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1893 	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_OPEN;
1894 	p2l_map->chunk_dma_md->p2l_map_checksum = 0;
1895 
1896 	ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md,
1897 			       NULL, chunk_open_cb, chunk,
1898 			       &chunk->md_persist_entry_ctx);
1899 }
1900 
1901 static void
1902 chunk_close_cb(int status, void *ctx)
1903 {
1904 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1905 
1906 	assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);
1907 
1908 	if (spdk_likely(!status)) {
1909 		chunk->md->p2l_map_checksum = chunk->p2l_map.chunk_dma_md->p2l_map_checksum;
1910 		chunk_free_p2l_map(chunk);
1911 
1912 		assert(chunk->nv_cache->chunk_open_count > 0);
1913 		chunk->nv_cache->chunk_open_count--;
1914 
		/* Chunk is full, move it to the full list */
1916 		TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
1917 		chunk->nv_cache->chunk_full_count++;
1918 
1919 		chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
1920 
1921 		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1922 	} else {
1923 #ifdef SPDK_FTL_RETRY_ON_ERROR
1924 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1925 #else
1926 		ftl_abort();
1927 #endif
1928 	}
1929 }
1930 
1931 static void
1932 chunk_map_write_cb(struct ftl_basic_rq *brq)
1933 {
1934 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1935 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1936 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1937 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
1938 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1939 	uint32_t chunk_map_crc;
1940 
1941 	if (spdk_likely(brq->success)) {
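		/* The P2L map reached the cache device; checksum it and persist the chunk
		 * metadata with the CLOSED state (completed in chunk_close_cb()).
		 */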
1942 		chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
1943 						   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
1944 		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1945 		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
1946 		p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
1947 		ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk->p2l_map.chunk_dma_md,
1948 				       NULL, chunk_close_cb, chunk,
1949 				       &chunk->md_persist_entry_ctx);
1950 	} else {
1951 #ifdef SPDK_FTL_RETRY_ON_ERROR
1952 		/* retry */
1953 		chunk->md->write_pointer -= brq->num_blocks;
1954 		ftl_chunk_basic_rq_write(chunk, brq);
1955 #else
1956 		ftl_abort();
1957 #endif
1958 	}
1959 }
1960 
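/*
 * Close a chunk: write its P2L map as tail metadata at the end of the chunk data,
 * then persist the CLOSED chunk metadata from chunk_map_write_cb().
 */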
1961 static void
1962 ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
1963 {
1964 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1965 	struct ftl_basic_rq *brq = &chunk->metadata_rq;
1966 	void *metadata = chunk->p2l_map.chunk_map;
1967 
1968 	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
1969 	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
1970 	ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk);
1971 
1972 	assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
1973 	brq->io.addr = chunk->offset + chunk->md->write_pointer;
1974 
1975 	ftl_chunk_basic_rq_write(chunk, brq);
1976 }
1977 
1978 static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
1979 				  void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx);
1980 static void read_tail_md_cb(struct ftl_basic_rq *brq);
1981 static void recover_open_chunk_cb(struct ftl_basic_rq *brq);
1982 
1983 static void
1984 restore_chunk_close_cb(int status, void *ctx)
1985 {
1986 	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)ctx;
1987 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
1988 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1989 
1990 	if (spdk_unlikely(status)) {
1991 		parent->success = false;
1992 	} else {
1993 		chunk->md->p2l_map_checksum = p2l_map->chunk_dma_md->p2l_map_checksum;
1994 		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1995 	}
1996 
1997 	read_tail_md_cb(parent);
1998 }
1999 
2000 static void
2001 restore_fill_p2l_map_cb(struct ftl_basic_rq *parent)
2002 {
2003 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
2004 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
2005 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2006 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2007 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
2008 	uint32_t chunk_map_crc;
2009 
	/* Restore the original owner callback (recover_open_chunk_cb) */
2011 	ftl_basic_rq_set_owner(parent, recover_open_chunk_cb, parent->owner.priv);
2012 
2013 	if (spdk_unlikely(!parent->success)) {
2014 		read_tail_md_cb(parent);
2015 		return;
2016 	}
2017 
2018 	chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
2019 					   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
2020 	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
2021 	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
2022 	p2l_map->chunk_dma_md->write_pointer = chunk->nv_cache->chunk_blocks;
2023 	p2l_map->chunk_dma_md->blocks_written = chunk->nv_cache->chunk_blocks;
2024 	p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
2025 
2026 	ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
2027 			       restore_chunk_close_cb, parent, &chunk->md_persist_entry_ctx);
2028 }
2029 
2030 static void
2031 restore_fill_tail_md(struct ftl_basic_rq *parent, struct ftl_nv_cache_chunk *chunk)
2032 {
2033 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2034 	void *metadata;
2035 
2036 	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
2037 
2038 	metadata = chunk->p2l_map.chunk_map;
2039 	ftl_basic_rq_init(dev, parent, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2040 	ftl_basic_rq_set_owner(parent, restore_fill_p2l_map_cb, parent->owner.priv);
2041 
2042 	parent->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2043 	parent->io.chunk = chunk;
2044 
2045 	ftl_chunk_basic_rq_write(chunk, parent);
2046 }
2047 
2048 static void
2049 read_open_chunk_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2050 {
2051 	struct ftl_rq *rq = (struct ftl_rq *)cb_arg;
2052 	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)rq->owner.priv;
2053 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
2054 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
2055 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2056 	union ftl_md_vss *md;
2057 	uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
2058 	uint64_t len = bdev_io->u.bdev.num_blocks;
2059 	ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);
2060 	int rc;
2061 
2062 	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);
2063 
2064 	spdk_bdev_free_io(bdev_io);
2065 
2066 	if (!success) {
2067 		parent->success = false;
2068 		read_tail_md_cb(parent);
2069 		return;
2070 	}
2071 
2072 	while (rq->iter.idx < rq->iter.count) {
2073 		/* Get metadata */
2074 		md = rq->entries[rq->iter.idx].io_md;
2075 		if (md->nv_cache.seq_id != chunk->md->seq_id) {
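			/* The per-block VSS metadata carries the sequence id of the chunk
			 * incarnation that wrote it; a mismatch means the block is stale and
			 * must not yield a valid mapping.
			 */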
2076 			md->nv_cache.lba = FTL_LBA_INVALID;
2077 		}
		/*
		 * The P2L map contains effectively random data at this point (it holds
		 * arbitrary blocks from a potentially unfilled tail MD), so even LBA_INVALID
		 * must be set explicitly
		 */
2082 
		ftl_chunk_set_addr(chunk, md->nv_cache.lba, addr + rq->iter.idx);
2084 		rq->iter.idx++;
2085 	}
2086 
2087 	if (cache_offset + len < chunk->offset + chunk_tail_md_offset(nv_cache)) {
2088 		cache_offset += len;
2089 		len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - cache_offset);
2090 		rq->iter.idx = 0;
2091 		rq->iter.count = len;
2092 
2093 		rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
2094 				nv_cache->cache_ioch,
2095 				rq->io_payload,
2096 				rq->io_md,
2097 				cache_offset, len,
2098 				read_open_chunk_cb,
2099 				rq);
2100 
2101 		if (rc) {
2102 			ftl_rq_del(rq);
2103 			parent->success = false;
2104 			read_tail_md_cb(parent);
2105 			return;
2106 		}
2107 	} else {
2108 		ftl_rq_del(rq);
2109 		restore_fill_tail_md(parent, chunk);
2110 	}
2111 }
2112 
2113 static void
2114 restore_open_chunk(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *parent)
2115 {
2116 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
2117 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
2118 	struct ftl_rq *rq;
2119 	uint64_t addr;
2120 	uint64_t len = dev->xfer_size;
2121 	int rc;
2122 
	/*
	 * The freshly allocated P2L map contains stale data, so prefill it with INVALID LBA.
	 * TODO: this is needed because the tail MD blocks (the P2L map itself) are also
	 * represented in the P2L map, instead of just the user data region
	 */
2127 	memset(chunk->p2l_map.chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
2128 
2129 	/* Need to read user data, recalculate chunk's P2L and write tail md with it */
2130 	rq = ftl_rq_new(dev, dev->nv_cache.md_size);
2131 	if (!rq) {
2132 		parent->success = false;
2133 		read_tail_md_cb(parent);
2134 		return;
2135 	}
2136 
2137 	rq->owner.priv = parent;
2138 	rq->iter.idx = 0;
2139 	rq->iter.count = len;
2140 
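	/* Read the chunk's user data in xfer_size batches starting at the chunk base;
	 * read_open_chunk_cb() consumes each batch and re-arms the next read until the
	 * tail MD offset is reached.
	 */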
2141 	addr = chunk->offset;
2142 
2143 	len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - addr);
2144 
2145 	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
2146 			nv_cache->cache_ioch,
2147 			rq->io_payload,
2148 			rq->io_md,
2149 			addr, len,
2150 			read_open_chunk_cb,
2151 			rq);
2152 
2153 	if (rc) {
2154 		ftl_rq_del(rq);
2155 		parent->success = false;
2156 		read_tail_md_cb(parent);
2157 	}
2158 }
2159 
2160 static void
2161 read_tail_md_cb(struct ftl_basic_rq *brq)
2162 {
2163 	brq->owner.cb(brq);
2164 }
2165 
2166 static int
2167 ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
2168 		       void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx)
2169 {
2170 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2171 	void *metadata;
2172 	int rc;
2173 
2174 	metadata = chunk->p2l_map.chunk_map;
2175 	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2176 	ftl_basic_rq_set_owner(brq, cb, cb_ctx);
2177 
2178 	brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2179 	rc = ftl_chunk_basic_rq_read(chunk, brq);
2180 
2181 	return rc;
2182 }
2183 
2184 struct restore_chunk_md_ctx {
2185 	ftl_chunk_md_cb cb;
2186 	void *cb_ctx;
2187 	int status;
2188 	uint64_t qd;
2189 	uint64_t id;
2190 };
2191 
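/* Sanity check: the per-state chunk counts must add up to the total chunk count. */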
2192 static inline bool
2193 is_chunk_count_valid(struct ftl_nv_cache *nv_cache)
2194 {
2195 	uint64_t chunk_count = 0;
2196 
2197 	chunk_count += nv_cache->chunk_open_count;
2198 	chunk_count += nv_cache->chunk_free_count;
2199 	chunk_count += nv_cache->chunk_full_count;
2200 	chunk_count += nv_cache->chunk_comp_count;
2201 	chunk_count += nv_cache->chunk_inactive_count;
2202 
2203 	return chunk_count == nv_cache->chunk_count;
2204 }
2205 
2206 static void
2207 walk_tail_md_cb(struct ftl_basic_rq *brq)
2208 {
2209 	struct ftl_mngt_process *mngt = brq->owner.priv;
2210 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2211 	struct restore_chunk_md_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
2212 	int rc = 0;
2213 
2214 	if (brq->success) {
2215 		rc = ctx->cb(chunk, ctx->cb_ctx);
2216 	} else {
2217 		rc = -EIO;
2218 	}
2219 
2220 	if (rc) {
2221 		ctx->status = rc;
2222 	}
2223 	ctx->qd--;
2224 	chunk_free_p2l_map(chunk);
2225 	ftl_mngt_continue_step(mngt);
2226 }
2227 
2228 static void
2229 ftl_mngt_nv_cache_walk_tail_md(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2230 			       uint64_t seq_id, ftl_chunk_md_cb cb, void *cb_ctx)
2231 {
2232 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2233 	struct restore_chunk_md_ctx *ctx;
2234 
2235 	ctx = ftl_mngt_get_step_ctx(mngt);
2236 	if (!ctx) {
2237 		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*ctx))) {
2238 			ftl_mngt_fail_step(mngt);
2239 			return;
2240 		}
2241 		ctx = ftl_mngt_get_step_ctx(mngt);
2242 		assert(ctx);
2243 
2244 		ctx->cb = cb;
2245 		ctx->cb_ctx = cb_ctx;
2246 	}
2247 
2248 	/*
2249 	 * This function generates a high queue depth and will utilize ftl_mngt_continue_step during completions to make sure all chunks
2250 	 * are processed before returning an error (if any were found) or continuing on.
2251 	 */
2252 	if (0 == ctx->qd && ctx->id == nvc->chunk_count) {
2253 		if (!is_chunk_count_valid(nvc)) {
			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2255 			assert(false);
2256 			ctx->status = -EINVAL;
2257 		}
2258 
2259 		if (ctx->status) {
2260 			ftl_mngt_fail_step(mngt);
2261 		} else {
2262 			ftl_mngt_next_step(mngt);
2263 		}
2264 		return;
2265 	}
2266 
2267 	while (ctx->id < nvc->chunk_count) {
2268 		struct ftl_nv_cache_chunk *chunk = &nvc->chunks[ctx->id];
2269 		int rc;
2270 
2271 		if (!chunk->recovery) {
2272 			/* This chunk is inactive or empty and not used in recovery */
2273 			ctx->id++;
2274 			continue;
2275 		}
2276 
2277 		if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
2278 			ctx->id++;
2279 			continue;
2280 		}
2281 
2282 		if (chunk_alloc_p2l_map(chunk)) {
			/* No more free P2L maps, break and continue later */
2284 			break;
2285 		}
2286 		ctx->id++;
2287 
2288 		rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, walk_tail_md_cb, mngt);
2289 
2290 		if (0 == rc) {
2291 			ctx->qd++;
2292 		} else {
2293 			chunk_free_p2l_map(chunk);
2294 			ctx->status = rc;
2295 		}
2296 	}
2297 
2298 	if (0 == ctx->qd) {
		/*
		 * Zero QD can happen when all leftover chunks are in the free state.
		 * Additionally, ftl_chunk_read_tail_md could fail starting with the first IO in a given batch.
		 * To streamline all the potential error handling (since many chunks read their P2L maps at the
		 * same time), we use ftl_mngt_continue_step to arrive at the same step-end check (see the
		 * beginning of this function).
		 */
2305 		ftl_mngt_continue_step(mngt);
2306 	}
}
2309 
2310 void
2311 ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2312 			      ftl_chunk_md_cb cb, void *cb_ctx)
2313 {
2314 	ftl_mngt_nv_cache_walk_tail_md(dev, mngt, dev->sb->ckpt_seq_id, cb, cb_ctx);
2315 }
2316 
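/*
 * Illustrative sketch only, not part of the driver: a minimal ftl_chunk_md_cb as
 * consumed by ftl_mngt_nv_cache_walk_tail_md() above. The walk invokes the callback
 * once per recovered chunk, with that chunk's P2L map already loaded into
 * chunk->p2l_map.chunk_map; a non-zero return value is latched into
 * restore_chunk_md_ctx::status and fails the step. The name and counter below are
 * hypothetical.
 */
static int example_restore_chunk_md_cb(struct ftl_nv_cache_chunk *chunk,
				       void *cb_ctx) __attribute__((unused));

static int
example_restore_chunk_md_cb(struct ftl_nv_cache_chunk *chunk, void *cb_ctx)
{
	uint64_t *chunks_restored = cb_ctx;

	/* At this point the chunk's tail MD has been read successfully. */
	(*chunks_restored)++;

	return 0;
}
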
2317 static void
2318 restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
2319 {
2320 	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
2321 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2322 	struct ftl_nv_cache_chunk *chunk;
2323 	uint64_t i;
2324 
2325 	if (status) {
2326 		/* Restore error, end step */
2327 		ftl_mngt_fail_step(mngt);
2328 		return;
2329 	}
2330 
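	/* All chunks start out on the free list; reclassify them based on the
	 * persisted metadata state.
	 */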
2331 	for (i = 0; i < nvc->chunk_count; i++) {
2332 		chunk = &nvc->chunks[i];
2333 
2334 		if (false == nvc->nvc_type->ops.is_chunk_active(dev, chunk->offset) &&
2335 		    chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
2336 			status = -EINVAL;
2337 			break;
2338 		}
2339 
2340 		if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
2341 			status = -EINVAL;
2342 			break;
2343 		}
2344 
2345 		switch (chunk->md->state) {
2346 		case FTL_CHUNK_STATE_FREE:
2347 			break;
2348 		case FTL_CHUNK_STATE_OPEN:
2349 			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2350 			nvc->chunk_free_count--;
2351 
2352 			TAILQ_INSERT_TAIL(&nvc->chunk_open_list, chunk, entry);
2353 			nvc->chunk_open_count++;
2354 
2355 			/* Chunk is not empty, mark it to be recovered */
2356 			chunk->recovery = true;
2357 			break;
2358 		case FTL_CHUNK_STATE_CLOSED:
2359 			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2360 			nvc->chunk_free_count--;
2361 
2362 			TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2363 			nvc->chunk_full_count++;
2364 
2365 			/* Chunk is not empty, mark it to be recovered */
2366 			chunk->recovery = true;
2367 			break;
2368 		case FTL_CHUNK_STATE_INACTIVE:
2369 			break;
2370 		default:
2371 			status = -EINVAL;
2372 		}
2373 	}
2374 
2375 	if (status) {
2376 		ftl_mngt_fail_step(mngt);
2377 	} else {
2378 		ftl_mngt_next_step(mngt);
2379 	}
2380 }
2381 
2382 void
2383 ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2384 {
2385 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2386 
2387 	md->owner.cb_ctx = mngt;
2388 	md->cb = restore_chunk_state_cb;
2389 	ftl_md_restore(md);
2390 }
2391 
2392 static void
2393 recover_open_chunk_cb(struct ftl_basic_rq *brq)
2394 {
2395 	struct ftl_mngt_process *mngt = brq->owner.priv;
2396 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2397 	struct ftl_nv_cache *nvc = chunk->nv_cache;
2398 	struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
2399 
2400 	chunk_free_p2l_map(chunk);
2401 
2402 	if (!brq->success) {
		FTL_ERRLOG(dev, "Chunk recovery ERROR, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
			   chunk->md->seq_id);
2405 		ftl_mngt_fail_step(mngt);
2406 		return;
2407 	}
2408 
2409 	FTL_NOTICELOG(dev, "Recovered chunk, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2410 		      chunk->md->seq_id);
2411 
2412 	TAILQ_REMOVE(&nvc->chunk_open_list, chunk, entry);
2413 	nvc->chunk_open_count--;
2414 
2415 	TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2416 	nvc->chunk_full_count++;
2417 
	/* This is now a closed chunk */
2419 	chunk->md->write_pointer = nvc->chunk_blocks;
2420 	chunk->md->blocks_written = nvc->chunk_blocks;
2421 
2422 	ftl_mngt_continue_step(mngt);
2423 }
2424 
2425 void
2426 ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2427 {
2428 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2429 	struct ftl_nv_cache_chunk *chunk;
2430 	struct ftl_basic_rq *brq = ftl_mngt_get_step_ctx(mngt);
2431 
2432 	if (!brq) {
2433 		if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2434 			FTL_NOTICELOG(dev, "No open chunks to recover P2L\n");
2435 			ftl_mngt_next_step(mngt);
2436 			return;
2437 		}
2438 
2439 		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*brq))) {
2440 			ftl_mngt_fail_step(mngt);
2441 			return;
2442 		}
2443 		brq = ftl_mngt_get_step_ctx(mngt);
2444 		ftl_basic_rq_set_owner(brq, recover_open_chunk_cb, mngt);
2445 	}
2446 
2447 	if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2448 		if (!is_chunk_count_valid(nvc)) {
			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2450 			ftl_mngt_fail_step(mngt);
2451 			return;
2452 		}
2453 
		/*
		 * All chunks are now loaded and closed; do the final step of
		 * restoring the chunk state
		 */
2458 		if (ftl_nv_cache_load_state(nvc)) {
2459 			ftl_mngt_fail_step(mngt);
2460 		} else {
2461 			ftl_mngt_next_step(mngt);
2462 		}
2463 	} else {
2464 		chunk = TAILQ_FIRST(&nvc->chunk_open_list);
2465 		if (chunk_alloc_p2l_map(chunk)) {
2466 			ftl_mngt_fail_step(mngt);
2467 			return;
2468 		}
2469 
2470 		brq->io.chunk = chunk;
2471 
		FTL_NOTICELOG(dev, "Start recovery of open chunk, offset = %"PRIu64", seq id %"PRIu64"\n",
2473 			      chunk->offset, chunk->md->seq_id);
2474 		restore_open_chunk(chunk, brq);
2475 	}
2476 }
2477 
2478 int
2479 ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache)
2480 {
	/* chunk_current migrates to the closed state when closing; any others should already have
	 * been moved to the free chunk list. We also need to wait for outstanding free-md requests */
2483 	return nv_cache->chunk_open_count == 0 && nv_cache->chunk_free_persist_count == 0;
2484 }
2485 
2486 void
2487 ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
2488 {
2489 	struct ftl_nv_cache_chunk *chunk;
2490 	uint64_t free_space;
2491 
2492 	nv_cache->halt = true;
2493 
	/* Set chunks on the open list back to the free state, since no user data has been written to them */
2495 	while (!TAILQ_EMPTY(&nv_cache->chunk_open_list)) {
2496 		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2497 
2498 		/* Chunks are moved between lists on metadata update submission, but state is changed
2499 		 * on completion. Breaking early in such a case to make sure all the necessary resources
2500 		 * will be freed (during next pass(es) of ftl_nv_cache_halt).
2501 		 */
2502 		if (chunk->md->state != FTL_CHUNK_STATE_OPEN) {
2503 			break;
2504 		}
2505 
2506 		TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
2507 		chunk_free_p2l_map(chunk);
2508 		ftl_nv_cache_chunk_md_initialize(chunk->md);
2509 		assert(nv_cache->chunk_open_count > 0);
2510 		nv_cache->chunk_open_count--;
2511 	}
2512 
	/* Close the current chunk by skipping all unwritten blocks */
2514 	chunk = nv_cache->chunk_current;
2515 	if (chunk != NULL) {
2516 		nv_cache->chunk_current = NULL;
2517 		if (chunk_is_closed(chunk)) {
2518 			return;
2519 		}
2520 
2521 		free_space = chunk_get_free_space(nv_cache, chunk);
2522 		chunk->md->blocks_skipped = free_space;
2523 		chunk->md->blocks_written += free_space;
2524 		chunk->md->write_pointer += free_space;
2525 		ftl_chunk_close(chunk);
2526 	}
2527 }
2528 
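/*
 * Acquire a sequence id for a trim operation. The chunk currently accepting user
 * writes (if any) is padded out (remaining blocks counted as skipped) and closed,
 * so the returned id orders the trim after all previously admitted user data.
 */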
2529 uint64_t
2530 ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache)
2531 {
2532 	struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
2533 	uint64_t seq_id, free_space;
2534 
2535 	if (!chunk) {
2536 		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2537 		if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
2538 			return chunk->md->seq_id;
2539 		} else {
2540 			return 0;
2541 		}
2542 	}
2543 
2544 	if (chunk_is_closed(chunk)) {
2545 		return 0;
2546 	}
2547 
2548 	seq_id = nv_cache->chunk_current->md->seq_id;
2549 	free_space = chunk_get_free_space(nv_cache, chunk);
2550 
2551 	chunk->md->blocks_skipped = free_space;
2552 	chunk->md->blocks_written += free_space;
2553 	chunk->md->write_pointer += free_space;
2554 	if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
2555 		ftl_chunk_close(chunk);
2556 	}
2557 	nv_cache->chunk_current = NULL;
2558 
2559 	seq_id++;
2560 	return seq_id;
2561 }
2562 
2563 static double
2564 ftl_nv_cache_get_chunk_utilization(struct ftl_nv_cache *nv_cache,
2565 				   struct ftl_nv_cache_chunk *chunk)
2566 {
2567 	double capacity = nv_cache->chunk_blocks;
2568 	double used = chunk->md->blocks_written + chunk->md->blocks_skipped;
2569 
2570 	return used / capacity;
2571 }
2572 
2573 static const char *
2574 ftl_nv_cache_get_chunk_state_name(struct ftl_nv_cache_chunk *chunk)
2575 {
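	/* Indexed by chunk state; the order must match the FTL_CHUNK_STATE_* values. */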
2576 	static const char *names[] = {
2577 		"FREE", "OPEN", "CLOSED", "INACTIVE"
2578 	};
2579 
2580 	assert(chunk->md->state < SPDK_COUNTOF(names));
2581 	if (chunk->md->state < SPDK_COUNTOF(names)) {
2582 		return names[chunk->md->state];
2583 	} else {
2584 		assert(false);
2585 		return "?";
2586 	}
2587 }
2588 
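/*
 * Dump the cache device as an FTL property. The emitted JSON has the shape
 * (values illustrative):
 *   "type": "<nvc type name>",
 *   "chunks": [ { "id": 0, "state": "OPEN", "utilization": 0.25 }, ... ]
 */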
2589 static void
2590 ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev, const struct ftl_property *property,
2591 			    struct spdk_json_write_ctx *w)
2592 {
2593 	uint64_t i;
2594 	struct ftl_nv_cache_chunk *chunk;
2595 
2596 	spdk_json_write_named_string(w, "type", dev->nv_cache.nvc_type->name);
2597 	spdk_json_write_named_array_begin(w, "chunks");
2598 	for (i = 0, chunk = dev->nv_cache.chunks; i < dev->nv_cache.chunk_count; i++, chunk++) {
2599 		spdk_json_write_object_begin(w);
2600 		spdk_json_write_named_uint64(w, "id", i);
2601 		spdk_json_write_named_string(w, "state", ftl_nv_cache_get_chunk_state_name(chunk));
2602 		spdk_json_write_named_double(w, "utilization",
2603 					     ftl_nv_cache_get_chunk_utilization(&dev->nv_cache, chunk));
2604 		spdk_json_write_object_end(w);
2605 	}
2606 	spdk_json_write_array_end(w);
2607 }
2608 
2609 void
2610 ftl_nv_cache_chunk_md_initialize(struct ftl_nv_cache_chunk_md *md)
2611 {
2612 	memset(md, 0, sizeof(*md));
2613 	md->version = FTL_NVC_VERSION_CURRENT;
2614 }
2615