xref: /spdk/lib/ftl/ftl_nv_cache.c (revision 85c5ce74a66e3224a9ea39cfac328aa0ed1c836f)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   Copyright 2023 Solidigm All Rights Reserved
4  *   All rights reserved.
5  */
6 
7 
8 #include "spdk/bdev.h"
9 #include "spdk/bdev_module.h"
10 #include "spdk/ftl.h"
11 #include "spdk/string.h"
12 
13 #include "ftl_nv_cache.h"
14 #include "ftl_nv_cache_io.h"
15 #include "ftl_core.h"
16 #include "ftl_band.h"
17 #include "utils/ftl_addr_utils.h"
18 #include "mngt/ftl_mngt.h"
19 
20 static inline uint64_t nvc_data_blocks(struct ftl_nv_cache *nv_cache) __attribute__((unused));
21 static struct ftl_nv_cache_compactor *compactor_alloc(struct spdk_ftl_dev *dev);
22 static void compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor);
23 static void compaction_process_ftl_done(struct ftl_rq *rq);
24 static void compaction_process_read_entry(void *arg);
25 static void ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev,
26 					const struct ftl_property *property,
27 					struct spdk_json_write_ctx *w);
28 
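/* Sanity check that a chunk metadata entry lies entirely within the NV cache metadata buffer */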
29 static inline void
30 nvc_validate_md(struct ftl_nv_cache *nv_cache,
31 		struct ftl_nv_cache_chunk_md *chunk_md)
32 {
33 	struct ftl_md *md = nv_cache->md;
34 	void *buffer = ftl_md_get_buffer(md);
35 	uint64_t size = ftl_md_get_buffer_size(md);
36 	void *ptr = chunk_md;
37 
38 	if (ptr < buffer) {
39 		ftl_abort();
40 	}
41 
42 	ptr += sizeof(*chunk_md);
43 	if (ptr > buffer + size) {
44 		ftl_abort();
45 	}
46 }
47 
48 static inline uint64_t
49 nvc_data_offset(struct ftl_nv_cache *nv_cache)
50 {
51 	return 0;
52 }
53 
54 static inline uint64_t
55 nvc_data_blocks(struct ftl_nv_cache *nv_cache)
56 {
57 	return nv_cache->chunk_blocks * nv_cache->chunk_count;
58 }
59 
60 size_t
61 ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache)
62 {
63 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache,
64 				    struct spdk_ftl_dev, nv_cache);
65 	return spdk_divide_round_up(dev->layout.nvc.chunk_data_blocks * dev->layout.l2p.addr_size,
66 				    FTL_BLOCK_SIZE);
67 }
68 
69 static size_t
70 nv_cache_p2l_map_pool_elem_size(const struct ftl_nv_cache *nv_cache)
71 {
72 	/* Map pool element holds the whole tail md */
73 	return nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE;
74 }
75 
76 static uint64_t
77 get_chunk_idx(struct ftl_nv_cache_chunk *chunk)
78 {
79 	struct ftl_nv_cache_chunk *first_chunk = chunk->nv_cache->chunks;
80 
81 	return (chunk->offset - first_chunk->offset) / chunk->nv_cache->chunk_blocks;
82 }
83 
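/*
 * Derive the compaction threshold and free chunk target from the number of usable
 * (active) chunks and the configured percentages, and convert the throttle interval
 * from milliseconds to TSC ticks.
 */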
84 static void
85 ftl_nv_cache_init_update_limits(struct spdk_ftl_dev *dev)
86 {
87 	struct ftl_nv_cache *nvc = &dev->nv_cache;
88 	uint64_t usable_chunks = nvc->chunk_count - nvc->chunk_inactive_count;
89 
90 	/* Start compaction when the number of full chunks exceeds the given % of all active chunks */
91 	nvc->chunk_compaction_threshold = usable_chunks *
92 					  dev->conf.nv_cache.chunk_compaction_threshold /
93 					  100;
94 
95 	nvc->throttle.interval_tsc = FTL_NV_CACHE_THROTTLE_INTERVAL_MS *
96 				     (spdk_get_ticks_hz() / 1000);
97 
98 	nvc->chunk_free_target = spdk_divide_round_up(usable_chunks *
99 				 dev->conf.nv_cache.chunk_free_target,
100 				 100);
101 }
102 
103 struct nvc_scrub_ctx {
104 	uint64_t chunk_no;
105 	nvc_scrub_cb cb;
106 	void *cb_ctx;
107 
108 	struct ftl_layout_region reg_chunk;
109 	struct ftl_md *md_chunk;
110 };
111 
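/*
 * Advance the scrub context to the next active chunk, skipping inactive ones.
 * Returns 0 when an active chunk is found, -ENOENT when no chunks are left.
 */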
112 static int
113 nvc_scrub_find_next_chunk(struct spdk_ftl_dev *dev, struct nvc_scrub_ctx *scrub_ctx)
114 {
115 	while (scrub_ctx->chunk_no < dev->layout.nvc.chunk_count) {
116 		if (dev->nv_cache.nvc_type->ops.is_chunk_active(dev, scrub_ctx->reg_chunk.current.offset)) {
117 			return 0;
118 		}
119 
120 		/* Chunk not active - advance the dummy region to the next chunk */
121 		scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;
122 		scrub_ctx->chunk_no++;
123 	}
124 	return -ENOENT;
125 }
126 
127 static void
128 nvc_scrub_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
129 {
130 	struct nvc_scrub_ctx *scrub_ctx = md->owner.cb_ctx;
131 	union ftl_md_vss vss;
132 
133 	/* Move to the next chunk */
134 	scrub_ctx->chunk_no++;
135 	scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;
136 
137 	FTL_DEBUGLOG(dev, "Scrub progress: %"PRIu64"/%"PRIu64" chunks\n",
138 		     scrub_ctx->chunk_no, dev->layout.nvc.chunk_count);
139 
140 	if (status || nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
141 		/* IO error or no more active chunks found. Scrubbing finished. */
142 		scrub_ctx->cb(dev, scrub_ctx->cb_ctx, status);
143 		ftl_md_destroy(scrub_ctx->md_chunk, 0);
144 		free(scrub_ctx);
145 		return;
146 	}
147 
148 	/* Scrub the next chunk */
149 	vss.version.md_version = 0;
150 	vss.nv_cache.lba = FTL_ADDR_INVALID;
151 
152 	scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
153 	scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;
154 
155 	ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
156 }
157 
158 void
159 ftl_nv_cache_scrub(struct spdk_ftl_dev *dev, nvc_scrub_cb cb, void *cb_ctx)
160 {
161 	struct nvc_scrub_ctx *scrub_ctx = calloc(1, sizeof(*scrub_ctx));
162 	union ftl_md_vss vss;
163 
164 	if (!scrub_ctx) {
165 		cb(dev, cb_ctx, -ENOMEM);
166 		return;
167 	}
168 
169 	scrub_ctx->cb = cb;
170 	scrub_ctx->cb_ctx = cb_ctx;
171 
172 	/* Set up a dummy region for the first chunk */
173 	scrub_ctx->reg_chunk.name = ftl_md_region_name(FTL_LAYOUT_REGION_TYPE_DATA_NVC);
174 	scrub_ctx->reg_chunk.type = FTL_LAYOUT_REGION_TYPE_DATA_NVC;
175 	scrub_ctx->reg_chunk.mirror_type = FTL_LAYOUT_REGION_TYPE_INVALID;
176 	scrub_ctx->reg_chunk.current.version = 0;
177 	scrub_ctx->reg_chunk.current.offset = 0;
178 	scrub_ctx->reg_chunk.current.blocks = dev->layout.nvc.chunk_data_blocks;
179 	scrub_ctx->reg_chunk.entry_size = FTL_BLOCK_SIZE;
180 	scrub_ctx->reg_chunk.num_entries = dev->layout.nvc.chunk_data_blocks;
181 	scrub_ctx->reg_chunk.vss_blksz = dev->nv_cache.md_size;
182 	scrub_ctx->reg_chunk.bdev_desc = dev->nv_cache.bdev_desc;
183 	scrub_ctx->reg_chunk.ioch = dev->nv_cache.cache_ioch;
184 
185 	/* Set up an MD object for the region */
186 	scrub_ctx->md_chunk = ftl_md_create(dev, scrub_ctx->reg_chunk.current.blocks,
187 					    scrub_ctx->reg_chunk.vss_blksz, scrub_ctx->reg_chunk.name, FTL_MD_CREATE_NO_MEM,
188 					    &scrub_ctx->reg_chunk);
189 
190 	if (!scrub_ctx->md_chunk) {
191 		free(scrub_ctx);
192 		cb(dev, cb_ctx, -ENOMEM);
193 		return;
194 	}
195 
196 	if (nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
197 		/* No active chunks found */
198 		ftl_md_destroy(scrub_ctx->md_chunk, 0);
199 		free(scrub_ctx);
200 		cb(dev, cb_ctx, -ENOENT);
201 		return;
202 	}
203 
204 	/* Scrub the first chunk */
205 	vss.version.md_version = 0;
206 	vss.nv_cache.lba = FTL_ADDR_INVALID;
207 
208 	scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
209 	scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;
210 
211 	ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
212 	return;
213 }
214 
215 int
216 ftl_nv_cache_init(struct spdk_ftl_dev *dev)
217 {
218 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
219 	struct ftl_nv_cache_chunk *chunk;
220 	struct ftl_nv_cache_chunk_md *md;
221 	struct ftl_nv_cache_compactor *compactor;
222 	uint64_t i, offset;
223 
224 	nv_cache->halt = true;
225 
226 	nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
227 	if (!nv_cache->md) {
228 		FTL_ERRLOG(dev, "No NV cache metadata object\n");
229 		return -1;
230 	}
231 
232 	nv_cache->md_pool = ftl_mempool_create(dev->conf.user_io_pool_size,
233 					       nv_cache->md_size * dev->xfer_size,
234 					       FTL_BLOCK_SIZE, SPDK_ENV_NUMA_ID_ANY);
235 	if (!nv_cache->md_pool) {
236 		FTL_ERRLOG(dev, "Failed to initialize NV cache metadata pool\n");
237 		return -1;
238 	}
239 
240 	/*
241 	 * Initialize chunk info
242 	 */
243 	nv_cache->chunk_blocks = dev->layout.nvc.chunk_data_blocks;
244 	nv_cache->chunk_count = dev->layout.nvc.chunk_count;
245 	nv_cache->tail_md_chunk_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache);
246 
247 	/* Allocate chunks */
248 	nv_cache->chunks = calloc(nv_cache->chunk_count,
249 				  sizeof(nv_cache->chunks[0]));
250 	if (!nv_cache->chunks) {
251 		FTL_ERRLOG(dev, "Failed to initialize NV cache chunks\n");
252 		return -1;
253 	}
254 
255 	TAILQ_INIT(&nv_cache->chunk_free_list);
256 	TAILQ_INIT(&nv_cache->chunk_open_list);
257 	TAILQ_INIT(&nv_cache->chunk_full_list);
258 	TAILQ_INIT(&nv_cache->chunk_comp_list);
259 	TAILQ_INIT(&nv_cache->chunk_inactive_list);
260 	TAILQ_INIT(&nv_cache->needs_free_persist_list);
261 
262 	/* First chunk metadata */
263 	md = ftl_md_get_buffer(nv_cache->md);
264 	if (!md) {
265 		FTL_ERRLOG(dev, "No NV cache metadata\n");
266 		return -1;
267 	}
268 
269 	chunk = nv_cache->chunks;
270 	offset = nvc_data_offset(nv_cache);
271 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
272 		chunk->nv_cache = nv_cache;
273 		chunk->md = md;
274 		chunk->md->version = FTL_NVC_VERSION_CURRENT;
275 		nvc_validate_md(nv_cache, md);
276 		chunk->offset = offset;
277 		offset += nv_cache->chunk_blocks;
278 
279 		if (nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset)) {
280 			nv_cache->chunk_free_count++;
281 			TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
282 		} else {
283 			chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
284 			nv_cache->chunk_inactive_count++;
285 			TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
286 		}
287 	}
288 	assert(nv_cache->chunk_free_count + nv_cache->chunk_inactive_count == nv_cache->chunk_count);
289 	assert(offset <= nvc_data_offset(nv_cache) + nvc_data_blocks(nv_cache));
290 
291 	TAILQ_INIT(&nv_cache->compactor_list);
292 	for (i = 0; i < FTL_NV_CACHE_NUM_COMPACTORS; i++) {
293 		compactor = compactor_alloc(dev);
294 
295 		if (!compactor) {
296 			FTL_ERRLOG(dev, "Cannot allocate compaction process\n");
297 			return -1;
298 		}
299 
300 		TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
301 	}
302 
303 #define FTL_MAX_OPEN_CHUNKS 2
304 #define FTL_MAX_COMPACTED_CHUNKS 2
305 	nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS + FTL_MAX_COMPACTED_CHUNKS,
306 						nv_cache_p2l_map_pool_elem_size(nv_cache),
307 						FTL_BLOCK_SIZE,
308 						SPDK_ENV_NUMA_ID_ANY);
309 	if (!nv_cache->p2l_pool) {
310 		return -ENOMEM;
311 	}
312 
313 	/* One entry per open or compacted chunk */
314 	nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS + FTL_MAX_COMPACTED_CHUNKS,
315 				  sizeof(struct ftl_nv_cache_chunk_md),
316 				  FTL_BLOCK_SIZE,
317 				  SPDK_ENV_NUMA_ID_ANY);
318 	if (!nv_cache->chunk_md_pool) {
319 		return -ENOMEM;
320 	}
321 
322 	/* Each compactor may be reading from a different chunk whose state it needs to switch to free at the end,
323 	 * plus one backup entry each for processing high-invalidity chunks (if there's a backlog of chunks with
324 	 * extremely small, even zero, validity, the compactors can process them quickly and trigger a lot of
325 	 * updates to the free state at once). */
326 	nv_cache->free_chunk_md_pool = ftl_mempool_create(2 * FTL_NV_CACHE_NUM_COMPACTORS,
327 				       sizeof(struct ftl_nv_cache_chunk_md),
328 				       FTL_BLOCK_SIZE,
329 				       SPDK_ENV_NUMA_ID_ANY);
330 	if (!nv_cache->free_chunk_md_pool) {
331 		return -ENOMEM;
332 	}
333 
334 	ftl_nv_cache_init_update_limits(dev);
335 	ftl_property_register(dev, "cache_device", NULL, 0, NULL, NULL, ftl_property_dump_cache_dev, NULL,
336 			      NULL, true);
337 	return 0;
338 }
339 
340 void
341 ftl_nv_cache_deinit(struct spdk_ftl_dev *dev)
342 {
343 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
344 	struct ftl_nv_cache_compactor *compactor;
345 
346 	while (!TAILQ_EMPTY(&nv_cache->compactor_list)) {
347 		compactor = TAILQ_FIRST(&nv_cache->compactor_list);
348 		TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
349 
350 		compactor_free(dev, compactor);
351 	}
352 
353 	ftl_mempool_destroy(nv_cache->md_pool);
354 	ftl_mempool_destroy(nv_cache->p2l_pool);
355 	ftl_mempool_destroy(nv_cache->chunk_md_pool);
356 	ftl_mempool_destroy(nv_cache->free_chunk_md_pool);
357 	nv_cache->md_pool = NULL;
358 	nv_cache->p2l_pool = NULL;
359 	nv_cache->chunk_md_pool = NULL;
360 	nv_cache->free_chunk_md_pool = NULL;
361 
362 	free(nv_cache->chunks);
363 	nv_cache->chunks = NULL;
364 }
365 
366 static uint64_t
367 chunk_get_free_space(struct ftl_nv_cache *nv_cache,
368 		     struct ftl_nv_cache_chunk *chunk)
369 {
370 	assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <=
371 	       nv_cache->chunk_blocks);
372 	return nv_cache->chunk_blocks - chunk->md->write_pointer -
373 	       nv_cache->tail_md_chunk_blocks;
374 }
375 
376 static bool
377 chunk_is_closed(struct ftl_nv_cache_chunk *chunk)
378 {
379 	return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
380 }
381 
382 static void ftl_chunk_close(struct ftl_nv_cache_chunk *chunk);
383 
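/*
 * Reserve space for the IO in the currently open chunk and return the cache offset of
 * the reserved area, or FTL_LBA_INVALID when no open chunk can hold it. If a chunk has
 * too little space left, the remainder is marked as skipped and the chunk is closed
 * once fully written.
 */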
384 static uint64_t
385 ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io)
386 {
387 	uint64_t address = FTL_LBA_INVALID;
388 	uint64_t num_blocks = io->num_blocks;
389 	uint64_t free_space;
390 	struct ftl_nv_cache_chunk *chunk;
391 
392 	do {
393 		chunk = nv_cache->chunk_current;
394 		/* Chunk has been closed so pick a new one */
395 		if (chunk && chunk_is_closed(chunk))  {
396 			chunk = NULL;
397 		}
398 
399 		if (!chunk) {
400 			chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
401 			if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
402 				TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
403 				nv_cache->chunk_current = chunk;
404 			} else {
405 				break;
406 			}
407 		}
408 
409 		free_space = chunk_get_free_space(nv_cache, chunk);
410 
411 		if (free_space >= num_blocks) {
412 			/* Enough space in chunk */
413 
414 			/* Calculate address in NV cache */
415 			address = chunk->offset + chunk->md->write_pointer;
416 
417 			/* Set chunk in IO */
418 			io->nv_cache_chunk = chunk;
419 
420 			/* Move write pointer */
421 			chunk->md->write_pointer += num_blocks;
422 			break;
423 		}
424 
425 		/* Not enough space in nv_cache_chunk */
426 		nv_cache->chunk_current = NULL;
427 
428 		if (0 == free_space) {
429 			continue;
430 		}
431 
432 		chunk->md->blocks_skipped = free_space;
433 		chunk->md->blocks_written += free_space;
434 		chunk->md->write_pointer += free_space;
435 
436 		if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
437 			ftl_chunk_close(chunk);
438 		}
439 	} while (1);
440 
441 	return address;
442 }
443 
444 void
445 ftl_nv_cache_fill_md(struct ftl_io *io)
446 {
447 	struct ftl_nv_cache_chunk *chunk = io->nv_cache_chunk;
448 	uint64_t i;
449 	union ftl_md_vss *metadata = io->md;
450 	uint64_t lba = ftl_io_get_lba(io, 0);
451 
452 	for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) {
453 		metadata->nv_cache.lba = lba;
454 		metadata->nv_cache.seq_id = chunk->md->seq_id;
455 	}
456 }
457 
458 uint64_t
459 chunk_tail_md_offset(struct ftl_nv_cache *nv_cache)
460 {
461 	return nv_cache->chunk_blocks - nv_cache->tail_md_chunk_blocks;
462 }
463 
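/*
 * Account for blocks written to the chunk and close it once all data blocks
 * (everything up to the tail metadata) have been written.
 */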
464 static void
465 chunk_advance_blocks(struct ftl_nv_cache *nv_cache, struct ftl_nv_cache_chunk *chunk,
466 		     uint64_t advanced_blocks)
467 {
468 	chunk->md->blocks_written += advanced_blocks;
469 
470 	assert(chunk->md->blocks_written <= nv_cache->chunk_blocks);
471 
472 	if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
473 		ftl_chunk_close(chunk);
474 	}
475 }
476 
477 static uint64_t
478 chunk_user_blocks_written(struct ftl_nv_cache_chunk *chunk)
479 {
480 	return chunk->md->blocks_written - chunk->md->blocks_skipped -
481 	       chunk->nv_cache->tail_md_chunk_blocks;
482 }
483 
484 static bool
485 is_chunk_compacted(struct ftl_nv_cache_chunk *chunk)
486 {
487 	assert(chunk->md->blocks_written != 0);
488 
489 	if (chunk_user_blocks_written(chunk) == chunk->md->blocks_compacted) {
490 		return true;
491 	}
492 
493 	return false;
494 }
495 
496 static int
497 ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk)
498 {
499 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
500 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
501 
502 	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->chunk_md_pool);
503 
504 	if (!p2l_map->chunk_dma_md) {
505 		return -ENOMEM;
506 	}
507 
508 	ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
509 	return 0;
510 }
511 
512 static void
513 ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk)
514 {
515 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
516 
517 	ftl_mempool_put(chunk->nv_cache->chunk_md_pool, p2l_map->chunk_dma_md);
518 	p2l_map->chunk_dma_md = NULL;
519 }
520 
521 static void chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk);
522 
523 static void
524 ftl_chunk_free(struct ftl_nv_cache_chunk *chunk)
525 {
526 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
527 
528 	/* Reset chunk */
529 	ftl_nv_cache_chunk_md_initialize(chunk->md);
530 
531 	TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry);
532 	nv_cache->chunk_free_persist_count++;
533 }
534 
535 static int
536 ftl_chunk_alloc_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
537 {
538 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
539 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
540 
541 	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->free_chunk_md_pool);
542 	if (!p2l_map->chunk_dma_md) {
543 		return -ENOMEM;
544 	}
545 
546 	ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
547 	return 0;
548 }
549 
550 static void
551 ftl_chunk_free_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
552 {
553 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
554 
555 	ftl_mempool_put(chunk->nv_cache->free_chunk_md_pool, p2l_map->chunk_dma_md);
556 	p2l_map->chunk_dma_md = NULL;
557 }
558 
559 static void
560 chunk_free_cb(int status, void *ctx)
561 {
562 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
563 
564 	if (spdk_likely(!status)) {
565 		struct ftl_nv_cache *nv_cache = chunk->nv_cache;
566 
567 		nv_cache->chunk_free_persist_count--;
568 		TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
569 		nv_cache->chunk_free_count++;
570 		nv_cache->chunk_full_count--;
571 		chunk->md->state = FTL_CHUNK_STATE_FREE;
572 		chunk->md->close_seq_id = 0;
573 		ftl_chunk_free_chunk_free_entry(chunk);
574 	} else {
575 #ifdef SPDK_FTL_RETRY_ON_ERROR
576 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
577 #else
578 		ftl_abort();
579 #endif
580 	}
581 }
582 
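/*
 * Persist the FREE state of chunks queued on the needs_free_persist_list. Once the
 * metadata entry is stored, chunk_free_cb() puts the chunk back on the free list.
 */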
583 static void
584 ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
585 {
586 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
587 	struct ftl_p2l_map *p2l_map;
588 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
589 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
590 	struct ftl_nv_cache_chunk *tchunk, *chunk = NULL;
591 	int rc;
592 
593 	TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) {
594 		p2l_map = &chunk->p2l_map;
595 		rc = ftl_chunk_alloc_chunk_free_entry(chunk);
596 		if (rc) {
597 			break;
598 		}
599 
600 		TAILQ_REMOVE(&nv_cache->needs_free_persist_list, chunk, entry);
601 
602 		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
603 		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_FREE;
604 		p2l_map->chunk_dma_md->close_seq_id = 0;
605 		p2l_map->chunk_dma_md->p2l_map_checksum = 0;
606 
607 		ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
608 				       chunk_free_cb, chunk, &chunk->md_persist_entry_ctx);
609 	}
610 }
611 
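/*
 * Update the simple moving average of compaction bandwidth (bytes per TSC tick) kept
 * in a ring buffer of the last FTL_NV_CACHE_COMPACTION_SMA_N samples.
 */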
612 static void
613 compaction_stats_update(struct ftl_nv_cache_chunk *chunk)
614 {
615 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
616 	struct compaction_bw_stats *compaction_bw = &nv_cache->compaction_recent_bw;
617 	double *ptr;
618 
619 	if (spdk_unlikely(chunk->compaction_length_tsc == 0)) {
620 		return;
621 	}
622 
623 	if (spdk_likely(compaction_bw->count == FTL_NV_CACHE_COMPACTION_SMA_N)) {
624 		ptr = compaction_bw->buf + compaction_bw->first;
625 		compaction_bw->first++;
626 		if (compaction_bw->first == FTL_NV_CACHE_COMPACTION_SMA_N) {
627 			compaction_bw->first = 0;
628 		}
629 		compaction_bw->sum -= *ptr;
630 	} else {
631 		ptr = compaction_bw->buf + compaction_bw->count;
632 		compaction_bw->count++;
633 	}
634 
635 	*ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc;
636 	chunk->compaction_length_tsc = 0;
637 
638 	compaction_bw->sum += *ptr;
639 	nv_cache->compaction_sma = compaction_bw->sum / compaction_bw->count;
640 }
641 
642 static void
643 chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)
644 {
645 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
646 	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
647 
648 	chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
649 	chunk->compaction_start_tsc = tsc;
650 
651 	chunk->md->blocks_compacted += num_blocks;
652 	assert(chunk->md->blocks_compacted <= chunk_user_blocks_written(chunk));
653 	if (!is_chunk_compacted(chunk)) {
654 		return;
655 	}
656 
657 	/* Remove chunk from compacted list */
658 	TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry);
659 	nv_cache->chunk_comp_count--;
660 
661 	compaction_stats_update(chunk);
662 
663 	chunk_free_p2l_map(chunk);
664 
665 	ftl_chunk_free(chunk);
666 }
667 
668 static bool
669 is_compaction_required_for_upgrade(struct ftl_nv_cache *nv_cache)
670 {
671 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
672 
673 	if (dev->conf.prep_upgrade_on_shutdown) {
674 		if (nv_cache->chunk_full_count || nv_cache->chunk_open_count) {
675 			return true;
676 		}
677 	}
678 
679 	return false;
680 }
681 
682 static bool
683 is_compaction_required(struct ftl_nv_cache *nv_cache)
684 {
685 	if (spdk_unlikely(nv_cache->halt)) {
686 		return is_compaction_required_for_upgrade(nv_cache);
687 	}
688 
689 	if (nv_cache->chunk_full_count >= nv_cache->chunk_compaction_threshold) {
690 		return true;
691 	}
692 
693 	return false;
694 }
695 
696 static void compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor);
697 static void compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp);
698 
699 static void
700 _compaction_process_pin_lba(void *_comp)
701 {
702 	struct ftl_nv_cache_compactor *comp = _comp;
703 
704 	compaction_process_pin_lba(comp);
705 }
706 
707 static void
708 compaction_process_pin_lba_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
709 {
710 	struct ftl_nv_cache_compactor *comp = pin_ctx->cb_ctx;
711 	struct ftl_rq *rq = comp->rq;
712 
713 	if (status) {
714 		rq->iter.status = status;
715 		pin_ctx->lba = FTL_LBA_INVALID;
716 	}
717 
718 	if (--rq->iter.remaining == 0) {
719 		if (rq->iter.status) {
720 			/* unpin and try again */
721 			ftl_rq_unpin(rq);
722 			spdk_thread_send_msg(spdk_get_thread(), _compaction_process_pin_lba, comp);
723 			return;
724 		}
725 
726 		compaction_process_finish_read(comp);
727 	}
728 }
729 
730 static void
731 compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp)
732 {
733 	struct ftl_rq *rq = comp->rq;
734 	struct spdk_ftl_dev *dev = rq->dev;
735 	struct ftl_rq_entry *entry;
736 
737 	assert(rq->iter.count);
738 	rq->iter.remaining = rq->iter.count;
739 	rq->iter.status = 0;
740 
741 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
742 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
743 		struct ftl_l2p_pin_ctx *pin_ctx = &entry->l2p_pin_ctx;
744 		union ftl_md_vss *md = entry->io_md;
745 
746 		if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
747 			ftl_l2p_pin_skip(dev, compaction_process_pin_lba_cb, comp, pin_ctx);
748 		} else {
749 			ftl_l2p_pin(dev, md->nv_cache.lba, 1, compaction_process_pin_lba_cb, comp, pin_ctx);
750 		}
751 	}
752 }
753 
754 static void
755 compaction_process_read_entry_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
756 {
757 	struct ftl_rq_entry *entry = arg;
758 	struct ftl_rq *rq = ftl_rq_from_entry(entry);
759 	struct spdk_ftl_dev *dev = rq->dev;
760 	struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
761 
762 	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_CMP, bdev_io);
763 
764 	spdk_bdev_free_io(bdev_io);
765 
766 	if (!success) {
767 		/* retry */
768 		spdk_thread_send_msg(spdk_get_thread(), compaction_process_read_entry, entry);
769 		return;
770 	}
771 
772 	assert(rq->iter.remaining >= entry->bdev_io.num_blocks);
773 	rq->iter.remaining -= entry->bdev_io.num_blocks;
774 	if (0 == rq->iter.remaining) {
775 		/* All IOs processed, go to the next phase - pinning */
776 		compaction_process_pin_lba(compactor);
777 	}
778 }
779 
780 static void
781 compaction_process_read_entry(void *arg)
782 {
783 	struct ftl_rq_entry *entry = arg;
784 	struct ftl_rq *rq = ftl_rq_from_entry(entry);
785 	struct spdk_ftl_dev *dev = rq->dev;
786 
787 	int rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, dev->nv_cache.bdev_desc,
788 			dev->nv_cache.cache_ioch, entry->io_payload, entry->io_md,
789 			entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
790 			compaction_process_read_entry_cb, entry);
791 
792 	if (spdk_unlikely(rc)) {
793 		if (rc == -ENOMEM) {
794 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
795 			entry->bdev_io.wait_entry.bdev = bdev;
796 			entry->bdev_io.wait_entry.cb_fn = compaction_process_read_entry;
797 			entry->bdev_io.wait_entry.cb_arg = entry;
798 			spdk_bdev_queue_io_wait(bdev, dev->nv_cache.cache_ioch, &entry->bdev_io.wait_entry);
799 		} else {
800 			ftl_abort();
801 		}
802 	}
803 
804 	dev->stats.io_activity_total += entry->bdev_io.num_blocks;
805 }
806 
807 static bool
808 is_chunk_to_read(struct ftl_nv_cache_chunk *chunk)
809 {
810 	assert(chunk->md->blocks_written != 0);
811 
812 	if (chunk_user_blocks_written(chunk) == chunk->md->read_pointer) {
813 		return false;
814 	}
815 
816 	return true;
817 }
818 
819 static void
820 read_chunk_p2l_map_cb(struct ftl_basic_rq *brq)
821 {
822 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
823 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
824 
825 	if (!brq->success) {
826 #ifdef SPDK_FTL_RETRY_ON_ERROR
827 		read_chunk_p2l_map(chunk);
828 #else
829 		ftl_abort();
830 #endif
831 	}
832 
833 	TAILQ_INSERT_HEAD(&nv_cache->chunk_comp_list, chunk, entry);
834 }
835 
836 static int chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk);
837 static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
838 				  void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx);
839 
840 static void
841 read_chunk_p2l_map(void *arg)
842 {
843 	struct ftl_nv_cache_chunk *chunk = arg;
844 	int rc;
845 
846 	if (chunk_alloc_p2l_map(chunk)) {
847 		ftl_abort();
848 	}
849 
850 	rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, read_chunk_p2l_map_cb, NULL);
851 	if (rc) {
852 		if (rc == -ENOMEM) {
853 			struct ftl_nv_cache *nv_cache = chunk->nv_cache;
854 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
855 			struct spdk_bdev_io_wait_entry *wait_entry = &chunk->metadata_rq.io.bdev_io_wait;
856 
857 			wait_entry->bdev = bdev;
858 			wait_entry->cb_fn = read_chunk_p2l_map;
859 			wait_entry->cb_arg = chunk;
860 			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, wait_entry);
861 		} else {
862 			ftl_abort();
863 		}
864 	}
865 }
866 
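/*
 * Take the first chunk from the full list and start reading its P2L map;
 * read_chunk_p2l_map_cb() moves the chunk onto the compaction list once the tail
 * metadata has been read.
 */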
867 static void
868 prepare_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
869 {
870 	struct ftl_nv_cache_chunk *chunk = NULL;
871 
872 	if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
873 		return;
874 	}
875 
876 	chunk = TAILQ_FIRST(&nv_cache->chunk_full_list);
877 	TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry);
878 	assert(chunk->md->write_pointer);
879 
880 	nv_cache->chunk_comp_count++;
881 	read_chunk_p2l_map(chunk);
882 }
883 
884 
885 static struct ftl_nv_cache_chunk *
886 get_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
887 {
888 	struct ftl_nv_cache_chunk *chunk = NULL;
889 
890 	if (TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
891 		return NULL;
892 	}
893 
894 	chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
895 	if (!is_chunk_to_read(chunk)) {
896 		return NULL;
897 	}
898 
899 	return chunk;
900 }
901 
902 static uint64_t
903 chunk_blocks_to_read(struct ftl_nv_cache_chunk *chunk)
904 {
905 	uint64_t blocks_written;
906 	uint64_t blocks_to_read;
907 
908 	assert(chunk->md->blocks_written >= chunk->md->blocks_skipped);
909 	blocks_written = chunk_user_blocks_written(chunk);
910 
911 	assert(blocks_written >= chunk->md->read_pointer);
912 	blocks_to_read = blocks_written - chunk->md->read_pointer;
913 
914 	return blocks_to_read;
915 }
916 
917 static void
918 compactor_deactivate(struct ftl_nv_cache_compactor *compactor)
919 {
920 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
921 
922 	compactor->rq->iter.count = 0;
923 	assert(nv_cache->compaction_active_count);
924 	nv_cache->compaction_active_count--;
925 	TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
926 }
927 
928 static void
929 compaction_process_invalidate_entry(struct ftl_rq_entry *entry)
930 {
931 	entry->addr = FTL_ADDR_INVALID;
932 	entry->lba = FTL_LBA_INVALID;
933 	entry->seq_id = 0;
934 	entry->owner.priv = NULL;
935 }
936 
937 static void
938 compaction_process_pad(struct ftl_nv_cache_compactor *compactor, uint64_t idx)
939 {
940 	struct ftl_rq *rq = compactor->rq;
941 	struct ftl_rq_entry *entry;
942 
943 	assert(idx < rq->num_blocks);
944 	FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[idx], entry, rq->num_blocks) {
945 		compaction_process_invalidate_entry(entry);
946 	}
947 }
948 
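/*
 * Issue the read IOs for the request, merging entries with contiguous cache addresses
 * into single bdev reads.
 */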
949 static void
950 compaction_process_read(struct ftl_nv_cache_compactor *compactor)
951 {
952 	struct ftl_rq *rq = compactor->rq;
953 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
954 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
955 	struct ftl_rq_entry *entry, *io;
956 
957 	assert(rq->iter.count);
958 	rq->iter.remaining = rq->iter.count;
959 
960 	io = rq->entries;
961 	io->bdev_io.num_blocks = 1;
962 	io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
963 	FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[1], entry, rq->iter.count) {
964 		if (entry->addr == io->addr + io->bdev_io.num_blocks) {
965 			io->bdev_io.num_blocks++;
966 		} else {
967 			compaction_process_read_entry(io);
968 			io = entry;
969 			io->bdev_io.num_blocks = 1;
970 			io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
971 		}
972 	}
973 	compaction_process_read_entry(io);
974 }
975 
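/*
 * Find the next valid block to compact within the chunk. Blocks that are no longer set
 * in the valid map are skipped and immediately accounted as compacted. Returns
 * FTL_ADDR_INVALID when the chunk has nothing left to read.
 */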
976 static ftl_addr
977 compaction_chunk_read_pos(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk)
978 {
979 	ftl_addr start, pos;
980 	uint64_t skip, to_read = chunk_blocks_to_read(chunk);
981 
982 	if (0 == to_read) {
983 		return FTL_ADDR_INVALID;
984 	}
985 
986 	start = ftl_addr_from_nvc_offset(dev, chunk->offset + chunk->md->read_pointer);
987 	pos = ftl_bitmap_find_first_set(dev->valid_map, start, start + to_read - 1);
988 
989 	if (pos == UINT64_MAX) {
990 		chunk->md->read_pointer += to_read;
991 		chunk_compaction_advance(chunk, to_read);
992 		return FTL_ADDR_INVALID;
993 	}
994 
995 	assert(pos >= start);
996 	skip = pos - start;
997 	if (skip) {
998 		chunk->md->read_pointer += skip;
999 		chunk_compaction_advance(chunk, skip);
1000 	}
1001 
1002 	return pos;
1003 }
1004 
1005 static bool
1006 compaction_entry_read_pos(struct ftl_nv_cache *nv_cache, struct ftl_rq_entry *entry)
1007 {
1008 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1009 	struct ftl_nv_cache_chunk *chunk = NULL;
1010 	ftl_addr addr = FTL_ADDR_INVALID;
1011 
1012 	while (!chunk) {
1013 		/* Get currently handled chunk */
1014 		chunk = get_chunk_for_compaction(nv_cache);
1015 		if (!chunk) {
1016 			return false;
1017 		}
1018 		chunk->compaction_start_tsc = spdk_thread_get_last_tsc(spdk_get_thread());
1019 
1020 		/* Get next read position in chunk */
1021 		addr = compaction_chunk_read_pos(dev, chunk);
1022 		if (FTL_ADDR_INVALID == addr) {
1023 			chunk = NULL;
1024 		}
1025 	}
1026 
1027 	assert(FTL_ADDR_INVALID != addr);
1028 
1029 	/* Set entry address info and chunk */
1030 	entry->addr = addr;
1031 	entry->owner.priv = chunk;
1032 
1033 	/* Move read pointer in the chunk */
1034 	chunk->md->read_pointer++;
1035 
1036 	return true;
1037 }
1038 
1039 static void
1040 compaction_process_start(struct ftl_nv_cache_compactor *compactor)
1041 {
1042 	struct ftl_rq *rq = compactor->rq;
1043 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
1044 	struct ftl_rq_entry *entry;
1045 
1046 	assert(0 == compactor->rq->iter.count);
1047 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->num_blocks) {
1048 		if (!compaction_entry_read_pos(nv_cache, entry)) {
1049 			compaction_process_pad(compactor, entry->index);
1050 			break;
1051 		}
1052 		rq->iter.count++;
1053 	}
1054 
1055 	if (rq->iter.count) {
1056 		/* Schedule Read IOs */
1057 		compaction_process_read(compactor);
1058 	} else {
1059 		compactor_deactivate(compactor);
1060 	}
1061 }
1062 
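/*
 * Single compaction step: prepare more chunks for compaction if below the limit and,
 * if a compactor and a prepared chunk are available, start processing the next batch.
 */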
1063 static void
1064 compaction_process(struct ftl_nv_cache *nv_cache)
1065 {
1066 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1067 	struct ftl_nv_cache_compactor *compactor;
1068 
1069 	if (!is_compaction_required(nv_cache)) {
1070 		return;
1071 	}
1072 
1073 	if (nv_cache->chunk_comp_count < FTL_MAX_COMPACTED_CHUNKS) {
1074 		prepare_chunk_for_compaction(nv_cache);
1075 	}
1076 
1077 	if (TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
1078 		return;
1079 	}
1080 
1081 	compactor = TAILQ_FIRST(&nv_cache->compactor_list);
1082 	if (!compactor) {
1083 		return;
1084 	}
1085 
1086 	TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
1087 	compactor->nv_cache->compaction_active_count++;
1088 	compaction_process_start(compactor);
1089 	ftl_add_io_activity(dev);
1090 }
1091 
1092 static void
1093 compaction_process_ftl_done(struct ftl_rq *rq)
1094 {
1095 	struct spdk_ftl_dev *dev = rq->dev;
1096 	struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
1097 	struct ftl_band *band = rq->io.band;
1098 	struct ftl_rq_entry *entry;
1099 	ftl_addr addr;
1100 
1101 	if (spdk_unlikely(false == rq->success)) {
1102 		/* IO error, retry writing */
1103 #ifdef SPDK_FTL_RETRY_ON_ERROR
1104 		ftl_writer_queue_rq(&dev->writer_user, rq);
1105 		return;
1106 #else
1107 		ftl_abort();
1108 #endif
1109 	}
1110 
1111 	assert(rq->iter.count);
1112 
1113 	/* Update L2P table */
1114 	addr = rq->io.addr;
1115 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
1116 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
1117 
1118 		if (entry->lba != FTL_LBA_INVALID) {
1119 			ftl_l2p_update_base(dev, entry->lba, addr, entry->addr);
1120 			ftl_l2p_unpin(dev, entry->lba, 1);
1121 			chunk_compaction_advance(chunk, 1);
1122 		} else {
1123 			assert(entry->addr == FTL_ADDR_INVALID);
1124 		}
1125 
1126 		addr = ftl_band_next_addr(band, addr, 1);
1127 		compaction_process_invalidate_entry(entry);
1128 	}
1129 
1130 	compactor_deactivate(compactor);
1131 }
1132 
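/*
 * All reads for the request completed: drop entries whose L2P mapping changed while
 * the data was cached (already invalidated) and hand the remaining entries to the writer.
 */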
1133 static void
1134 compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
1135 {
1136 	struct ftl_rq *rq = compactor->rq;
1137 	struct spdk_ftl_dev *dev = rq->dev;
1138 	struct ftl_rq_entry *entry;
1139 	ftl_addr current_addr;
1140 	uint64_t skip = 0;
1141 
1142 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
1143 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
1144 		union ftl_md_vss *md = entry->io_md;
1145 
1146 		if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
1147 			skip++;
1148 			compaction_process_invalidate_entry(entry);
1149 			chunk_compaction_advance(chunk, 1);
1150 			continue;
1151 		}
1152 
1153 		current_addr = ftl_l2p_get(dev, md->nv_cache.lba);
1154 		if (current_addr == entry->addr) {
1155 			entry->lba = md->nv_cache.lba;
1156 			entry->seq_id = chunk->md->seq_id;
1157 		} else {
1158 			/* This address has already been invalidated, just skip this block */
1159 			chunk_compaction_advance(chunk, 1);
1160 			ftl_l2p_unpin(dev, md->nv_cache.lba, 1);
1161 			compaction_process_invalidate_entry(entry);
1162 			skip++;
1163 		}
1164 	}
1165 
1166 	if (skip < rq->iter.count) {
1167 		/*
1168 		 * Request contains data to be placed on FTL, compact it
1169 		 */
1170 		ftl_writer_queue_rq(&dev->writer_user, rq);
1171 	} else {
1172 		compactor_deactivate(compactor);
1173 	}
1174 }
1175 
1176 static void
1177 compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor)
1178 {
1179 	if (!compactor) {
1180 		return;
1181 	}
1182 
1183 	ftl_rq_del(compactor->rq);
1184 	free(compactor);
1185 }
1186 
1187 static struct ftl_nv_cache_compactor *
1188 compactor_alloc(struct spdk_ftl_dev *dev)
1189 {
1190 	struct ftl_nv_cache_compactor *compactor;
1191 	struct ftl_rq_entry *entry;
1192 
1193 	compactor = calloc(1, sizeof(*compactor));
1194 	if (!compactor) {
1195 		goto error;
1196 	}
1197 
1198 	/* Allocate a helper request for reading */
1199 	compactor->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
1200 	if (!compactor->rq) {
1201 		goto error;
1202 	}
1203 
1204 	compactor->nv_cache = &dev->nv_cache;
1205 	compactor->rq->owner.priv = compactor;
1206 	compactor->rq->owner.cb = compaction_process_ftl_done;
1207 	compactor->rq->owner.compaction = true;
1208 
1209 	FTL_RQ_ENTRY_LOOP(compactor->rq, entry, compactor->rq->num_blocks) {
1210 		compaction_process_invalidate_entry(entry);
1211 	}
1212 
1213 	return compactor;
1214 
1215 error:
1216 	compactor_free(dev, compactor);
1217 	return NULL;
1218 }
1219 
1220 static void
1221 ftl_nv_cache_submit_cb_done(struct ftl_io *io)
1222 {
1223 	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1224 
1225 	chunk_advance_blocks(nv_cache, io->nv_cache_chunk, io->num_blocks);
1226 	io->nv_cache_chunk = NULL;
1227 
1228 	ftl_io_complete(io);
1229 }
1230 
1231 static void
1232 ftl_nv_cache_l2p_update(struct ftl_io *io)
1233 {
1234 	struct spdk_ftl_dev *dev = io->dev;
1235 	ftl_addr next_addr = io->addr;
1236 	size_t i;
1237 
1238 	for (i = 0; i < io->num_blocks; ++i, ++next_addr) {
1239 		ftl_l2p_update_cache(dev, ftl_io_get_lba(io, i), next_addr, io->map[i]);
1240 	}
1241 
1242 	ftl_l2p_unpin(dev, io->lba, io->num_blocks);
1243 	ftl_nv_cache_submit_cb_done(io);
1244 }
1245 
1246 static void
1247 ftl_nv_cache_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
1248 {
1249 	struct ftl_io *io = pin_ctx->cb_ctx;
1250 	size_t i;
1251 
1252 	if (spdk_unlikely(status != 0)) {
1253 		/* Retry on the internal L2P fault */
1254 		FTL_ERRLOG(dev, "Cannot pin LBA for NV cache write, failed at %"PRIx64"\n",
1255 			   io->addr);
1256 		io->status = -EAGAIN;
1257 		ftl_nv_cache_submit_cb_done(io);
1258 		return;
1259 	}
1260 
1261 	/* Remember previous l2p mapping to resolve conflicts in case of outstanding write-after-write */
1262 	for (i = 0; i < io->num_blocks; ++i) {
1263 		io->map[i] = ftl_l2p_get(dev, ftl_io_get_lba(io, i));
1264 	}
1265 
1266 	assert(io->iov_pos == 0);
1267 
1268 	ftl_trace_submission(io->dev, io, io->addr, io->num_blocks);
1269 
1270 	dev->nv_cache.nvc_type->ops.write(io);
1271 }
1272 
1273 void
1274 ftl_nv_cache_write_complete(struct ftl_io *io, bool success)
1275 {
1276 	if (spdk_unlikely(!success)) {
1277 		FTL_ERRLOG(io->dev, "Non-volatile cache write failed at %"PRIx64"\n",
1278 			   io->addr);
1279 		io->status = -EIO;
1280 		ftl_l2p_unpin(io->dev, io->lba, io->num_blocks);
1281 		ftl_nv_cache_submit_cb_done(io);
1282 		return;
1283 	}
1284 
1285 	ftl_nv_cache_l2p_update(io);
1286 }
1287 
1288 bool
1289 ftl_nv_cache_write(struct ftl_io *io)
1290 {
1291 	struct spdk_ftl_dev *dev = io->dev;
1292 	uint64_t cache_offset;
1293 
1294 	/* Reserve area on the write buffer cache */
1295 	cache_offset = ftl_nv_cache_get_wr_buffer(&dev->nv_cache, io);
1296 	if (cache_offset == FTL_LBA_INVALID) {
1297 		/* No free space in NV cache, resubmit request */
1298 		return false;
1299 	}
1300 	io->addr = ftl_addr_from_nvc_offset(dev, cache_offset);
1301 
1302 	ftl_l2p_pin(io->dev, io->lba, io->num_blocks,
1303 		    ftl_nv_cache_pin_cb, io,
1304 		    &io->l2p_pin_ctx);
1305 
1306 	dev->nv_cache.throttle.blocks_submitted += io->num_blocks;
1307 
1308 	return true;
1309 }
1310 
1311 int
1312 ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
1313 		  spdk_bdev_io_completion_cb cb, void *cb_arg)
1314 {
1315 	int rc;
1316 	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1317 
1318 	assert(ftl_addr_in_nvc(io->dev, addr));
1319 
1320 	rc = ftl_nv_cache_bdev_read_blocks_with_md(io->dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1321 			ftl_io_iovec_addr(io), NULL, ftl_addr_to_nvc_offset(io->dev, addr),
1322 			num_blocks, cb, cb_arg);
1323 
1324 	return rc;
1325 }
1326 
1327 bool
1328 ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache)
1329 {
1330 	if (nv_cache->compaction_active_count) {
1331 		return false;
1332 	}
1333 
1334 	if (nv_cache->chunk_open_count > 0) {
1335 		return false;
1336 	}
1337 
1338 	if (is_compaction_required_for_upgrade(nv_cache)) {
1339 		return false;
1340 	}
1341 
1342 	return true;
1343 }
1344 
1345 void
1346 ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
1347 		      uint64_t offset, uint64_t lba)
1348 {
1349 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1350 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1351 
1352 	ftl_lba_store(dev, p2l_map->chunk_map, offset, lba);
1353 }
1354 
1355 uint64_t
1356 ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset)
1357 {
1358 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1359 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1360 
1361 	return ftl_lba_load(dev, p2l_map->chunk_map, offset);
1362 }
1363 
1364 static void
1365 ftl_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
1366 {
1367 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1368 	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1369 	uint64_t offset;
1370 
1371 	offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
1372 	ftl_chunk_map_set_lba(chunk, offset, lba);
1373 }
1374 
1375 struct ftl_nv_cache_chunk *
1376 ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
1377 {
1378 	struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks;
1379 	uint64_t chunk_idx;
1380 	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1381 
1382 	assert(chunk != NULL);
1383 	chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks;
1384 	chunk += chunk_idx;
1385 
1386 	return chunk;
1387 }
1388 
1389 void
1390 ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
1391 {
1392 	struct ftl_nv_cache_chunk *chunk;
1393 
1394 	chunk = ftl_nv_cache_get_chunk_from_addr(dev, addr);
1395 
1396 	assert(lba != FTL_LBA_INVALID);
1397 
1398 	ftl_chunk_set_addr(chunk, lba, addr);
1399 	ftl_bitmap_set(dev->valid_map, addr);
1400 }
1401 
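/*
 * Proportional throttle control: compare the free chunk count against its target and
 * scale the per-interval submission limit around the recent compaction bandwidth (SMA).
 */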
1402 static void
1403 ftl_nv_cache_throttle_update(struct ftl_nv_cache *nv_cache)
1404 {
1405 	double err;
1406 	double modifier;
1407 
1408 	err = ((double)nv_cache->chunk_free_count - nv_cache->chunk_free_target) / nv_cache->chunk_count;
1409 	modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP * err;
1410 
1411 	if (modifier < FTL_NV_CACHE_THROTTLE_MODIFIER_MIN) {
1412 		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MIN;
1413 	} else if (modifier > FTL_NV_CACHE_THROTTLE_MODIFIER_MAX) {
1414 		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MAX;
1415 	}
1416 
1417 	if (spdk_unlikely(nv_cache->compaction_sma == 0 || nv_cache->compaction_active_count == 0)) {
1418 		nv_cache->throttle.blocks_submitted_limit = UINT64_MAX;
1419 	} else {
1420 		double blocks_per_interval = nv_cache->compaction_sma * nv_cache->throttle.interval_tsc /
1421 					     FTL_BLOCK_SIZE;
1422 		nv_cache->throttle.blocks_submitted_limit = blocks_per_interval * (1.0 + modifier);
1423 	}
1424 }
1425 
1426 static void
1427 ftl_nv_cache_process_throttle(struct ftl_nv_cache *nv_cache)
1428 {
1429 	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
1430 
1431 	if (spdk_unlikely(!nv_cache->throttle.start_tsc)) {
1432 		nv_cache->throttle.start_tsc = tsc;
1433 	} else if (tsc - nv_cache->throttle.start_tsc >= nv_cache->throttle.interval_tsc) {
1434 		ftl_nv_cache_throttle_update(nv_cache);
1435 		nv_cache->throttle.start_tsc = tsc;
1436 		nv_cache->throttle.blocks_submitted = 0;
1437 	}
1438 }
1439 
1440 static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk);
1441 
1442 void
1443 ftl_nv_cache_process(struct spdk_ftl_dev *dev)
1444 {
1445 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1446 
1447 	assert(dev->nv_cache.bdev_desc);
1448 
1449 	if (nv_cache->chunk_open_count < FTL_MAX_OPEN_CHUNKS && spdk_likely(!nv_cache->halt) &&
1450 	    !TAILQ_EMPTY(&nv_cache->chunk_free_list)) {
1451 		struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);
1452 		TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
1453 		TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
1454 		nv_cache->chunk_free_count--;
1455 		chunk->md->seq_id = ftl_get_next_seq_id(dev);
1456 		ftl_chunk_open(chunk);
1457 		ftl_add_io_activity(dev);
1458 	}
1459 
1460 	compaction_process(nv_cache);
1461 	ftl_chunk_persist_free_state(nv_cache);
1462 	ftl_nv_cache_process_throttle(nv_cache);
1463 }
1464 
1465 static bool
1466 ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
1467 {
1468 	if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
1469 		return true;
1470 	} else {
1471 		return false;
1472 	}
1473 }
1474 
1475 bool
1476 ftl_nv_cache_throttle(struct spdk_ftl_dev *dev)
1477 {
1478 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1479 
1480 	if (dev->nv_cache.throttle.blocks_submitted >= nv_cache->throttle.blocks_submitted_limit ||
1481 	    ftl_nv_cache_full(nv_cache)) {
1482 		return true;
1483 	}
1484 
1485 	return false;
1486 }
1487 
1488 static void
1489 chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk)
1490 {
1491 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1492 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1493 
1494 	ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1495 	p2l_map->chunk_map = NULL;
1496 
1497 	ftl_chunk_free_md_entry(chunk);
1498 }
1499 
1500 int
1501 ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache)
1502 {
1503 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1504 	struct ftl_nv_cache_chunk *chunk;
1505 	int status = 0;
1506 	uint64_t i;
1507 
1508 	assert(nv_cache->chunk_open_count == 0);
1509 
1510 	if (nv_cache->compaction_active_count) {
1511 		FTL_ERRLOG(dev, "Cannot save NV cache state, compaction in progress\n");
1512 		return -EINVAL;
1513 	}
1514 
1515 	chunk = nv_cache->chunks;
1516 	if (!chunk) {
1517 		FTL_ERRLOG(dev, "Cannot save NV cache state, no NV cache metadata\n");
1518 		return -ENOMEM;
1519 	}
1520 
1521 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1522 		nvc_validate_md(nv_cache, chunk->md);
1523 
1524 		if (chunk->md->read_pointer)  {
1525 			/* Only full chunks can be compacted */
1526 			if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1527 				assert(0);
1528 				status = -EINVAL;
1529 				break;
1530 			}
1531 
1532 			/*
1533 			 * Chunk in the middle of compaction, start over after
1534 			 * load
1535 			 */
1536 			chunk->md->read_pointer = chunk->md->blocks_compacted = 0;
1537 		} else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
1538 			/* Full chunk */
1539 		} else if (0 == chunk->md->blocks_written) {
1540 			/* Empty chunk */
1541 		} else {
1542 			assert(0);
1543 			status = -EINVAL;
1544 			break;
1545 		}
1546 	}
1547 
1548 	if (status) {
1549 		FTL_ERRLOG(dev, "Cannot save NV cache state, inconsistent NV cache "
1550 			   "metadata\n");
1551 	}
1552 
1553 	return status;
1554 }
1555 
1556 static int
1557 sort_chunks_cmp(const void *a, const void *b)
1558 {
1559 	struct ftl_nv_cache_chunk *a_chunk = *(struct ftl_nv_cache_chunk **)a;
1560 	struct ftl_nv_cache_chunk *b_chunk = *(struct ftl_nv_cache_chunk **)b;
1561 
1562 	return (a_chunk->md->seq_id > b_chunk->md->seq_id) - (a_chunk->md->seq_id < b_chunk->md->seq_id);
1563 }
1564 
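/* Rebuild the full chunk list ordered by ascending open sequence id */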
1565 static int
1566 sort_chunks(struct ftl_nv_cache *nv_cache)
1567 {
1568 	struct ftl_nv_cache_chunk **chunks_list;
1569 	struct ftl_nv_cache_chunk *chunk;
1570 	uint32_t i;
1571 
1572 	if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
1573 		return 0;
1574 	}
1575 
1576 	chunks_list = calloc(nv_cache->chunk_full_count,
1577 			     sizeof(chunks_list[0]));
1578 	if (!chunks_list) {
1579 		return -ENOMEM;
1580 	}
1581 
1582 	i = 0;
1583 	TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) {
1584 		chunks_list[i] = chunk;
1585 		i++;
1586 	}
1587 	assert(i == nv_cache->chunk_full_count);
1588 
1589 	qsort(chunks_list, nv_cache->chunk_full_count, sizeof(chunks_list[0]),
1590 	      sort_chunks_cmp);
1591 
1592 	TAILQ_INIT(&nv_cache->chunk_full_list);
1593 	for (i = 0; i < nv_cache->chunk_full_count; i++) {
1594 		chunk = chunks_list[i];
1595 		TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1596 	}
1597 
1598 	free(chunks_list);
1599 	return 0;
1600 }
1601 
1602 static int
1603 chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk)
1604 {
1605 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1606 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1607 
1608 	assert(p2l_map->ref_cnt == 0);
1609 	assert(p2l_map->chunk_map == NULL);
1610 
1611 	p2l_map->chunk_map = ftl_mempool_get(nv_cache->p2l_pool);
1612 
1613 	if (!p2l_map->chunk_map) {
1614 		return -ENOMEM;
1615 	}
1616 
1617 	if (ftl_chunk_alloc_md_entry(chunk)) {
1618 		ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1619 		p2l_map->chunk_map = NULL;
1620 		return -ENOMEM;
1621 	}
1622 
1623 	/* Set the P2L to FTL_LBA_INVALID */
1624 	memset(p2l_map->chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
1625 
1626 	return 0;
1627 }
1628 
1629 int
1630 ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)
1631 {
1632 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1633 	struct ftl_nv_cache_chunk *chunk;
1634 	uint64_t chunks_number, offset, i;
1635 	int status = 0;
1636 	bool active;
1637 
1638 	nv_cache->chunk_current = NULL;
1639 	TAILQ_INIT(&nv_cache->chunk_free_list);
1640 	TAILQ_INIT(&nv_cache->chunk_full_list);
1641 	TAILQ_INIT(&nv_cache->chunk_inactive_list);
1642 	nv_cache->chunk_full_count = 0;
1643 	nv_cache->chunk_free_count = 0;
1644 	nv_cache->chunk_inactive_count = 0;
1645 
1646 	assert(nv_cache->chunk_open_count == 0);
1647 	offset = nvc_data_offset(nv_cache);
1648 	if (!nv_cache->chunks) {
1649 		FTL_ERRLOG(dev, "No NV cache metadata\n");
1650 		return -1;
1651 	}
1652 
1653 	if (dev->sb->upgrade_ready) {
1654 		/*
1655 		 * During upgrade some transitions are allowed:
1656 		 *
1657 		 * 1. FREE -> INACTIVE
1658 		 * 2. INACTIVE -> FREE
1659 		 */
1660 		chunk = nv_cache->chunks;
1661 		for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1662 			active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1663 
1664 			if (chunk->md->state == FTL_CHUNK_STATE_FREE) {
1665 				if (!active) {
1666 					chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
1667 				}
1668 			} else if (chunk->md->state == FTL_CHUNK_STATE_INACTIVE) {
1669 				if (active) {
1670 					chunk->md->state = FTL_CHUNK_STATE_FREE;
1671 				}
1672 			}
1673 		}
1674 	}
1675 
1676 	chunk = nv_cache->chunks;
1677 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1678 		chunk->nv_cache = nv_cache;
1679 		nvc_validate_md(nv_cache, chunk->md);
1680 
1681 		if (offset != chunk->offset) {
1682 			status = -EINVAL;
1683 			goto error;
1684 		}
1685 
1686 		if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
1687 			status = -EINVAL;
1688 			goto error;
1689 		}
1690 
1691 		active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1692 		if (false == active) {
1693 			if (chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
1694 				status = -EINVAL;
1695 				goto error;
1696 			}
1697 		}
1698 
1699 		switch (chunk->md->state) {
1700 		case FTL_CHUNK_STATE_FREE:
1701 			if (chunk->md->blocks_written || chunk->md->write_pointer) {
1702 				status = -EINVAL;
1703 				goto error;
1704 			}
1705 			/* Chunk is empty, move it to the free list */
1706 			TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
1707 			nv_cache->chunk_free_count++;
1708 			break;
1709 		case FTL_CHUNK_STATE_OPEN:
1710 			/* All chunks need to be closed at this point */
1711 			status = -EINVAL;
1712 			goto error;
1713 			break;
1714 		case FTL_CHUNK_STATE_CLOSED:
1715 			if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1716 				status = -EINVAL;
1717 				goto error;
1718 			}
1719 			/* Chunk is full, move it to the full list */
1720 			TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1721 			nv_cache->chunk_full_count++;
1722 			break;
1723 		case FTL_CHUNK_STATE_INACTIVE:
1724 			TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
1725 			nv_cache->chunk_inactive_count++;
1726 			break;
1727 		default:
1728 			status = -EINVAL;
1729 			FTL_ERRLOG(dev, "Invalid chunk state\n");
1730 			goto error;
1731 		}
1732 
1733 		offset += nv_cache->chunk_blocks;
1734 	}
1735 
1736 	chunks_number = nv_cache->chunk_free_count + nv_cache->chunk_full_count +
1737 			nv_cache->chunk_inactive_count;
1738 	assert(nv_cache->chunk_current == NULL);
1739 
1740 	if (chunks_number != nv_cache->chunk_count) {
1741 		FTL_ERRLOG(dev, "Inconsistent NV cache metadata\n");
1742 		status = -EINVAL;
1743 		goto error;
1744 	}
1745 
1746 	status = sort_chunks(nv_cache);
1747 	if (status) {
1748 		FTL_ERRLOG(dev, "FTL NV Cache: sorting chunks ERROR\n");
1749 	}
1750 
1751 	FTL_NOTICELOG(dev, "FTL NV Cache: full chunks = %lu, empty chunks = %lu\n",
1752 		      nv_cache->chunk_full_count, nv_cache->chunk_free_count);
1753 
1754 	if (0 == status) {
1755 		FTL_NOTICELOG(dev, "FTL NV Cache: state loaded successfully\n");
1756 	} else {
1757 		FTL_ERRLOG(dev, "FTL NV Cache: loading state ERROR\n");
1758 	}
1759 
1760 	/* The number of active/inactive chunks calculated at initialization can change at this point due to metadata
1761 	 * upgrade. Recalculate the thresholds that depend on active chunk count.
1762 	 */
1763 	ftl_nv_cache_init_update_limits(dev);
1764 error:
1765 	return status;
1766 }
1767 
1768 void
1769 ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
1770 			    uint64_t *close_seq_id)
1771 {
1772 	uint64_t i, o_seq_id = 0, c_seq_id = 0;
1773 	struct ftl_nv_cache_chunk *chunk;
1774 
1775 	chunk = nv_cache->chunks;
1776 	assert(chunk);
1777 
1778 	/* Iterate over chunks and get their max open and close seq id */
1779 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1780 		o_seq_id = spdk_max(o_seq_id, chunk->md->seq_id);
1781 		c_seq_id = spdk_max(c_seq_id, chunk->md->close_seq_id);
1782 	}
1783 
1784 	*open_seq_id = o_seq_id;
1785 	*close_seq_id = c_seq_id;
1786 }
1787 
1788 typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status);
1789 
1790 static void
1791 write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1792 {
1793 	struct ftl_basic_rq *brq = arg;
1794 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1795 
1796 	ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1797 
1798 	brq->success = success;
1799 	if (spdk_likely(success)) {
1800 		chunk_advance_blocks(chunk->nv_cache, chunk, brq->num_blocks);
1801 	}
1802 
1803 	spdk_bdev_free_io(bdev_io);
1804 	brq->owner.cb(brq);
1805 }
1806 
1807 static void
1808 _ftl_chunk_basic_rq_write(void *_brq)
1809 {
1810 	struct ftl_basic_rq *brq = _brq;
1811 	struct ftl_nv_cache *nv_cache = brq->io.chunk->nv_cache;
1812 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1813 	int rc;
1814 
1815 	rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1816 			brq->io_payload, NULL, brq->io.addr,
1817 			brq->num_blocks, write_brq_end, brq);
1818 	if (spdk_unlikely(rc)) {
1819 		if (rc == -ENOMEM) {
1820 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
1821 			brq->io.bdev_io_wait.bdev = bdev;
1822 			brq->io.bdev_io_wait.cb_fn = _ftl_chunk_basic_rq_write;
1823 			brq->io.bdev_io_wait.cb_arg = brq;
1824 			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &brq->io.bdev_io_wait);
1825 		} else {
1826 			ftl_abort();
1827 		}
1828 	}
1829 }
1830 
1831 static void
1832 ftl_chunk_basic_rq_write(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1833 {
1834 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1835 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1836 
1837 	brq->io.chunk = chunk;
1838 	brq->success = false;
1839 
1840 	_ftl_chunk_basic_rq_write(brq);
1841 
1842 	chunk->md->write_pointer += brq->num_blocks;
1843 	dev->stats.io_activity_total += brq->num_blocks;
1844 }
1845 
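/* Completion callback for a basic rq read from the cache bdev: record stats and return the rq to its owner. */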
1846 static void
1847 read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1848 {
1849 	struct ftl_basic_rq *brq = arg;
1850 
1851 	ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1852 
1853 	brq->success = success;
1854 
1855 	brq->owner.cb(brq);
1856 	spdk_bdev_free_io(bdev_io);
1857 }
1858 
1859 static int
1860 ftl_chunk_basic_rq_read(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1861 {
1862 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1863 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1864 	int rc;
1865 
1866 	brq->io.chunk = chunk;
1867 	brq->success = false;
1868 
1869 	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1870 			brq->io_payload, NULL, brq->io.addr, brq->num_blocks, read_brq_end, brq);
1871 
1872 	if (spdk_likely(!rc)) {
1873 		dev->stats.io_activity_total += brq->num_blocks;
1874 	}
1875 
1876 	return rc;
1877 }
1878 
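/*
 * Metadata persistence callback for opening a chunk: on success mark the in-memory state OPEN,
 * otherwise retry the persist (when SPDK_FTL_RETRY_ON_ERROR is set) or abort.
 */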
1879 static void
1880 chunk_open_cb(int status, void *ctx)
1881 {
1882 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1883 
1884 	if (spdk_unlikely(status)) {
1885 #ifdef SPDK_FTL_RETRY_ON_ERROR
1886 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1887 		return;
1888 #else
1889 		ftl_abort();
1890 #endif
1891 	}
1892 
1893 	chunk->md->state = FTL_CHUNK_STATE_OPEN;
1894 }
1895 
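/*
 * Open a chunk: allocate its P2L map, prepare a DMA-able copy of the chunk metadata with the
 * OPEN state and persist it in the NVC metadata region.
 */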
1896 static void
1897 ftl_chunk_open(struct ftl_nv_cache_chunk *chunk)
1898 {
1899 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1900 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1901 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
1902 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1903 
1904 	if (chunk_alloc_p2l_map(chunk)) {
1905 		assert(0);
1906 		/*
1907 		 * The number of chunks being opened is bounded and must stay consistent with the
1908 		 * size of the chunk P2L map pool, so an allocation failure here is fatal
1909 		 */
1910 		ftl_abort();
1911 		return;
1912 	}
1913 
1914 	chunk->nv_cache->chunk_open_count++;
1915 
1916 	assert(chunk->md->write_pointer == 0);
1917 	assert(chunk->md->blocks_written == 0);
1918 
1919 	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1920 	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_OPEN;
1921 	p2l_map->chunk_dma_md->p2l_map_checksum = 0;
1922 
1923 	ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md,
1924 			       NULL, chunk_open_cb, chunk,
1925 			       &chunk->md_persist_entry_ctx);
1926 }
1927 
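/*
 * Metadata persistence callback for closing a chunk: on success store the P2L map checksum,
 * release the P2L map and move the chunk from the open accounting to the full list.
 */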
1928 static void
1929 chunk_close_cb(int status, void *ctx)
1930 {
1931 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1932 
1933 	assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);
1934 
1935 	if (spdk_likely(!status)) {
1936 		chunk->md->p2l_map_checksum = chunk->p2l_map.chunk_dma_md->p2l_map_checksum;
1937 		chunk_free_p2l_map(chunk);
1938 
1939 		assert(chunk->nv_cache->chunk_open_count > 0);
1940 		chunk->nv_cache->chunk_open_count--;
1941 
1942 		/* Chunk is full, move it to the full list */
1943 		TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
1944 		chunk->nv_cache->chunk_full_count++;
1945 
1946 		chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
1947 
1948 		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1949 	} else {
1950 #ifdef SPDK_FTL_RETRY_ON_ERROR
1951 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1952 #else
1953 		ftl_abort();
1954 #endif
1955 	}
1956 }
1957 
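/*
 * Called when the chunk's tail metadata (P2L map) write completes: compute the map's CRC32C,
 * fill the DMA-able metadata copy with the CLOSED state and persist it; chunk_close_cb finishes the close.
 */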
1958 static void
1959 chunk_map_write_cb(struct ftl_basic_rq *brq)
1960 {
1961 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1962 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1963 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1964 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
1965 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1966 	uint32_t chunk_map_crc;
1967 
1968 	if (spdk_likely(brq->success)) {
1969 		chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
1970 						   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
1971 		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1972 		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
1973 		p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
1974 		ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk->p2l_map.chunk_dma_md,
1975 				       NULL, chunk_close_cb, chunk,
1976 				       &chunk->md_persist_entry_ctx);
1977 	} else {
1978 #ifdef SPDK_FTL_RETRY_ON_ERROR
1979 		/* retry */
1980 		chunk->md->write_pointer -= brq->num_blocks;
1981 		ftl_chunk_basic_rq_write(chunk, brq);
1982 #else
1983 		ftl_abort();
1984 #endif
1985 	}
1986 }
1987 
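/*
 * Start closing a chunk: assign a close sequence id and write the P2L map as the chunk's tail
 * metadata; chunk_map_write_cb continues the close sequence on completion.
 */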
1988 static void
1989 ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
1990 {
1991 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1992 	struct ftl_basic_rq *brq = &chunk->metadata_rq;
1993 	void *metadata = chunk->p2l_map.chunk_map;
1994 
1995 	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
1996 	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
1997 	ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk);
1998 
1999 	assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
2000 	brq->io.addr = chunk->offset + chunk->md->write_pointer;
2001 
2002 	ftl_chunk_basic_rq_write(chunk, brq);
2003 }
2004 
2005 static void read_tail_md_cb(struct ftl_basic_rq *brq);
2006 static void recover_open_chunk_cb(struct ftl_basic_rq *brq);
2007 
2008 static void
2009 restore_chunk_close_cb(int status, void *ctx)
2010 {
2011 	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)ctx;
2012 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
2013 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
2014 
2015 	if (spdk_unlikely(status)) {
2016 		parent->success = false;
2017 	} else {
2018 		chunk->md->p2l_map_checksum = p2l_map->chunk_dma_md->p2l_map_checksum;
2019 		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
2020 	}
2021 
2022 	read_tail_md_cb(parent);
2023 }
2024 
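/*
 * Completion of the tail metadata write issued during open-chunk recovery: restore the original
 * owner callback, then persist the chunk metadata as CLOSED with a full write pointer and the
 * recomputed P2L map checksum.
 */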
2025 static void
2026 restore_fill_p2l_map_cb(struct ftl_basic_rq *parent)
2027 {
2028 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
2029 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
2030 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2031 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2032 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
2033 	uint32_t chunk_map_crc;
2034 
2035 	/* Set original callback */
2036 	ftl_basic_rq_set_owner(parent, recover_open_chunk_cb, parent->owner.priv);
2037 
2038 	if (spdk_unlikely(!parent->success)) {
2039 		read_tail_md_cb(parent);
2040 		return;
2041 	}
2042 
2043 	chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
2044 					   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
2045 	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
2046 	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
2047 	p2l_map->chunk_dma_md->write_pointer = chunk->nv_cache->chunk_blocks;
2048 	p2l_map->chunk_dma_md->blocks_written = chunk->nv_cache->chunk_blocks;
2049 	p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
2050 
2051 	ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
2052 			       restore_chunk_close_cb, parent, &chunk->md_persist_entry_ctx);
2053 }
2054 
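/*
 * Write the rebuilt P2L map as the chunk's tail metadata; restore_fill_p2l_map_cb handles
 * the completion.
 */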
2055 static void
2056 restore_fill_tail_md(struct ftl_basic_rq *parent, struct ftl_nv_cache_chunk *chunk)
2057 {
2058 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2059 	void *metadata;
2060 
2061 	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
2062 
2063 	metadata = chunk->p2l_map.chunk_map;
2064 	ftl_basic_rq_init(dev, parent, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2065 	ftl_basic_rq_set_owner(parent, restore_fill_p2l_map_cb, parent->owner.priv);
2066 
2067 	parent->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2068 	parent->io.chunk = chunk;
2069 
2070 	ftl_chunk_basic_rq_write(chunk, parent);
2071 }
2072 
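/*
 * Completion of a user-data read batch during open-chunk recovery: rebuild P2L entries from the
 * per-block metadata (invalidating blocks whose sequence id does not match the chunk), then issue
 * the next batch or, once the data region is exhausted, write the rebuilt tail metadata.
 */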
2073 static void
2074 read_open_chunk_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2075 {
2076 	struct ftl_rq *rq = (struct ftl_rq *)cb_arg;
2077 	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)rq->owner.priv;
2078 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
2079 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
2080 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2081 	union ftl_md_vss *md;
2082 	uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
2083 	uint64_t len = bdev_io->u.bdev.num_blocks;
2084 	ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);
2085 	int rc;
2086 
2087 	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);
2088 
2089 	spdk_bdev_free_io(bdev_io);
2090 
2091 	if (!success) {
2092 		parent->success = false;
2093 		read_tail_md_cb(parent);
2094 		return;
2095 	}
2096 
2097 	while (rq->iter.idx < rq->iter.count) {
2098 		/* Get metadata */
2099 		md = rq->entries[rq->iter.idx].io_md;
2100 		if (md->nv_cache.seq_id != chunk->md->seq_id) {
2101 			md->nv_cache.lba = FTL_LBA_INVALID;
2102 		}
2103 		/*
2104 		 * The P2L map contains effectively random data at this point (it holds arbitrary blocks
2105 		 * from a tail md that may not have been fully written), so even LBA_INVALID needs to be set explicitly
2106 		 */
2107 
2108 		ftl_chunk_set_addr(chunk, md->nv_cache.lba, addr + rq->iter.idx);
2109 		rq->iter.idx++;
2110 	}
2111 
2112 	if (cache_offset + len < chunk->offset + chunk_tail_md_offset(nv_cache)) {
2113 		cache_offset += len;
2114 		len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - cache_offset);
2115 		rq->iter.idx = 0;
2116 		rq->iter.count = len;
2117 
2118 		rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
2119 				nv_cache->cache_ioch,
2120 				rq->io_payload,
2121 				rq->io_md,
2122 				cache_offset, len,
2123 				read_open_chunk_cb,
2124 				rq);
2125 
2126 		if (rc) {
2127 			ftl_rq_del(rq);
2128 			parent->success = false;
2129 			read_tail_md_cb(parent);
2130 			return;
2131 		}
2132 	} else {
2133 		ftl_rq_del(rq);
2134 		restore_fill_tail_md(parent, chunk);
2135 	}
2136 }
2137 
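/*
 * Rebuild an open chunk's P2L map by reading its user data (with per-block metadata) in
 * xfer_size batches, starting from the chunk's first block.
 */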
2138 static void
2139 restore_open_chunk(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *parent)
2140 {
2141 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
2142 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
2143 	struct ftl_rq *rq;
2144 	uint64_t addr;
2145 	uint64_t len = dev->xfer_size;
2146 	int rc;
2147 
2148 	/*
2149 	 * Prefill the P2L map with INVALID LBA before rebuilding it from the user data metadata.
2150 	 * TODO: this is needed because tail md blocks (the P2L map itself) are also represented in the P2L map, not just the user data region
2151 	 */
2152 	memset(chunk->p2l_map.chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
2153 
2154 	/* Need to read user data, recalculate chunk's P2L and write tail md with it */
2155 	rq = ftl_rq_new(dev, dev->nv_cache.md_size);
2156 	if (!rq) {
2157 		parent->success = false;
2158 		read_tail_md_cb(parent);
2159 		return;
2160 	}
2161 
2162 	rq->owner.priv = parent;
2163 	rq->iter.idx = 0;
2164 	rq->iter.count = len;
2165 
2166 	addr = chunk->offset;
2167 
2168 	len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - addr);
2169 
2170 	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
2171 			nv_cache->cache_ioch,
2172 			rq->io_payload,
2173 			rq->io_md,
2174 			addr, len,
2175 			read_open_chunk_cb,
2176 			rq);
2177 
2178 	if (rc) {
2179 		ftl_rq_del(rq);
2180 		parent->success = false;
2181 		read_tail_md_cb(parent);
2182 	}
2183 }
2184 
2185 static void
2186 read_tail_md_cb(struct ftl_basic_rq *brq)
2187 {
2188 	brq->owner.cb(brq);
2189 }
2190 
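/* Read the chunk's tail metadata (P2L map) into the chunk's map buffer. */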
2191 static int
2192 ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
2193 		       void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx)
2194 {
2195 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2196 	void *metadata;
2197 	int rc;
2198 
2199 	metadata = chunk->p2l_map.chunk_map;
2200 	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2201 	ftl_basic_rq_set_owner(brq, cb, cb_ctx);
2202 
2203 	brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2204 	rc = ftl_chunk_basic_rq_read(chunk, brq);
2205 
2206 	return rc;
2207 }
2208 
2209 struct restore_chunk_md_ctx {
2210 	ftl_chunk_md_cb cb;
2211 	void *cb_ctx;
2212 	int status;
2213 	uint64_t qd;
2214 	uint64_t id;
2215 };
2216 
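/* Sanity check: the per-state chunk counters must add up to the total chunk count. */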
2217 static inline bool
2218 is_chunk_count_valid(struct ftl_nv_cache *nv_cache)
2219 {
2220 	uint64_t chunk_count = 0;
2221 
2222 	chunk_count += nv_cache->chunk_open_count;
2223 	chunk_count += nv_cache->chunk_free_count;
2224 	chunk_count += nv_cache->chunk_full_count;
2225 	chunk_count += nv_cache->chunk_comp_count;
2226 	chunk_count += nv_cache->chunk_inactive_count;
2227 
2228 	return chunk_count == nv_cache->chunk_count;
2229 }
2230 
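/*
 * Tail metadata read completion for the walk: invoke the caller's per-chunk callback (or record
 * -EIO on read failure), release the P2L map, drop the queue depth and re-enter the mngt step.
 */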
2231 static void
2232 walk_tail_md_cb(struct ftl_basic_rq *brq)
2233 {
2234 	struct ftl_mngt_process *mngt = brq->owner.priv;
2235 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2236 	struct restore_chunk_md_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
2237 	int rc = 0;
2238 
2239 	if (brq->success) {
2240 		rc = ctx->cb(chunk, ctx->cb_ctx);
2241 	} else {
2242 		rc = -EIO;
2243 	}
2244 
2245 	if (rc) {
2246 		ctx->status = rc;
2247 	}
2248 	ctx->qd--;
2249 	chunk_free_p2l_map(chunk);
2250 	ftl_mngt_continue_step(mngt);
2251 }
2252 
2253 static void
2254 ftl_mngt_nv_cache_walk_tail_md(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2255 			       uint64_t seq_id, ftl_chunk_md_cb cb, void *cb_ctx)
2256 {
2257 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2258 	struct restore_chunk_md_ctx *ctx;
2259 
2260 	ctx = ftl_mngt_get_step_ctx(mngt);
2261 	if (!ctx) {
2262 		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*ctx))) {
2263 			ftl_mngt_fail_step(mngt);
2264 			return;
2265 		}
2266 		ctx = ftl_mngt_get_step_ctx(mngt);
2267 		assert(ctx);
2268 
2269 		ctx->cb = cb;
2270 		ctx->cb_ctx = cb_ctx;
2271 	}
2272 
2273 	/*
2274 	 * This function generates a high queue depth and will utilize ftl_mngt_continue_step during completions to make sure all chunks
2275 	 * are processed before returning an error (if any were found) or continuing on.
2276 	 */
2277 	if (0 == ctx->qd && ctx->id == nvc->chunk_count) {
2278 		if (!is_chunk_count_valid(nvc)) {
2279 			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2280 			assert(false);
2281 			ctx->status = -EINVAL;
2282 		}
2283 
2284 		if (ctx->status) {
2285 			ftl_mngt_fail_step(mngt);
2286 		} else {
2287 			ftl_mngt_next_step(mngt);
2288 		}
2289 		return;
2290 	}
2291 
2292 	while (ctx->id < nvc->chunk_count) {
2293 		struct ftl_nv_cache_chunk *chunk = &nvc->chunks[ctx->id];
2294 		int rc;
2295 
2296 		if (!chunk->recovery) {
2297 			/* This chunk is inactive or empty and not used in recovery */
2298 			ctx->id++;
2299 			continue;
2300 		}
2301 
2302 		if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
2303 			ctx->id++;
2304 			continue;
2305 		}
2306 
2307 		if (chunk_alloc_p2l_map(chunk)) {
2308 			/* No more free P2L maps, break and continue later */
2309 			break;
2310 		}
2311 		ctx->id++;
2312 
2313 		rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, walk_tail_md_cb, mngt);
2314 
2315 		if (0 == rc) {
2316 			ctx->qd++;
2317 		} else {
2318 			chunk_free_p2l_map(chunk);
2319 			ctx->status = rc;
2320 		}
2321 	}
2322 
2323 	if (0 == ctx->qd) {
2324 		/*
2325 		 * Zero queue depth can happen when all leftover chunks are in the free state.
2326 		 * Additionally, ftl_chunk_read_tail_md could fail starting with the first IO in a given batch.
2327 		 * To streamline all potential error handling (since many chunks read their P2L maps at the same time),
2328 		 * ftl_mngt_continue_step is used to arrive at the same spot that checks for mngt step end (see beginning of function).
2329 		 */
2330 		ftl_mngt_continue_step(mngt);
2331 	}
2332 
2333 }
2334 
2335 void
2336 ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2337 			      ftl_chunk_md_cb cb, void *cb_ctx)
2338 {
2339 	ftl_mngt_nv_cache_walk_tail_md(dev, mngt, dev->sb->ckpt_seq_id, cb, cb_ctx);
2340 }
2341 
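/*
 * NVC metadata restore callback: validate each chunk's version and active/inactive consistency,
 * then move chunks to the open or full list according to their persisted state, marking
 * non-empty chunks for recovery.
 */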
2342 static void
2343 restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
2344 {
2345 	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
2346 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2347 	struct ftl_nv_cache_chunk *chunk;
2348 	uint64_t i;
2349 
2350 	if (status) {
2351 		/* Restore error, end step */
2352 		ftl_mngt_fail_step(mngt);
2353 		return;
2354 	}
2355 
2356 	for (i = 0; i < nvc->chunk_count; i++) {
2357 		chunk = &nvc->chunks[i];
2358 
2359 		if (false == nvc->nvc_type->ops.is_chunk_active(dev, chunk->offset) &&
2360 		    chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
2361 			status = -EINVAL;
2362 			break;
2363 		}
2364 
2365 		if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
2366 			status = -EINVAL;
2367 			break;
2368 		}
2369 
2370 		switch (chunk->md->state) {
2371 		case FTL_CHUNK_STATE_FREE:
2372 			break;
2373 		case FTL_CHUNK_STATE_OPEN:
2374 			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2375 			nvc->chunk_free_count--;
2376 
2377 			TAILQ_INSERT_TAIL(&nvc->chunk_open_list, chunk, entry);
2378 			nvc->chunk_open_count++;
2379 
2380 			/* Chunk is not empty, mark it to be recovered */
2381 			chunk->recovery = true;
2382 			break;
2383 		case FTL_CHUNK_STATE_CLOSED:
2384 			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2385 			nvc->chunk_free_count--;
2386 
2387 			TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2388 			nvc->chunk_full_count++;
2389 
2390 			/* Chunk is not empty, mark it to be recovered */
2391 			chunk->recovery = true;
2392 			break;
2393 		case FTL_CHUNK_STATE_INACTIVE:
2394 			break;
2395 		default:
2396 			status = -EINVAL;
2397 		}
2398 	}
2399 
2400 	if (status) {
2401 		ftl_mngt_fail_step(mngt);
2402 	} else {
2403 		ftl_mngt_next_step(mngt);
2404 	}
2405 }
2406 
2407 void
2408 ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2409 {
2410 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2411 
2412 	md->owner.cb_ctx = mngt;
2413 	md->cb = restore_chunk_state_cb;
2414 	ftl_md_restore(md);
2415 }
2416 
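/*
 * Final callback after an open chunk's P2L map has been rebuilt and persisted: release the map,
 * move the chunk from the open list to the full list and mark it as fully written.
 */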
2417 static void
2418 recover_open_chunk_cb(struct ftl_basic_rq *brq)
2419 {
2420 	struct ftl_mngt_process *mngt = brq->owner.priv;
2421 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2422 	struct ftl_nv_cache *nvc = chunk->nv_cache;
2423 	struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
2424 
2425 	chunk_free_p2l_map(chunk);
2426 
2427 	if (!brq->success) {
2428 		FTL_ERRLOG(dev, "Recovery chunk ERROR, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2429 			   chunk->md->seq_id);
2430 		ftl_mngt_fail_step(mngt);
2431 		return;
2432 	}
2433 
2434 	FTL_NOTICELOG(dev, "Recovered chunk, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2435 		      chunk->md->seq_id);
2436 
2437 	TAILQ_REMOVE(&nvc->chunk_open_list, chunk, entry);
2438 	nvc->chunk_open_count--;
2439 
2440 	TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2441 	nvc->chunk_full_count++;
2442 
2443 	/* This is now a closed chunk */
2444 	chunk->md->write_pointer = nvc->chunk_blocks;
2445 	chunk->md->blocks_written = nvc->chunk_blocks;
2446 
2447 	ftl_mngt_continue_step(mngt);
2448 }
2449 
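/*
 * Management step that recovers open chunks one at a time from the open list; once the list is
 * empty, the chunk counters are validated and the NV cache state is reloaded.
 */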
2450 void
2451 ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2452 {
2453 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2454 	struct ftl_nv_cache_chunk *chunk;
2455 	struct ftl_basic_rq *brq = ftl_mngt_get_step_ctx(mngt);
2456 
2457 	if (!brq) {
2458 		if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2459 			FTL_NOTICELOG(dev, "No open chunks to recover P2L\n");
2460 			ftl_mngt_next_step(mngt);
2461 			return;
2462 		}
2463 
2464 		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*brq))) {
2465 			ftl_mngt_fail_step(mngt);
2466 			return;
2467 		}
2468 		brq = ftl_mngt_get_step_ctx(mngt);
2469 		ftl_basic_rq_set_owner(brq, recover_open_chunk_cb, mngt);
2470 	}
2471 
2472 	if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2473 		if (!is_chunk_count_valid(nvc)) {
2474 			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2475 			ftl_mngt_fail_step(mngt);
2476 			return;
2477 		}
2478 
2479 		/*
2480 		 * Now that all chunks are loaded and closed, do the final step of
2481 		 * restoring the chunks' state
2482 		 */
2483 		if (ftl_nv_cache_load_state(nvc)) {
2484 			ftl_mngt_fail_step(mngt);
2485 		} else {
2486 			ftl_mngt_next_step(mngt);
2487 		}
2488 	} else {
2489 		chunk = TAILQ_FIRST(&nvc->chunk_open_list);
2490 		if (chunk_alloc_p2l_map(chunk)) {
2491 			ftl_mngt_fail_step(mngt);
2492 			return;
2493 		}
2494 
2495 		brq->io.chunk = chunk;
2496 
2497 		FTL_NOTICELOG(dev, "Start recovery open chunk, offset = %"PRIu64", seq id %"PRIu64"\n",
2498 			      chunk->offset, chunk->md->seq_id);
2499 		restore_open_chunk(chunk, brq);
2500 	}
2501 }
2502 
2503 int
2504 ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache)
2505 {
2506 	/* chunk_current migrates to the closed state when closing; any others should already have
2507 	 * been moved to the free chunk list. Also need to wait for outstanding free md requests */
2508 	return nv_cache->chunk_open_count == 0 && nv_cache->chunk_free_persist_count == 0;
2509 }
2510 
2511 void
2512 ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
2513 {
2514 	struct ftl_nv_cache_chunk *chunk;
2515 	uint64_t free_space;
2516 
2517 	nv_cache->halt = true;
2518 
2519 	/* Set chunks on the open list back to the free state since no user data has been written to them */
2520 	while (!TAILQ_EMPTY(&nv_cache->chunk_open_list)) {
2521 		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2522 
2523 		/* Chunks are moved between lists when the metadata update is submitted, but their state
2524 		 * is changed on completion. Break early in such a case to make sure all the necessary
2525 		 * resources will be freed (during the next pass(es) of ftl_nv_cache_halt).
2526 		 */
2527 		if (chunk->md->state != FTL_CHUNK_STATE_OPEN) {
2528 			break;
2529 		}
2530 
2531 		TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
2532 		chunk_free_p2l_map(chunk);
2533 		ftl_nv_cache_chunk_md_initialize(chunk->md);
2534 		assert(nv_cache->chunk_open_count > 0);
2535 		nv_cache->chunk_open_count--;
2536 	}
2537 
2538 	/* Close the current chunk by skipping all unwritten blocks */
2539 	chunk = nv_cache->chunk_current;
2540 	if (chunk != NULL) {
2541 		nv_cache->chunk_current = NULL;
2542 		if (chunk_is_closed(chunk)) {
2543 			return;
2544 		}
2545 
2546 		free_space = chunk_get_free_space(nv_cache, chunk);
2547 		chunk->md->blocks_skipped = free_space;
2548 		chunk->md->blocks_written += free_space;
2549 		chunk->md->write_pointer += free_space;
2550 		ftl_chunk_close(chunk);
2551 	}
2552 }
2553 
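/*
 * Acquire a sequence id for a trim operation: if there is a current chunk, pad it with skipped
 * blocks (closing it when full) and return its sequence id + 1; otherwise return the first open
 * chunk's sequence id, or 0 if none is available.
 */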
2554 uint64_t
2555 ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache)
2556 {
2557 	struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
2558 	uint64_t seq_id, free_space;
2559 
2560 	if (!chunk) {
2561 		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2562 		if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
2563 			return chunk->md->seq_id;
2564 		} else {
2565 			return 0;
2566 		}
2567 	}
2568 
2569 	if (chunk_is_closed(chunk)) {
2570 		return 0;
2571 	}
2572 
2573 	seq_id = nv_cache->chunk_current->md->seq_id;
2574 	free_space = chunk_get_free_space(nv_cache, chunk);
2575 
2576 	chunk->md->blocks_skipped = free_space;
2577 	chunk->md->blocks_written += free_space;
2578 	chunk->md->write_pointer += free_space;
2579 	if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
2580 		ftl_chunk_close(chunk);
2581 	}
2582 	nv_cache->chunk_current = NULL;
2583 
2584 	seq_id++;
2585 	return seq_id;
2586 }
2587 
2588 static double
2589 ftl_nv_cache_get_chunk_utilization(struct ftl_nv_cache *nv_cache,
2590 				   struct ftl_nv_cache_chunk *chunk)
2591 {
2592 	double capacity = nv_cache->chunk_blocks;
2593 	double used = chunk->md->blocks_written + chunk->md->blocks_skipped;
2594 
2595 	return used / capacity;
2596 }
2597 
2598 static const char *
2599 ftl_nv_cache_get_chunk_state_name(struct ftl_nv_cache_chunk *chunk)
2600 {
2601 	static const char *names[] = {
2602 		"FREE", "OPEN", "CLOSED", "INACTIVE"
2603 	};
2604 
2605 	assert(chunk->md->state < SPDK_COUNTOF(names));
2606 	if (chunk->md->state < SPDK_COUNTOF(names)) {
2607 		return names[chunk->md->state];
2608 	} else {
2609 		assert(false);
2610 		return "?";
2611 	}
2612 }
2613 
2614 static void
2615 ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev, const struct ftl_property *property,
2616 			    struct spdk_json_write_ctx *w)
2617 {
2618 	uint64_t i;
2619 	struct ftl_nv_cache_chunk *chunk;
2620 
2621 	spdk_json_write_named_string(w, "type", dev->nv_cache.nvc_type->name);
2622 	spdk_json_write_named_array_begin(w, "chunks");
2623 	for (i = 0, chunk = dev->nv_cache.chunks; i < dev->nv_cache.chunk_count; i++, chunk++) {
2624 		spdk_json_write_object_begin(w);
2625 		spdk_json_write_named_uint64(w, "id", i);
2626 		spdk_json_write_named_string(w, "state", ftl_nv_cache_get_chunk_state_name(chunk));
2627 		spdk_json_write_named_double(w, "utilization",
2628 					     ftl_nv_cache_get_chunk_utilization(&dev->nv_cache, chunk));
2629 		spdk_json_write_object_end(w);
2630 	}
2631 	spdk_json_write_array_end(w);
2632 }
2633 
2634 void
2635 ftl_nv_cache_chunk_md_initialize(struct ftl_nv_cache_chunk_md *md)
2636 {
2637 	memset(md, 0, sizeof(*md));
2638 	md->version = FTL_NVC_VERSION_CURRENT;
2639 }
2640