xref: /spdk/lib/ftl/ftl_nv_cache.c (revision d51b3bfc0c6053b72080b38f76c748ef9f9c3760)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   Copyright 2023 Solidigm All Rights Reserved
4  *   All rights reserved.
5  */
6 
7 
8 #include "spdk/bdev.h"
9 #include "spdk/bdev_module.h"
10 #include "spdk/ftl.h"
11 #include "spdk/string.h"
12 
13 #include "ftl_nv_cache.h"
14 #include "ftl_nv_cache_io.h"
15 #include "ftl_core.h"
16 #include "ftl_band.h"
17 #include "utils/ftl_addr_utils.h"
18 #include "mngt/ftl_mngt.h"
19 
20 static inline uint64_t nvc_data_blocks(struct ftl_nv_cache *nv_cache) __attribute__((unused));
21 static struct ftl_nv_cache_compactor *compactor_alloc(struct spdk_ftl_dev *dev);
22 static void compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor);
23 static void compaction_process_ftl_done(struct ftl_rq *rq);
24 static void compaction_process_read_entry(void *arg);
25 static void ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev,
26 					const struct ftl_property *property,
27 					struct spdk_json_write_ctx *w);
28 
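/* Sanity check that a chunk metadata entry lies entirely within the NV cache metadata buffer;
 * aborts on corruption */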
29 static inline void
30 nvc_validate_md(struct ftl_nv_cache *nv_cache,
31 		struct ftl_nv_cache_chunk_md *chunk_md)
32 {
33 	struct ftl_md *md = nv_cache->md;
34 	void *buffer = ftl_md_get_buffer(md);
35 	uint64_t size = ftl_md_get_buffer_size(md);
36 	void *ptr = chunk_md;
37 
38 	if (ptr < buffer) {
39 		ftl_abort();
40 	}
41 
42 	ptr += sizeof(*chunk_md);
43 	if (ptr > buffer + size) {
44 		ftl_abort();
45 	}
46 }
47 
48 static inline uint64_t
49 nvc_data_offset(struct ftl_nv_cache *nv_cache)
50 {
51 	return 0;
52 }
53 
54 static inline uint64_t
55 nvc_data_blocks(struct ftl_nv_cache *nv_cache)
56 {
57 	return nv_cache->chunk_blocks * nv_cache->chunk_count;
58 }
59 
60 size_t
61 ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache)
62 {
63 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache,
64 				    struct spdk_ftl_dev, nv_cache);
65 	return spdk_divide_round_up(dev->layout.nvc.chunk_data_blocks * dev->layout.l2p.addr_size,
66 				    FTL_BLOCK_SIZE);
67 }
68 
69 static size_t
70 nv_cache_p2l_map_pool_elem_size(const struct ftl_nv_cache *nv_cache)
71 {
72 	/* Map pool element holds the whole tail md */
73 	return nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE;
74 }
75 
76 static uint64_t
77 get_chunk_idx(struct ftl_nv_cache_chunk *chunk)
78 {
79 	struct ftl_nv_cache_chunk *first_chunk = chunk->nv_cache->chunks;
80 
81 	return (chunk->offset - first_chunk->offset) / chunk->nv_cache->chunk_blocks;
82 }
83 
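/* Derive the compaction threshold, throttle interval and free chunk target from the configuration
 * and the number of active chunks */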
84 static void
85 ftl_nv_cache_init_update_limits(struct spdk_ftl_dev *dev)
86 {
87 	struct ftl_nv_cache *nvc = &dev->nv_cache;
88 	uint64_t usable_chunks = nvc->chunk_count - nvc->chunk_inactive_count;
89 
90 	/* Start compaction when the number of full chunks exceeds the given % of all active chunks */
91 	nvc->chunk_compaction_threshold = usable_chunks *
92 					  dev->conf.nv_cache.chunk_compaction_threshold /
93 					  100;
94 
95 	nvc->throttle.interval_tsc = FTL_NV_CACHE_THROTTLE_INTERVAL_MS *
96 				     (spdk_get_ticks_hz() / 1000);
97 
98 	nvc->chunk_free_target = spdk_divide_round_up(usable_chunks *
99 				 dev->conf.nv_cache.chunk_free_target,
100 				 100);
101 }
102 
103 struct nvc_scrub_ctx {
104 	uint64_t chunk_no;
105 	nvc_scrub_cb cb;
106 	void *cb_ctx;
107 
108 	struct ftl_layout_region reg_chunk;
109 	struct ftl_md *md_chunk;
110 };
111 
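/* Advance the scrub context until it points at an active chunk; returns -ENOENT when no active
 * chunks remain */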
112 static int
113 nvc_scrub_find_next_chunk(struct spdk_ftl_dev *dev, struct nvc_scrub_ctx *scrub_ctx)
114 {
115 	while (scrub_ctx->chunk_no < dev->layout.nvc.chunk_count) {
116 		if (dev->nv_cache.nvc_type->ops.is_chunk_active(dev, scrub_ctx->reg_chunk.current.offset)) {
117 			return 0;
118 		}
119 
120 		/* Advance the dummy region to the next chunk */
121 		scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;
122 		scrub_ctx->chunk_no++;
123 	}
124 	return -ENOENT;
125 }
126 
127 static void
128 nvc_scrub_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
129 {
130 	struct nvc_scrub_ctx *scrub_ctx = md->owner.cb_ctx;
131 	union ftl_md_vss vss;
132 
133 	/* Move to the next chunk */
134 	scrub_ctx->chunk_no++;
135 	scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;
136 
137 	FTL_DEBUGLOG(dev, "Scrub progress: %"PRIu64"/%"PRIu64" chunks\n",
138 		     scrub_ctx->chunk_no, dev->layout.nvc.chunk_count);
139 
140 	if (status || nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
141 		/* IO error or no more active chunks found. Scrubbing finished. */
142 		scrub_ctx->cb(dev, scrub_ctx->cb_ctx, status);
143 		ftl_md_destroy(scrub_ctx->md_chunk, 0);
144 		free(scrub_ctx);
145 		return;
146 	}
147 
148 	/* Scrub the next chunk */
149 	vss.version.md_version = 0;
150 	vss.nv_cache.lba = FTL_ADDR_INVALID;
151 
152 	scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
153 	scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;
154 
155 	ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
156 }
157 
158 void
159 ftl_nv_cache_scrub(struct spdk_ftl_dev *dev, nvc_scrub_cb cb, void *cb_ctx)
160 {
161 	struct nvc_scrub_ctx *scrub_ctx = calloc(1, sizeof(*scrub_ctx));
162 	union ftl_md_vss vss;
163 
164 	if (!scrub_ctx) {
165 		cb(dev, cb_ctx, -ENOMEM);
166 		return;
167 	}
168 
169 	scrub_ctx->cb = cb;
170 	scrub_ctx->cb_ctx = cb_ctx;
171 
172 	/* Set up a dummy region for the first chunk */
173 	scrub_ctx->reg_chunk.name = ftl_md_region_name(FTL_LAYOUT_REGION_TYPE_DATA_NVC);
174 	scrub_ctx->reg_chunk.type = FTL_LAYOUT_REGION_TYPE_DATA_NVC;
175 	scrub_ctx->reg_chunk.mirror_type = FTL_LAYOUT_REGION_TYPE_INVALID;
176 	scrub_ctx->reg_chunk.current.version = 0;
177 	scrub_ctx->reg_chunk.current.offset = 0;
178 	scrub_ctx->reg_chunk.current.blocks = dev->layout.nvc.chunk_data_blocks;
179 	scrub_ctx->reg_chunk.entry_size = FTL_BLOCK_SIZE;
180 	scrub_ctx->reg_chunk.num_entries = dev->layout.nvc.chunk_data_blocks;
181 	scrub_ctx->reg_chunk.vss_blksz = dev->nv_cache.md_size;
182 	scrub_ctx->reg_chunk.bdev_desc = dev->nv_cache.bdev_desc;
183 	scrub_ctx->reg_chunk.ioch = dev->nv_cache.cache_ioch;
184 
185 	/* Set up an MD object for the region */
186 	scrub_ctx->md_chunk = ftl_md_create(dev, scrub_ctx->reg_chunk.current.blocks,
187 					    scrub_ctx->reg_chunk.vss_blksz, scrub_ctx->reg_chunk.name, FTL_MD_CREATE_NO_MEM,
188 					    &scrub_ctx->reg_chunk);
189 
190 	if (!scrub_ctx->md_chunk) {
191 		free(scrub_ctx);
192 		cb(dev, cb_ctx, -ENOMEM);
193 		return;
194 	}
195 
196 	if (nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
197 		/* No active chunks found */
198 		ftl_md_destroy(scrub_ctx->md_chunk, 0);
199 		free(scrub_ctx);
200 		cb(dev, cb_ctx, -ENOENT);
201 		return;
202 	}
203 
204 	/* Scrub the first chunk */
205 	vss.version.md_version = 0;
206 	vss.nv_cache.lba = FTL_ADDR_INVALID;
207 
208 	scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
209 	scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;
210 
211 	ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
212 	return;
213 }
214 
215 int
216 ftl_nv_cache_init(struct spdk_ftl_dev *dev)
217 {
218 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
219 	struct ftl_nv_cache_chunk *chunk;
220 	struct ftl_nv_cache_chunk_md *md;
221 	struct ftl_nv_cache_compactor *compactor;
222 	uint64_t i, offset;
223 
224 	nv_cache->halt = true;
225 
226 	nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
227 	if (!nv_cache->md) {
228 		FTL_ERRLOG(dev, "No NV cache metadata object\n");
229 		return -1;
230 	}
231 
232 	nv_cache->md_pool = ftl_mempool_create(dev->conf.user_io_pool_size,
233 					       nv_cache->md_size * dev->xfer_size,
234 					       FTL_BLOCK_SIZE, SPDK_ENV_NUMA_ID_ANY);
235 	if (!nv_cache->md_pool) {
236 		FTL_ERRLOG(dev, "Failed to initialize NV cache metadata pool\n");
237 		return -1;
238 	}
239 
240 	/*
241 	 * Initialize chunk info
242 	 */
243 	nv_cache->chunk_blocks = dev->layout.nvc.chunk_data_blocks;
244 	nv_cache->chunk_count = dev->layout.nvc.chunk_count;
245 	nv_cache->tail_md_chunk_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache);
246 
247 	/* Allocate chunks */
248 	nv_cache->chunks = calloc(nv_cache->chunk_count,
249 				  sizeof(nv_cache->chunks[0]));
250 	if (!nv_cache->chunks) {
251 		FTL_ERRLOG(dev, "Failed to initialize NV cache chunks\n");
252 		return -1;
253 	}
254 
255 	TAILQ_INIT(&nv_cache->chunk_free_list);
256 	TAILQ_INIT(&nv_cache->chunk_open_list);
257 	TAILQ_INIT(&nv_cache->chunk_full_list);
258 	TAILQ_INIT(&nv_cache->chunk_comp_list);
259 	TAILQ_INIT(&nv_cache->chunk_inactive_list);
260 	TAILQ_INIT(&nv_cache->needs_free_persist_list);
261 
262 	/* First chunk metadata */
263 	md = ftl_md_get_buffer(nv_cache->md);
264 	if (!md) {
265 		FTL_ERRLOG(dev, "No NV cache metadata\n");
266 		return -1;
267 	}
268 
269 	chunk = nv_cache->chunks;
270 	offset = nvc_data_offset(nv_cache);
271 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
272 		chunk->nv_cache = nv_cache;
273 		chunk->md = md;
274 		chunk->md->version = FTL_NVC_VERSION_CURRENT;
275 		nvc_validate_md(nv_cache, md);
276 		chunk->offset = offset;
277 		offset += nv_cache->chunk_blocks;
278 
279 		if (nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset)) {
280 			nv_cache->chunk_free_count++;
281 			TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
282 		} else {
283 			chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
284 			nv_cache->chunk_inactive_count++;
285 			TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
286 		}
287 	}
288 	assert(nv_cache->chunk_free_count + nv_cache->chunk_inactive_count == nv_cache->chunk_count);
289 	assert(offset <= nvc_data_offset(nv_cache) + nvc_data_blocks(nv_cache));
290 
291 	TAILQ_INIT(&nv_cache->compactor_list);
292 	for (i = 0; i < FTL_NV_CACHE_NUM_COMPACTORS; i++) {
293 		compactor = compactor_alloc(dev);
294 
295 		if (!compactor) {
296 			FTL_ERRLOG(dev, "Cannot allocate compaction process\n");
297 			return -1;
298 		}
299 
300 		TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
301 	}
302 
303 #define FTL_MAX_OPEN_CHUNKS 2
304 #define FTL_MAX_COMPACTED_CHUNKS 2
305 	nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS + FTL_MAX_COMPACTED_CHUNKS,
306 						nv_cache_p2l_map_pool_elem_size(nv_cache),
307 						FTL_BLOCK_SIZE,
308 						SPDK_ENV_NUMA_ID_ANY);
309 	if (!nv_cache->p2l_pool) {
310 		return -ENOMEM;
311 	}
312 
313 	/* One entry per open or compacted chunk */
314 	nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS + FTL_MAX_COMPACTED_CHUNKS,
315 				  sizeof(struct ftl_nv_cache_chunk_md),
316 				  FTL_BLOCK_SIZE,
317 				  SPDK_ENV_NUMA_ID_ANY);
318 	if (!nv_cache->chunk_md_pool) {
319 		return -ENOMEM;
320 	}
321 
322 	/* Each compactor can be reading from a different chunk, which it will need to switch to the free state
323 	 * at the end, plus one backup each for processing high-invalidity chunks (if there's a backlog of chunks
324 	 * with extremely small, even 0, validity, the compactors can process them quickly and trigger a lot of
325 	 * updates to the free state at once) */
326 	nv_cache->free_chunk_md_pool = ftl_mempool_create(2 * FTL_NV_CACHE_NUM_COMPACTORS,
327 				       sizeof(struct ftl_nv_cache_chunk_md),
328 				       FTL_BLOCK_SIZE,
329 				       SPDK_ENV_NUMA_ID_ANY);
330 	if (!nv_cache->free_chunk_md_pool) {
331 		return -ENOMEM;
332 	}
333 
334 	ftl_nv_cache_init_update_limits(dev);
335 	ftl_property_register(dev, "cache_device", NULL, 0, NULL, NULL, ftl_property_dump_cache_dev, NULL,
336 			      NULL, true);
337 	return 0;
338 }
339 
340 void
341 ftl_nv_cache_deinit(struct spdk_ftl_dev *dev)
342 {
343 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
344 	struct ftl_nv_cache_compactor *compactor;
345 
346 	while (!TAILQ_EMPTY(&nv_cache->compactor_list)) {
347 		compactor = TAILQ_FIRST(&nv_cache->compactor_list);
348 		TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
349 
350 		compactor_free(dev, compactor);
351 	}
352 
353 	ftl_mempool_destroy(nv_cache->md_pool);
354 	ftl_mempool_destroy(nv_cache->p2l_pool);
355 	ftl_mempool_destroy(nv_cache->chunk_md_pool);
356 	ftl_mempool_destroy(nv_cache->free_chunk_md_pool);
357 	nv_cache->md_pool = NULL;
358 	nv_cache->p2l_pool = NULL;
359 	nv_cache->chunk_md_pool = NULL;
360 	nv_cache->free_chunk_md_pool = NULL;
361 
362 	free(nv_cache->chunks);
363 	nv_cache->chunks = NULL;
364 }
365 
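/* Number of blocks still available for user data in the chunk, excluding the area reserved for
 * the tail metadata */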
366 static uint64_t
367 chunk_get_free_space(struct ftl_nv_cache *nv_cache,
368 		     struct ftl_nv_cache_chunk *chunk)
369 {
370 	assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <=
371 	       nv_cache->chunk_blocks);
372 	return nv_cache->chunk_blocks - chunk->md->write_pointer -
373 	       nv_cache->tail_md_chunk_blocks;
374 }
375 
376 static bool
377 chunk_is_closed(struct ftl_nv_cache_chunk *chunk)
378 {
379 	return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
380 }
381 
382 static void ftl_chunk_close(struct ftl_nv_cache_chunk *chunk);
383 
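/* Reserve space for num_blocks of user data in the currently open chunk. If the current chunk
 * cannot hold the request, its remaining space is skipped (and the chunk closed once full) and
 * the next open chunk is used. Returns FTL_LBA_INVALID when no open chunk is available. */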
384 static uint64_t
385 ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io)
386 {
387 	uint64_t address = FTL_LBA_INVALID;
388 	uint64_t num_blocks = io->num_blocks;
389 	uint64_t free_space;
390 	struct ftl_nv_cache_chunk *chunk;
391 
392 	do {
393 		chunk = nv_cache->chunk_current;
394 		/* Chunk has been closed, so pick a new one */
395 		if (chunk && chunk_is_closed(chunk))  {
396 			chunk = NULL;
397 		}
398 
399 		if (!chunk) {
400 			chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
401 			if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
402 				TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
403 				nv_cache->chunk_current = chunk;
404 			} else {
405 				break;
406 			}
407 		}
408 
409 		free_space = chunk_get_free_space(nv_cache, chunk);
410 
411 		if (free_space >= num_blocks) {
412 			/* Enough space in chunk */
413 
414 			/* Calculate address in NV cache */
415 			address = chunk->offset + chunk->md->write_pointer;
416 
417 			/* Set chunk in IO */
418 			io->nv_cache_chunk = chunk;
419 
420 			/* Move write pointer */
421 			chunk->md->write_pointer += num_blocks;
422 			break;
423 		}
424 
425 		/* Not enough space in nv_cache_chunk */
426 		nv_cache->chunk_current = NULL;
427 
428 		if (0 == free_space) {
429 			continue;
430 		}
431 
432 		chunk->md->blocks_skipped = free_space;
433 		chunk->md->blocks_written += free_space;
434 		chunk->md->write_pointer += free_space;
435 
436 		if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
437 			ftl_chunk_close(chunk);
438 		}
439 	} while (1);
440 
441 	return address;
442 }
443 
444 void
445 ftl_nv_cache_fill_md(struct ftl_io *io)
446 {
447 	struct ftl_nv_cache_chunk *chunk = io->nv_cache_chunk;
448 	uint64_t i;
449 	union ftl_md_vss *metadata = io->md;
450 	uint64_t lba = ftl_io_get_lba(io, 0);
451 
452 	for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) {
453 		metadata->nv_cache.lba = lba;
454 		metadata->nv_cache.seq_id = chunk->md->seq_id;
455 	}
456 }
457 
458 uint64_t
459 chunk_tail_md_offset(struct ftl_nv_cache *nv_cache)
460 {
461 	return nv_cache->chunk_blocks - nv_cache->tail_md_chunk_blocks;
462 }
463 
464 static void
465 chunk_advance_blocks(struct ftl_nv_cache *nv_cache, struct ftl_nv_cache_chunk *chunk,
466 		     uint64_t advanced_blocks)
467 {
468 	chunk->md->blocks_written += advanced_blocks;
469 
470 	assert(chunk->md->blocks_written <= nv_cache->chunk_blocks);
471 
472 	if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
473 		ftl_chunk_close(chunk);
474 	}
475 }
476 
477 static uint64_t
478 chunk_user_blocks_written(struct ftl_nv_cache_chunk *chunk)
479 {
480 	return chunk->md->blocks_written - chunk->md->blocks_skipped -
481 	       chunk->nv_cache->tail_md_chunk_blocks;
482 }
483 
484 static bool
485 is_chunk_compacted(struct ftl_nv_cache_chunk *chunk)
486 {
487 	assert(chunk->md->blocks_written != 0);
488 
489 	if (chunk_user_blocks_written(chunk) == chunk->md->blocks_compacted) {
490 		return true;
491 	}
492 
493 	return false;
494 }
495 
496 static int
497 ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk)
498 {
499 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
500 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
501 
502 	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->chunk_md_pool);
503 
504 	if (!p2l_map->chunk_dma_md) {
505 		return -ENOMEM;
506 	}
507 
508 	ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
509 	return 0;
510 }
511 
512 static void
513 ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk)
514 {
515 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
516 
517 	ftl_mempool_put(chunk->nv_cache->chunk_md_pool, p2l_map->chunk_dma_md);
518 	p2l_map->chunk_dma_md = NULL;
519 }
520 
521 static void chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk);
522 
523 static void
524 ftl_chunk_free(struct ftl_nv_cache_chunk *chunk)
525 {
526 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
527 
528 	/* Reset chunk */
529 	ftl_nv_cache_chunk_md_initialize(chunk->md);
530 
531 	TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry);
532 	nv_cache->chunk_free_persist_count++;
533 }
534 
535 static int
536 ftl_chunk_alloc_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
537 {
538 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
539 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
540 
541 	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->free_chunk_md_pool);
542 	if (!p2l_map->chunk_dma_md) {
543 		return -ENOMEM;
544 	}
545 
546 	ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
547 	return 0;
548 }
549 
550 static void
551 ftl_chunk_free_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
552 {
553 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
554 
555 	ftl_mempool_put(chunk->nv_cache->free_chunk_md_pool, p2l_map->chunk_dma_md);
556 	p2l_map->chunk_dma_md = NULL;
557 }
558 
559 static void
560 chunk_free_cb(int status, void *ctx)
561 {
562 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
563 
564 	if (spdk_likely(!status)) {
565 		struct ftl_nv_cache *nv_cache = chunk->nv_cache;
566 
567 		nv_cache->chunk_free_persist_count--;
568 		TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
569 		nv_cache->chunk_free_count++;
570 		nv_cache->chunk_full_count--;
571 		chunk->md->state = FTL_CHUNK_STATE_FREE;
572 		chunk->md->close_seq_id = 0;
573 		ftl_chunk_free_chunk_free_entry(chunk);
574 	} else {
575 #ifdef SPDK_FTL_RETRY_ON_ERROR
576 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
577 #else
578 		ftl_abort();
579 #endif
580 	}
581 }
582 
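/* Persist the FREE state of chunks queued on the needs_free_persist_list to the NVC metadata region */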
583 static void
584 ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
585 {
586 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
587 	struct ftl_p2l_map *p2l_map;
588 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
589 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
590 	struct ftl_nv_cache_chunk *tchunk, *chunk = NULL;
591 	int rc;
592 
593 	TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) {
594 		p2l_map = &chunk->p2l_map;
595 		rc = ftl_chunk_alloc_chunk_free_entry(chunk);
596 		if (rc) {
597 			break;
598 		}
599 
600 		TAILQ_REMOVE(&nv_cache->needs_free_persist_list, chunk, entry);
601 
602 		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
603 		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_FREE;
604 		p2l_map->chunk_dma_md->close_seq_id = 0;
605 		p2l_map->chunk_dma_md->p2l_map_checksum = 0;
606 
607 		ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
608 				       chunk_free_cb, chunk, &chunk->md_persist_entry_ctx);
609 	}
610 }
611 
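/* Update the simple moving average of compaction bandwidth with the sample from the just-compacted chunk */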
612 static void
613 compaction_stats_update(struct ftl_nv_cache_chunk *chunk)
614 {
615 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
616 	struct compaction_bw_stats *compaction_bw = &nv_cache->compaction_recent_bw;
617 	double *ptr;
618 
619 	if (spdk_unlikely(chunk->compaction_length_tsc == 0)) {
620 		return;
621 	}
622 
623 	if (spdk_likely(compaction_bw->count == FTL_NV_CACHE_COMPACTION_SMA_N)) {
624 		ptr = compaction_bw->buf + compaction_bw->first;
625 		compaction_bw->first++;
626 		if (compaction_bw->first == FTL_NV_CACHE_COMPACTION_SMA_N) {
627 			compaction_bw->first = 0;
628 		}
629 		compaction_bw->sum -= *ptr;
630 	} else {
631 		ptr = compaction_bw->buf + compaction_bw->count;
632 		compaction_bw->count++;
633 	}
634 
635 	*ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc;
636 	chunk->compaction_length_tsc = 0;
637 
638 	compaction_bw->sum += *ptr;
639 	nv_cache->compaction_sma = compaction_bw->sum / compaction_bw->count;
640 }
641 
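/* Account for num_blocks compacted blocks; once the whole chunk is compacted, update the stats,
 * release its P2L map and schedule the chunk to be freed */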
642 static void
643 chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)
644 {
645 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
646 	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
647 
648 	chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
649 	chunk->compaction_start_tsc = tsc;
650 
651 	chunk->md->blocks_compacted += num_blocks;
652 	assert(chunk->md->blocks_compacted <= chunk_user_blocks_written(chunk));
653 	if (!is_chunk_compacted(chunk)) {
654 		return;
655 	}
656 
657 	/* Remove chunk from compacted list */
658 	TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry);
659 	nv_cache->chunk_comp_count--;
660 
661 	compaction_stats_update(chunk);
662 
663 	chunk_free_p2l_map(chunk);
664 
665 	ftl_chunk_free(chunk);
666 }
667 
668 static bool
669 is_compaction_required_for_upgrade(struct ftl_nv_cache *nv_cache)
670 {
671 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
672 
673 	if (dev->conf.prep_upgrade_on_shutdown) {
674 		if (nv_cache->chunk_full_count || nv_cache->chunk_open_count) {
675 			return true;
676 		}
677 	}
678 
679 	return false;
680 }
681 
682 static bool
683 is_compaction_required(struct ftl_nv_cache *nv_cache)
684 {
685 	if (spdk_unlikely(nv_cache->halt)) {
686 		return is_compaction_required_for_upgrade(nv_cache);
687 	}
688 
689 	if (nv_cache->chunk_full_count >= nv_cache->chunk_compaction_threshold) {
690 		return true;
691 	}
692 
693 	return false;
694 }
695 
696 static void compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor);
697 static void compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp);
698 
699 static void
700 _compaction_process_pin_lba(void *_comp)
701 {
702 	struct ftl_nv_cache_compactor *comp = _comp;
703 
704 	compaction_process_pin_lba(comp);
705 }
706 
707 static void
708 compaction_process_pin_lba_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
709 {
710 	struct ftl_nv_cache_compactor *comp = pin_ctx->cb_ctx;
711 	struct ftl_rq *rq = comp->rq;
712 
713 	if (status) {
714 		rq->iter.status = status;
715 		pin_ctx->lba = FTL_LBA_INVALID;
716 	}
717 
718 	if (--rq->iter.remaining == 0) {
719 		if (rq->iter.status) {
720 			/* unpin and try again */
721 			ftl_rq_unpin(rq);
722 			spdk_thread_send_msg(spdk_get_thread(), _compaction_process_pin_lba, comp);
723 			return;
724 		}
725 
726 		compaction_process_finish_read(comp);
727 	}
728 }
729 
730 static uint64_t ftl_chunk_map_get_lba_from_addr(struct ftl_nv_cache_chunk *chunk, ftl_addr addr);
731 
732 static void
733 compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp)
734 {
735 	struct ftl_rq *rq = comp->rq;
736 	struct spdk_ftl_dev *dev = rq->dev;
737 	struct ftl_rq_entry *entry;
738 
739 	assert(rq->iter.count);
740 	rq->iter.remaining = rq->iter.count;
741 	rq->iter.status = 0;
742 
743 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
744 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
745 		struct ftl_l2p_pin_ctx *pin_ctx = &entry->l2p_pin_ctx;
746 
747 		entry->lba = ftl_chunk_map_get_lba_from_addr(chunk, entry->addr);
748 
749 		if (entry->lba == FTL_LBA_INVALID) {
750 			ftl_l2p_pin_skip(dev, compaction_process_pin_lba_cb, comp, pin_ctx);
751 		} else {
752 			ftl_l2p_pin(dev, entry->lba, 1, compaction_process_pin_lba_cb, comp, pin_ctx);
753 		}
754 	}
755 }
756 
757 static void
758 compaction_process_read_entry_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
759 {
760 	struct ftl_rq_entry *entry = arg;
761 	struct ftl_rq *rq = ftl_rq_from_entry(entry);
762 	struct spdk_ftl_dev *dev = rq->dev;
763 	struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
764 
765 	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_CMP, bdev_io);
766 
767 	spdk_bdev_free_io(bdev_io);
768 
769 	if (!success) {
770 		/* retry */
771 		spdk_thread_send_msg(spdk_get_thread(), compaction_process_read_entry, entry);
772 		return;
773 	}
774 
775 	assert(rq->iter.remaining >= entry->bdev_io.num_blocks);
776 	rq->iter.remaining -= entry->bdev_io.num_blocks;
777 	if (0 == rq->iter.remaining) {
778 		/* All IOs processed, go to the next phase - pinning */
779 		compaction_process_pin_lba(compactor);
780 	}
781 }
782 
783 static void
784 compaction_process_read_entry(void *arg)
785 {
786 	struct ftl_rq_entry *entry = arg;
787 	struct ftl_rq *rq = ftl_rq_from_entry(entry);
788 	struct spdk_ftl_dev *dev = rq->dev;
789 	int rc;
790 
791 	rc = spdk_bdev_read_blocks(dev->nv_cache.bdev_desc, dev->nv_cache.cache_ioch,
792 				   entry->io_payload, entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
793 				   compaction_process_read_entry_cb, entry);
794 
795 	if (spdk_unlikely(rc)) {
796 		if (rc == -ENOMEM) {
797 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
798 			entry->bdev_io.wait_entry.bdev = bdev;
799 			entry->bdev_io.wait_entry.cb_fn = compaction_process_read_entry;
800 			entry->bdev_io.wait_entry.cb_arg = entry;
801 			spdk_bdev_queue_io_wait(bdev, dev->nv_cache.cache_ioch, &entry->bdev_io.wait_entry);
802 		} else {
803 			ftl_abort();
804 		}
805 	}
806 
807 	dev->stats.io_activity_total += entry->bdev_io.num_blocks;
808 }
809 
810 static bool
811 is_chunk_to_read(struct ftl_nv_cache_chunk *chunk)
812 {
813 	assert(chunk->md->blocks_written != 0);
814 
815 	if (chunk_user_blocks_written(chunk) == chunk->md->read_pointer) {
816 		return false;
817 	}
818 
819 	return true;
820 }
821 
822 static void
823 read_chunk_p2l_map_cb(struct ftl_basic_rq *brq)
824 {
825 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
826 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
827 
828 	if (!brq->success) {
829 #ifdef SPDK_FTL_RETRY_ON_ERROR
830 		read_chunk_p2l_map(chunk);
831 #else
832 		ftl_abort();
833 #endif
834 	}
835 
836 	TAILQ_INSERT_HEAD(&nv_cache->chunk_comp_list, chunk, entry);
837 }
838 
839 static int chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk);
840 static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
841 				  void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx);
842 
843 static void
844 read_chunk_p2l_map(void *arg)
845 {
846 	struct ftl_nv_cache_chunk *chunk = arg;
847 	int rc;
848 
849 	if (chunk_alloc_p2l_map(chunk)) {
850 		ftl_abort();
851 	}
852 
853 	rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, read_chunk_p2l_map_cb, NULL);
854 	if (rc) {
855 		if (rc == -ENOMEM) {
856 			struct ftl_nv_cache *nv_cache = chunk->nv_cache;
857 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
858 			struct spdk_bdev_io_wait_entry *wait_entry = &chunk->metadata_rq.io.bdev_io_wait;
859 
860 			wait_entry->bdev = bdev;
861 			wait_entry->cb_fn = read_chunk_p2l_map;
862 			wait_entry->cb_arg = chunk;
863 			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, wait_entry);
864 		} else {
865 			ftl_abort();
866 		}
867 	}
868 }
869 
870 static void
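/* Take the first (oldest) full chunk and start reading its P2L map so it can be compacted */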
871 prepare_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
872 {
873 	struct ftl_nv_cache_chunk *chunk = NULL;
874 
875 	if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
876 		return;
877 	}
878 
879 	chunk = TAILQ_FIRST(&nv_cache->chunk_full_list);
880 	TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry);
881 	assert(chunk->md->write_pointer);
882 
883 	nv_cache->chunk_comp_count++;
884 	read_chunk_p2l_map(chunk);
885 }
886 
887 
888 static struct ftl_nv_cache_chunk *
889 get_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
890 {
891 	struct ftl_nv_cache_chunk *chunk = NULL;
892 
893 	if (TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
894 		return NULL;
895 	}
896 
897 	chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
898 	if (!is_chunk_to_read(chunk)) {
899 		return NULL;
900 	}
901 
902 	return chunk;
903 }
904 
905 static uint64_t
906 chunk_blocks_to_read(struct ftl_nv_cache_chunk *chunk)
907 {
908 	uint64_t blocks_written;
909 	uint64_t blocks_to_read;
910 
911 	assert(chunk->md->blocks_written >= chunk->md->blocks_skipped);
912 	blocks_written = chunk_user_blocks_written(chunk);
913 
914 	assert(blocks_written >= chunk->md->read_pointer);
915 	blocks_to_read = blocks_written - chunk->md->read_pointer;
916 
917 	return blocks_to_read;
918 }
919 
920 static void
921 compactor_deactivate(struct ftl_nv_cache_compactor *compactor)
922 {
923 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
924 
925 	compactor->rq->iter.count = 0;
926 	assert(nv_cache->compaction_active_count);
927 	nv_cache->compaction_active_count--;
928 	TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
929 }
930 
931 static void
932 compaction_process_invalidate_entry(struct ftl_rq_entry *entry)
933 {
934 	entry->addr = FTL_ADDR_INVALID;
935 	entry->lba = FTL_LBA_INVALID;
936 	entry->seq_id = 0;
937 	entry->owner.priv = NULL;
938 }
939 
940 static void
941 compaction_process_pad(struct ftl_nv_cache_compactor *compactor, uint64_t idx)
942 {
943 	struct ftl_rq *rq = compactor->rq;
944 	struct ftl_rq_entry *entry;
945 
946 	assert(idx < rq->num_blocks);
947 	FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[idx], entry, rq->num_blocks) {
948 		compaction_process_invalidate_entry(entry);
949 	}
950 }
951 
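/* Issue read IOs for all entries in the request, merging physically contiguous entries into a
 * single bdev read */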
952 static void
953 compaction_process_read(struct ftl_nv_cache_compactor *compactor)
954 {
955 	struct ftl_rq *rq = compactor->rq;
956 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
957 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
958 	struct ftl_rq_entry *entry, *io;
959 
960 	assert(rq->iter.count);
961 	rq->iter.remaining = rq->iter.count;
962 
963 	io = rq->entries;
964 	io->bdev_io.num_blocks = 1;
965 	io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
966 	FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[1], entry, rq->iter.count) {
967 		if (entry->addr == io->addr + io->bdev_io.num_blocks) {
968 			io->bdev_io.num_blocks++;
969 		} else {
970 			compaction_process_read_entry(io);
971 			io = entry;
972 			io->bdev_io.num_blocks = 1;
973 			io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
974 		}
975 	}
976 	compaction_process_read_entry(io);
977 }
978 
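/* Find the next valid block to compact in the chunk using the valid map; invalid runs are skipped
 * and accounted as already compacted. Returns FTL_ADDR_INVALID when nothing is left to read. */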
979 static ftl_addr
980 compaction_chunk_read_pos(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk)
981 {
982 	ftl_addr start, pos;
983 	uint64_t skip, to_read = chunk_blocks_to_read(chunk);
984 
985 	if (0 == to_read) {
986 		return FTL_ADDR_INVALID;
987 	}
988 
989 	start = ftl_addr_from_nvc_offset(dev, chunk->offset + chunk->md->read_pointer);
990 	pos = ftl_bitmap_find_first_set(dev->valid_map, start, start + to_read - 1);
991 
992 	if (pos == UINT64_MAX) {
993 		chunk->md->read_pointer += to_read;
994 		chunk_compaction_advance(chunk, to_read);
995 		return FTL_ADDR_INVALID;
996 	}
997 
998 	assert(pos >= start);
999 	skip = pos - start;
1000 	if (skip) {
1001 		chunk->md->read_pointer += skip;
1002 		chunk_compaction_advance(chunk, skip);
1003 	}
1004 
1005 	return pos;
1006 }
1007 
1008 static bool
1009 compaction_entry_read_pos(struct ftl_nv_cache *nv_cache, struct ftl_rq_entry *entry)
1010 {
1011 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1012 	struct ftl_nv_cache_chunk *chunk = NULL;
1013 	ftl_addr addr = FTL_ADDR_INVALID;
1014 
1015 	while (!chunk) {
1016 		/* Get currently handled chunk */
1017 		chunk = get_chunk_for_compaction(nv_cache);
1018 		if (!chunk) {
1019 			return false;
1020 		}
1021 		chunk->compaction_start_tsc = spdk_thread_get_last_tsc(spdk_get_thread());
1022 
1023 		/* Get next read position in chunk */
1024 		addr = compaction_chunk_read_pos(dev, chunk);
1025 		if (FTL_ADDR_INVALID == addr) {
1026 			chunk = NULL;
1027 		}
1028 	}
1029 
1030 	assert(FTL_ADDR_INVALID != addr);
1031 
1032 	/* Set entry address info and chunk */
1033 	entry->addr = addr;
1034 	entry->owner.priv = chunk;
1035 
1036 	/* Move read pointer in the chunk */
1037 	chunk->md->read_pointer++;
1038 
1039 	return true;
1040 }
1041 
1042 static void
1043 compaction_process_start(struct ftl_nv_cache_compactor *compactor)
1044 {
1045 	struct ftl_rq *rq = compactor->rq;
1046 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
1047 	struct ftl_rq_entry *entry;
1048 
1049 	assert(0 == compactor->rq->iter.count);
1050 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->num_blocks) {
1051 		if (!compaction_entry_read_pos(nv_cache, entry)) {
1052 			compaction_process_pad(compactor, entry->index);
1053 			break;
1054 		}
1055 		rq->iter.count++;
1056 	}
1057 
1058 	if (rq->iter.count) {
1059 		/* Schedule Read IOs */
1060 		compaction_process_read(compactor);
1061 	} else {
1062 		compactor_deactivate(compactor);
1063 	}
1064 }
1065 
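/* Main compaction step: prepare full chunks for compaction and, if a chunk is ready, start an
 * idle compactor on it */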
1066 static void
1067 compaction_process(struct ftl_nv_cache *nv_cache)
1068 {
1069 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1070 	struct ftl_nv_cache_compactor *compactor;
1071 
1072 	if (!is_compaction_required(nv_cache)) {
1073 		return;
1074 	}
1075 
1076 	if (nv_cache->chunk_comp_count < FTL_MAX_COMPACTED_CHUNKS) {
1077 		prepare_chunk_for_compaction(nv_cache);
1078 	}
1079 
1080 	if (TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
1081 		return;
1082 	}
1083 
1084 	compactor = TAILQ_FIRST(&nv_cache->compactor_list);
1085 	if (!compactor) {
1086 		return;
1087 	}
1088 
1089 	TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
1090 	compactor->nv_cache->compaction_active_count++;
1091 	compaction_process_start(compactor);
1092 	ftl_add_io_activity(dev);
1093 }
1094 
1095 static void
1096 compaction_process_ftl_done(struct ftl_rq *rq)
1097 {
1098 	struct spdk_ftl_dev *dev = rq->dev;
1099 	struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
1100 	struct ftl_band *band = rq->io.band;
1101 	struct ftl_rq_entry *entry;
1102 	ftl_addr addr;
1103 
1104 	if (spdk_unlikely(false == rq->success)) {
1105 		/* IO error, retry writing */
1106 #ifdef SPDK_FTL_RETRY_ON_ERROR
1107 		ftl_writer_queue_rq(&dev->writer_user, rq);
1108 		return;
1109 #else
1110 		ftl_abort();
1111 #endif
1112 	}
1113 
1114 	assert(rq->iter.count);
1115 
1116 	/* Update L2P table */
1117 	addr = rq->io.addr;
1118 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
1119 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
1120 
1121 		if (entry->lba != FTL_LBA_INVALID) {
1122 			ftl_l2p_update_base(dev, entry->lba, addr, entry->addr);
1123 			ftl_l2p_unpin(dev, entry->lba, 1);
1124 			chunk_compaction_advance(chunk, 1);
1125 		} else {
1126 			assert(entry->addr == FTL_ADDR_INVALID);
1127 		}
1128 
1129 		addr = ftl_band_next_addr(band, addr, 1);
1130 		compaction_process_invalidate_entry(entry);
1131 	}
1132 
1133 	compactor_deactivate(compactor);
1134 }
1135 
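/* After the read and L2P pin phases, drop entries whose LBAs were overwritten in the meantime and
 * queue the remaining valid data to the user writer */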
1136 static void
1137 compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
1138 {
1139 	struct ftl_rq *rq = compactor->rq;
1140 	struct spdk_ftl_dev *dev = rq->dev;
1141 	struct ftl_rq_entry *entry;
1142 	ftl_addr current_addr;
1143 	uint64_t skip = 0;
1144 
1145 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
1146 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
1147 		uint64_t lba = entry->lba;
1148 
1149 		if (lba == FTL_LBA_INVALID) {
1150 			skip++;
1151 			compaction_process_invalidate_entry(entry);
1152 			chunk_compaction_advance(chunk, 1);
1153 			continue;
1154 		}
1155 
1156 		current_addr = ftl_l2p_get(dev, lba);
1157 		if (current_addr == entry->addr) {
1158 			entry->seq_id = chunk->md->seq_id;
1159 		} else {
1160 			/* This address has already been invalidated, just omit this block */
1161 			skip++;
1162 			ftl_l2p_unpin(dev, lba, 1);
1163 			compaction_process_invalidate_entry(entry);
1164 			chunk_compaction_advance(chunk, 1);
1165 		}
1166 	}
1167 
1168 	if (skip < rq->iter.count) {
1169 		/*
1170 		 * Request contains data to be placed on FTL, compact it
1171 		 */
1172 		ftl_writer_queue_rq(&dev->writer_user, rq);
1173 	} else {
1174 		compactor_deactivate(compactor);
1175 	}
1176 }
1177 
1178 static void
1179 compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor)
1180 {
1181 	if (!compactor) {
1182 		return;
1183 	}
1184 
1185 	ftl_rq_del(compactor->rq);
1186 	free(compactor);
1187 }
1188 
1189 static struct ftl_nv_cache_compactor *
1190 compactor_alloc(struct spdk_ftl_dev *dev)
1191 {
1192 	struct ftl_nv_cache_compactor *compactor;
1193 	struct ftl_rq_entry *entry;
1194 
1195 	compactor = calloc(1, sizeof(*compactor));
1196 	if (!compactor) {
1197 		goto error;
1198 	}
1199 
1200 	/* Allocate a helper request for reading */
1201 	compactor->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
1202 	if (!compactor->rq) {
1203 		goto error;
1204 	}
1205 
1206 	compactor->nv_cache = &dev->nv_cache;
1207 	compactor->rq->owner.priv = compactor;
1208 	compactor->rq->owner.cb = compaction_process_ftl_done;
1209 	compactor->rq->owner.compaction = true;
1210 
1211 	FTL_RQ_ENTRY_LOOP(compactor->rq, entry, compactor->rq->num_blocks) {
1212 		compaction_process_invalidate_entry(entry);
1213 	}
1214 
1215 	return compactor;
1216 
1217 error:
1218 	compactor_free(dev, compactor);
1219 	return NULL;
1220 }
1221 
1222 static void
1223 ftl_nv_cache_submit_cb_done(struct ftl_io *io)
1224 {
1225 	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1226 
1227 	chunk_advance_blocks(nv_cache, io->nv_cache_chunk, io->num_blocks);
1228 	io->nv_cache_chunk = NULL;
1229 
1230 	ftl_io_complete(io);
1231 }
1232 
1233 static void
1234 ftl_nv_cache_l2p_update(struct ftl_io *io)
1235 {
1236 	struct spdk_ftl_dev *dev = io->dev;
1237 	ftl_addr next_addr = io->addr;
1238 	size_t i;
1239 
1240 	for (i = 0; i < io->num_blocks; ++i, ++next_addr) {
1241 		ftl_l2p_update_cache(dev, ftl_io_get_lba(io, i), next_addr, io->map[i]);
1242 	}
1243 
1244 	ftl_l2p_unpin(dev, io->lba, io->num_blocks);
1245 	ftl_nv_cache_submit_cb_done(io);
1246 }
1247 
1248 static void
1249 ftl_nv_cache_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
1250 {
1251 	struct ftl_io *io = pin_ctx->cb_ctx;
1252 	size_t i;
1253 
1254 	if (spdk_unlikely(status != 0)) {
1255 		/* Retry on the internal L2P fault */
1256 		FTL_ERRLOG(dev, "Cannot pin LBA for NV cache write, failed at %"PRIx64"\n",
1257 			   io->addr);
1258 		io->status = -EAGAIN;
1259 		ftl_nv_cache_submit_cb_done(io);
1260 		return;
1261 	}
1262 
1263 	/* Remember previous l2p mapping to resolve conflicts in case of outstanding write-after-write */
1264 	for (i = 0; i < io->num_blocks; ++i) {
1265 		io->map[i] = ftl_l2p_get(dev, ftl_io_get_lba(io, i));
1266 	}
1267 
1268 	assert(io->iov_pos == 0);
1269 
1270 	ftl_trace_submission(io->dev, io, io->addr, io->num_blocks);
1271 
1272 	dev->nv_cache.nvc_type->ops.write(io);
1273 }
1274 
1275 void
1276 ftl_nv_cache_write_complete(struct ftl_io *io, bool success)
1277 {
1278 	if (spdk_unlikely(!success)) {
1279 		FTL_ERRLOG(io->dev, "Non-volatile cache write failed at %"PRIx64"\n",
1280 			   io->addr);
1281 		io->status = -EIO;
1282 		ftl_l2p_unpin(io->dev, io->lba, io->num_blocks);
1283 		ftl_nv_cache_submit_cb_done(io);
1284 		return;
1285 	}
1286 
1287 	ftl_nv_cache_l2p_update(io);
1288 }
1289 
1290 bool
1291 ftl_nv_cache_write(struct ftl_io *io)
1292 {
1293 	struct spdk_ftl_dev *dev = io->dev;
1294 	uint64_t cache_offset;
1295 
1296 	/* Reserve area on the write buffer cache */
1297 	cache_offset = ftl_nv_cache_get_wr_buffer(&dev->nv_cache, io);
1298 	if (cache_offset == FTL_LBA_INVALID) {
1299 		/* No free space in NV cache, resubmit request */
1300 		return false;
1301 	}
1302 	io->addr = ftl_addr_from_nvc_offset(dev, cache_offset);
1303 
1304 	ftl_l2p_pin(io->dev, io->lba, io->num_blocks,
1305 		    ftl_nv_cache_pin_cb, io,
1306 		    &io->l2p_pin_ctx);
1307 
1308 	dev->nv_cache.throttle.blocks_submitted += io->num_blocks;
1309 
1310 	return true;
1311 }
1312 
1313 int
1314 ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
1315 		  spdk_bdev_io_completion_cb cb, void *cb_arg)
1316 {
1317 	int rc;
1318 	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1319 
1320 	assert(ftl_addr_in_nvc(io->dev, addr));
1321 
1322 	rc = ftl_nv_cache_bdev_read_blocks_with_md(io->dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1323 			ftl_io_iovec_addr(io), NULL, ftl_addr_to_nvc_offset(io->dev, addr),
1324 			num_blocks, cb, cb_arg);
1325 
1326 	return rc;
1327 }
1328 
1329 bool
1330 ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache)
1331 {
1332 	if (nv_cache->compaction_active_count) {
1333 		return false;
1334 	}
1335 
1336 	if (nv_cache->chunk_open_count > 0) {
1337 		return false;
1338 	}
1339 
1340 	if (is_compaction_required_for_upgrade(nv_cache)) {
1341 		return false;
1342 	}
1343 
1344 	return true;
1345 }
1346 
1347 void
1348 ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
1349 		      uint64_t offset, uint64_t lba)
1350 {
1351 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1352 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1353 
1354 	ftl_lba_store(dev, p2l_map->chunk_map, offset, lba);
1355 }
1356 
1357 uint64_t
1358 ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset)
1359 {
1360 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1361 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1362 
1363 	return ftl_lba_load(dev, p2l_map->chunk_map, offset);
1364 }
1365 
1366 static void
1367 ftl_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
1368 {
1369 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1370 	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1371 	uint64_t offset;
1372 
1373 	offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
1374 	ftl_chunk_map_set_lba(chunk, offset, lba);
1375 }
1376 
1377 static uint64_t
1378 ftl_chunk_map_get_lba_from_addr(struct ftl_nv_cache_chunk *chunk, ftl_addr addr)
1379 {
1380 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1381 	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1382 	uint64_t offset;
1383 
1384 	offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
1385 	return ftl_chunk_map_get_lba(chunk, offset);
1386 }
1387 
1388 struct ftl_nv_cache_chunk *
1389 ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
1390 {
1391 	struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks;
1392 	uint64_t chunk_idx;
1393 	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1394 
1395 	assert(chunk != NULL);
1396 	chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks;
1397 	chunk += chunk_idx;
1398 
1399 	return chunk;
1400 }
1401 
1402 void
1403 ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
1404 {
1405 	struct ftl_nv_cache_chunk *chunk;
1406 
1407 	chunk = ftl_nv_cache_get_chunk_from_addr(dev, addr);
1408 
1409 	assert(lba != FTL_LBA_INVALID);
1410 
1411 	ftl_chunk_set_addr(chunk, lba, addr);
1412 	ftl_bitmap_set(dev->valid_map, addr);
1413 }
1414 
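/* Proportional controller: scale the per-interval user write limit around the recent compaction
 * bandwidth (SMA), based on how far the free chunk count is from its target */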
1415 static void
1416 ftl_nv_cache_throttle_update(struct ftl_nv_cache *nv_cache)
1417 {
1418 	double err;
1419 	double modifier;
1420 
1421 	err = ((double)nv_cache->chunk_free_count - nv_cache->chunk_free_target) / nv_cache->chunk_count;
1422 	modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP * err;
1423 
1424 	if (modifier < FTL_NV_CACHE_THROTTLE_MODIFIER_MIN) {
1425 		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MIN;
1426 	} else if (modifier > FTL_NV_CACHE_THROTTLE_MODIFIER_MAX) {
1427 		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MAX;
1428 	}
1429 
1430 	if (spdk_unlikely(nv_cache->compaction_sma == 0 || nv_cache->compaction_active_count == 0)) {
1431 		nv_cache->throttle.blocks_submitted_limit = UINT64_MAX;
1432 	} else {
1433 		double blocks_per_interval = nv_cache->compaction_sma * nv_cache->throttle.interval_tsc /
1434 					     FTL_BLOCK_SIZE;
1435 		nv_cache->throttle.blocks_submitted_limit = blocks_per_interval * (1.0 + modifier);
1436 	}
1437 }
1438 
1439 static void
1440 ftl_nv_cache_process_throttle(struct ftl_nv_cache *nv_cache)
1441 {
1442 	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
1443 
1444 	if (spdk_unlikely(!nv_cache->throttle.start_tsc)) {
1445 		nv_cache->throttle.start_tsc = tsc;
1446 	} else if (tsc - nv_cache->throttle.start_tsc >= nv_cache->throttle.interval_tsc) {
1447 		ftl_nv_cache_throttle_update(nv_cache);
1448 		nv_cache->throttle.start_tsc = tsc;
1449 		nv_cache->throttle.blocks_submitted = 0;
1450 	}
1451 }
1452 
1453 static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk);
1454 
1455 void
1456 ftl_nv_cache_process(struct spdk_ftl_dev *dev)
1457 {
1458 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1459 
1460 	assert(dev->nv_cache.bdev_desc);
1461 
1462 	if (nv_cache->chunk_open_count < FTL_MAX_OPEN_CHUNKS && spdk_likely(!nv_cache->halt) &&
1463 	    !TAILQ_EMPTY(&nv_cache->chunk_free_list)) {
1464 		struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);
1465 		TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
1466 		TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
1467 		nv_cache->chunk_free_count--;
1468 		chunk->md->seq_id = ftl_get_next_seq_id(dev);
1469 		ftl_chunk_open(chunk);
1470 		ftl_add_io_activity(dev);
1471 	}
1472 
1473 	compaction_process(nv_cache);
1474 	ftl_chunk_persist_free_state(nv_cache);
1475 	ftl_nv_cache_process_throttle(nv_cache);
1476 }
1477 
1478 static bool
1479 ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
1480 {
1481 	if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
1482 		return true;
1483 	} else {
1484 		return false;
1485 	}
1486 }
1487 
1488 bool
1489 ftl_nv_cache_throttle(struct spdk_ftl_dev *dev)
1490 {
1491 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1492 
1493 	if (dev->nv_cache.throttle.blocks_submitted >= nv_cache->throttle.blocks_submitted_limit ||
1494 	    ftl_nv_cache_full(nv_cache)) {
1495 		return true;
1496 	}
1497 
1498 	return false;
1499 }
1500 
1501 static void
1502 chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk)
1503 {
1504 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1505 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1506 
1507 	ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1508 	p2l_map->chunk_map = NULL;
1509 
1510 	ftl_chunk_free_md_entry(chunk);
1511 }
1512 
1513 int
1514 ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache)
1515 {
1516 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1517 	struct ftl_nv_cache_chunk *chunk;
1518 	int status = 0;
1519 	uint64_t i;
1520 
1521 	assert(nv_cache->chunk_open_count == 0);
1522 
1523 	if (nv_cache->compaction_active_count) {
1524 		FTL_ERRLOG(dev, "Cannot save NV cache state, compaction in progress\n");
1525 		return -EINVAL;
1526 	}
1527 
1528 	chunk = nv_cache->chunks;
1529 	if (!chunk) {
1530 		FTL_ERRLOG(dev, "Cannot save NV cache state, no NV cache metadata\n");
1531 		return -ENOMEM;
1532 	}
1533 
1534 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1535 		nvc_validate_md(nv_cache, chunk->md);
1536 
1537 		if (chunk->md->read_pointer)  {
1538 			/* Only full chunks can be compacted */
1539 			if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1540 				assert(0);
1541 				status = -EINVAL;
1542 				break;
1543 			}
1544 
1545 			/*
1546 			 * Chunk in the middle of compaction, start over after
1547 			 * load
1548 			 */
1549 			chunk->md->read_pointer = chunk->md->blocks_compacted = 0;
1550 		} else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
1551 			/* Full chunk */
1552 		} else if (0 == chunk->md->blocks_written) {
1553 			/* Empty chunk */
1554 		} else {
1555 			assert(0);
1556 			status = -EINVAL;
1557 			break;
1558 		}
1559 	}
1560 
1561 	if (status) {
1562 		FTL_ERRLOG(dev, "Cannot save NV cache state, inconsistent NV cache "
1563 			   "metadata\n");
1564 	}
1565 
1566 	return status;
1567 }
1568 
1569 static int
1570 sort_chunks_cmp(const void *a, const void *b)
1571 {
1572 	struct ftl_nv_cache_chunk *a_chunk = *(struct ftl_nv_cache_chunk **)a;
1573 	struct ftl_nv_cache_chunk *b_chunk = *(struct ftl_nv_cache_chunk **)b;
1574 
1575 	return a_chunk->md->seq_id - b_chunk->md->seq_id;
1576 }
1577 
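/* Rebuild the full chunk list in ascending open sequence id order, so that the oldest chunks are
 * compacted first */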
1578 static int
1579 sort_chunks(struct ftl_nv_cache *nv_cache)
1580 {
1581 	struct ftl_nv_cache_chunk **chunks_list;
1582 	struct ftl_nv_cache_chunk *chunk;
1583 	uint32_t i;
1584 
1585 	if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
1586 		return 0;
1587 	}
1588 
1589 	chunks_list = calloc(nv_cache->chunk_full_count,
1590 			     sizeof(chunks_list[0]));
1591 	if (!chunks_list) {
1592 		return -ENOMEM;
1593 	}
1594 
1595 	i = 0;
1596 	TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) {
1597 		chunks_list[i] = chunk;
1598 		i++;
1599 	}
1600 	assert(i == nv_cache->chunk_full_count);
1601 
1602 	qsort(chunks_list, nv_cache->chunk_full_count, sizeof(chunks_list[0]),
1603 	      sort_chunks_cmp);
1604 
1605 	TAILQ_INIT(&nv_cache->chunk_full_list);
1606 	for (i = 0; i < nv_cache->chunk_full_count; i++) {
1607 		chunk = chunks_list[i];
1608 		TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1609 	}
1610 
1611 	free(chunks_list);
1612 	return 0;
1613 }
1614 
1615 static int
1616 chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk)
1617 {
1618 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1619 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1620 
1621 	assert(p2l_map->ref_cnt == 0);
1622 	assert(p2l_map->chunk_map == NULL);
1623 
1624 	p2l_map->chunk_map = ftl_mempool_get(nv_cache->p2l_pool);
1625 
1626 	if (!p2l_map->chunk_map) {
1627 		return -ENOMEM;
1628 	}
1629 
1630 	if (ftl_chunk_alloc_md_entry(chunk)) {
1631 		ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1632 		p2l_map->chunk_map = NULL;
1633 		return -ENOMEM;
1634 	}
1635 
1636 	/* Set the P2L to FTL_LBA_INVALID */
1637 	memset(p2l_map->chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
1638 
1639 	return 0;
1640 }
1641 
1642 int
1643 ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)
1644 {
1645 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1646 	struct ftl_nv_cache_chunk *chunk;
1647 	uint64_t chunks_number, offset, i;
1648 	int status = 0;
1649 	bool active;
1650 
1651 	nv_cache->chunk_current = NULL;
1652 	TAILQ_INIT(&nv_cache->chunk_free_list);
1653 	TAILQ_INIT(&nv_cache->chunk_full_list);
1654 	TAILQ_INIT(&nv_cache->chunk_inactive_list);
1655 	nv_cache->chunk_full_count = 0;
1656 	nv_cache->chunk_free_count = 0;
1657 	nv_cache->chunk_inactive_count = 0;
1658 
1659 	assert(nv_cache->chunk_open_count == 0);
1660 	offset = nvc_data_offset(nv_cache);
1661 	if (!nv_cache->chunks) {
1662 		FTL_ERRLOG(dev, "No NV cache metadata\n");
1663 		return -1;
1664 	}
1665 
1666 	if (dev->sb->upgrade_ready) {
1667 		/*
1668 		 * During upgrade some transitions are allowed:
1669 		 *
1670 		 * 1. FREE -> INACTIVE
1671 		 * 2. INACTIVE -> FREE
1672 		 */
1673 		chunk = nv_cache->chunks;
1674 		for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1675 			active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1676 
1677 			if (chunk->md->state == FTL_CHUNK_STATE_FREE) {
1678 				if (!active) {
1679 					chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
1680 				}
1681 			} else if (chunk->md->state == FTL_CHUNK_STATE_INACTIVE) {
1682 				if (active) {
1683 					chunk->md->state = FTL_CHUNK_STATE_FREE;
1684 				}
1685 			}
1686 		}
1687 	}
1688 
1689 	chunk = nv_cache->chunks;
1690 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1691 		chunk->nv_cache = nv_cache;
1692 		nvc_validate_md(nv_cache, chunk->md);
1693 
1694 		if (offset != chunk->offset) {
1695 			status = -EINVAL;
1696 			goto error;
1697 		}
1698 
1699 		if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
1700 			status = -EINVAL;
1701 			goto error;
1702 		}
1703 
1704 		active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1705 		if (false == active) {
1706 			if (chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
1707 				status = -EINVAL;
1708 				goto error;
1709 			}
1710 		}
1711 
1712 		switch (chunk->md->state) {
1713 		case FTL_CHUNK_STATE_FREE:
1714 			if (chunk->md->blocks_written || chunk->md->write_pointer) {
1715 				status = -EINVAL;
1716 				goto error;
1717 			}
1718 			/* Chunk is empty, move it to the free list */
1719 			TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
1720 			nv_cache->chunk_free_count++;
1721 			break;
1722 		case FTL_CHUNK_STATE_OPEN:
1723 			/* All chunks need to be closed at this point */
1724 			status = -EINVAL;
1725 			goto error;
1726 			break;
1727 		case FTL_CHUNK_STATE_CLOSED:
1728 			if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1729 				status = -EINVAL;
1730 				goto error;
1731 			}
1732 			/* Chunk is full, move it to the full list */
1733 			TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1734 			nv_cache->chunk_full_count++;
1735 			break;
1736 		case FTL_CHUNK_STATE_INACTIVE:
1737 			TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
1738 			nv_cache->chunk_inactive_count++;
1739 			break;
1740 		default:
1741 			status = -EINVAL;
1742 			FTL_ERRLOG(dev, "Invalid chunk state\n");
1743 			goto error;
1744 		}
1745 
1746 		offset += nv_cache->chunk_blocks;
1747 	}
1748 
1749 	chunks_number = nv_cache->chunk_free_count + nv_cache->chunk_full_count +
1750 			nv_cache->chunk_inactive_count;
1751 	assert(nv_cache->chunk_current == NULL);
1752 
1753 	if (chunks_number != nv_cache->chunk_count) {
1754 		FTL_ERRLOG(dev, "Inconsistent NV cache metadata\n");
1755 		status = -EINVAL;
1756 		goto error;
1757 	}
1758 
1759 	status = sort_chunks(nv_cache);
1760 	if (status) {
1761 		FTL_ERRLOG(dev, "FTL NV Cache: sorting chunks ERROR\n");
1762 	}
1763 
1764 	FTL_NOTICELOG(dev, "FTL NV Cache: full chunks = %lu, empty chunks = %lu\n",
1765 		      nv_cache->chunk_full_count, nv_cache->chunk_free_count);
1766 
1767 	if (0 == status) {
1768 		FTL_NOTICELOG(dev, "FTL NV Cache: state loaded successfully\n");
1769 	} else {
1770 		FTL_ERRLOG(dev, "FTL NV Cache: loading state ERROR\n");
1771 	}
1772 
1773 	/* The number of active/inactive chunks calculated at initialization can change at this point due to metadata
1774 	 * upgrade. Recalculate the thresholds that depend on active chunk count.
1775 	 */
1776 	ftl_nv_cache_init_update_limits(dev);
1777 error:
1778 	return status;
1779 }
1780 
1781 void
1782 ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
1783 			    uint64_t *close_seq_id)
1784 {
1785 	uint64_t i, o_seq_id = 0, c_seq_id = 0;
1786 	struct ftl_nv_cache_chunk *chunk;
1787 
1788 	chunk = nv_cache->chunks;
1789 	assert(chunk);
1790 
1791 	/* Iterate over chunks and get their max open and close seq id */
1792 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1793 		o_seq_id = spdk_max(o_seq_id, chunk->md->seq_id);
1794 		c_seq_id = spdk_max(c_seq_id, chunk->md->close_seq_id);
1795 	}
1796 
1797 	*open_seq_id = o_seq_id;
1798 	*close_seq_id = c_seq_id;
1799 }
1800 
1801 typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status);
1802 
1803 static void
1804 write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1805 {
1806 	struct ftl_basic_rq *brq = arg;
1807 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1808 
1809 	ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1810 
1811 	brq->success = success;
1812 	if (spdk_likely(success)) {
1813 		chunk_advance_blocks(chunk->nv_cache, chunk, brq->num_blocks);
1814 	}
1815 
1816 	spdk_bdev_free_io(bdev_io);
1817 	brq->owner.cb(brq);
1818 }
1819 
1820 static void
1821 _ftl_chunk_basic_rq_write(void *_brq)
1822 {
1823 	struct ftl_basic_rq *brq = _brq;
1824 	struct ftl_nv_cache *nv_cache = brq->io.chunk->nv_cache;
1825 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1826 	int rc;
1827 
1828 	rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1829 			brq->io_payload, NULL, brq->io.addr,
1830 			brq->num_blocks, write_brq_end, brq);
1831 	if (spdk_unlikely(rc)) {
1832 		if (rc == -ENOMEM) {
1833 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
1834 			brq->io.bdev_io_wait.bdev = bdev;
1835 			brq->io.bdev_io_wait.cb_fn = _ftl_chunk_basic_rq_write;
1836 			brq->io.bdev_io_wait.cb_arg = brq;
1837 			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &brq->io.bdev_io_wait);
1838 		} else {
1839 			ftl_abort();
1840 		}
1841 	}
1842 }
1843 
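/* Bind the basic rq to the chunk, submit the write and advance the chunk's write pointer */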
1844 static void
1845 ftl_chunk_basic_rq_write(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1846 {
1847 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1848 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1849 
1850 	brq->io.chunk = chunk;
1851 	brq->success = false;
1852 
1853 	_ftl_chunk_basic_rq_write(brq);
1854 
1855 	chunk->md->write_pointer += brq->num_blocks;
1856 	dev->stats.io_activity_total += brq->num_blocks;
1857 }
1858 
1859 static void
1860 read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1861 {
1862 	struct ftl_basic_rq *brq = arg;
1863 
1864 	ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1865 
1866 	brq->success = success;
1867 
1868 	brq->owner.cb(brq);
1869 	spdk_bdev_free_io(bdev_io);
1870 }
1871 
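/* Read blocks belonging to the chunk from the cache bdev into the basic rq payload */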
1872 static int
1873 ftl_chunk_basic_rq_read(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1874 {
1875 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1876 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1877 	int rc;
1878 
1879 	brq->io.chunk = chunk;
1880 	brq->success = false;
1881 
1882 	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1883 			brq->io_payload, NULL, brq->io.addr, brq->num_blocks, read_brq_end, brq);
1884 
1885 	if (spdk_likely(!rc)) {
1886 		dev->stats.io_activity_total += brq->num_blocks;
1887 	}
1888 
1889 	return rc;
1890 }
1891 
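/* Persistence callback for opening a chunk: mark the in-memory state OPEN once the md entry is stored */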
1892 static void
1893 chunk_open_cb(int status, void *ctx)
1894 {
1895 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1896 
1897 	if (spdk_unlikely(status)) {
1898 #ifdef SPDK_FTL_RETRY_ON_ERROR
1899 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1900 		return;
1901 #else
1902 		ftl_abort();
1903 #endif
1904 	}
1905 
1906 	chunk->md->state = FTL_CHUNK_STATE_OPEN;
1907 }
1908 
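/*
 * Open a chunk: allocate its P2L map and persist an OPEN metadata entry.
 * chunk_open_cb() updates the in-memory chunk state when the persist completes.
 */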
1909 static void
1910 ftl_chunk_open(struct ftl_nv_cache_chunk *chunk)
1911 {
1912 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1913 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1914 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
1915 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1916 
1917 	if (chunk_alloc_p2l_map(chunk)) {
1918 		assert(0);
1919 		/*
1920 		 * We control the number of open chunks and it must be consistent with the size of the
1921 		 * chunk P2L map pool
1922 		 */
1923 		ftl_abort();
1924 		return;
1925 	}
1926 
1927 	chunk->nv_cache->chunk_open_count++;
1928 
1929 	assert(chunk->md->write_pointer == 0);
1930 	assert(chunk->md->blocks_written == 0);
1931 
1932 	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1933 	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_OPEN;
1934 	p2l_map->chunk_dma_md->p2l_map_checksum = 0;
1935 
1936 	ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md,
1937 			       NULL, chunk_open_cb, chunk,
1938 			       &chunk->md_persist_entry_ctx);
1939 }
1940 
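/*
 * Persistence callback for closing a chunk: store the P2L checksum, release the
 * P2L map, move the chunk to the full list and mark it CLOSED.
 */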
1941 static void
1942 chunk_close_cb(int status, void *ctx)
1943 {
1944 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1945 
1946 	assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);
1947 
1948 	if (spdk_likely(!status)) {
1949 		chunk->md->p2l_map_checksum = chunk->p2l_map.chunk_dma_md->p2l_map_checksum;
1950 		chunk_free_p2l_map(chunk);
1951 
1952 		assert(chunk->nv_cache->chunk_open_count > 0);
1953 		chunk->nv_cache->chunk_open_count--;
1954 
1955 		/* Chunk is full, move it to the full list */
1956 		TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
1957 		chunk->nv_cache->chunk_full_count++;
1958 
1959 		chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
1960 
1961 		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1962 	} else {
1963 #ifdef SPDK_FTL_RETRY_ON_ERROR
1964 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1965 #else
1966 		ftl_abort();
1967 #endif
1968 	}
1969 }
1970 
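/*
 * Tail P2L map write completed: checksum the map and persist the CLOSED metadata
 * entry. chunk_close_cb() finishes the state transition.
 */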
1971 static void
1972 chunk_map_write_cb(struct ftl_basic_rq *brq)
1973 {
1974 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1975 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1976 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1977 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
1978 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1979 	uint32_t chunk_map_crc;
1980 
1981 	if (spdk_likely(brq->success)) {
1982 		chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
1983 						   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
1984 		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1985 		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
1986 		p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
1987 		ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk->p2l_map.chunk_dma_md,
1988 				       NULL, chunk_close_cb, chunk,
1989 				       &chunk->md_persist_entry_ctx);
1990 	} else {
1991 #ifdef SPDK_FTL_RETRY_ON_ERROR
1992 		/* retry */
1993 		chunk->md->write_pointer -= brq->num_blocks;
1994 		ftl_chunk_basic_rq_write(chunk, brq);
1995 #else
1996 		ftl_abort();
1997 #endif
1998 	}
1999 }
2000 
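/* Start closing a chunk by writing its P2L map as tail metadata at the end of the chunk */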
2001 static void
2002 ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
2003 {
2004 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2005 	struct ftl_basic_rq *brq = &chunk->metadata_rq;
2006 	void *metadata = chunk->p2l_map.chunk_map;
2007 
2008 	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
2009 	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2010 	ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk);
2011 
2012 	assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
2013 	brq->io.addr = chunk->offset + chunk->md->write_pointer;
2014 
2015 	ftl_chunk_basic_rq_write(chunk, brq);
2016 }
2017 
2018 static void read_tail_md_cb(struct ftl_basic_rq *brq);
2019 static void recover_open_chunk_cb(struct ftl_basic_rq *brq);
2020 
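/*
 * Completion of the metadata persist issued during open chunk recovery: on success
 * record the P2L checksum and mark the chunk CLOSED, then resume the parent request.
 */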
2021 static void
2022 restore_chunk_close_cb(int status, void *ctx)
2023 {
2024 	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)ctx;
2025 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
2026 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
2027 
2028 	if (spdk_unlikely(status)) {
2029 		parent->success = false;
2030 	} else {
2031 		chunk->md->p2l_map_checksum = p2l_map->chunk_dma_md->p2l_map_checksum;
2032 		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
2033 	}
2034 
2035 	read_tail_md_cb(parent);
2036 }
2037 
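/*
 * The reconstructed tail md has been written. Restore the original owner callback,
 * then checksum the P2L map and persist the chunk metadata as CLOSED and fully written.
 */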
2038 static void
2039 restore_fill_p2l_map_cb(struct ftl_basic_rq *parent)
2040 {
2041 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
2042 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
2043 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2044 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2045 	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
2046 	uint32_t chunk_map_crc;
2047 
2048 	/* Set original callback */
2049 	ftl_basic_rq_set_owner(parent, recover_open_chunk_cb, parent->owner.priv);
2050 
2051 	if (spdk_unlikely(!parent->success)) {
2052 		read_tail_md_cb(parent);
2053 		return;
2054 	}
2055 
2056 	chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
2057 					   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
2058 	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
2059 	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
2060 	p2l_map->chunk_dma_md->write_pointer = chunk->nv_cache->chunk_blocks;
2061 	p2l_map->chunk_dma_md->blocks_written = chunk->nv_cache->chunk_blocks;
2062 	p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
2063 
2064 	ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
2065 			       restore_chunk_close_cb, parent, &chunk->md_persist_entry_ctx);
2066 }
2067 
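/* Write the reconstructed P2L map as the chunk's tail metadata during open chunk recovery */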
2068 static void
2069 restore_fill_tail_md(struct ftl_basic_rq *parent, struct ftl_nv_cache_chunk *chunk)
2070 {
2071 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2072 	void *metadata;
2073 
2074 	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
2075 
2076 	metadata = chunk->p2l_map.chunk_map;
2077 	ftl_basic_rq_init(dev, parent, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2078 	ftl_basic_rq_set_owner(parent, restore_fill_p2l_map_cb, parent->owner.priv);
2079 
2080 	parent->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2081 	parent->io.chunk = chunk;
2082 
2083 	ftl_chunk_basic_rq_write(chunk, parent);
2084 }
2085 
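/*
 * User data read completion during open chunk recovery: rebuild the P2L map from the
 * per-block VSS metadata (invalidating stale seq ids), then either read the next range
 * or write the reconstructed tail metadata.
 */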
2086 static void
2087 read_open_chunk_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2088 {
2089 	struct ftl_rq *rq = (struct ftl_rq *)cb_arg;
2090 	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)rq->owner.priv;
2091 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
2092 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
2093 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2094 	union ftl_md_vss *md;
2095 	uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
2096 	uint64_t len = bdev_io->u.bdev.num_blocks;
2097 	ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);
2098 	int rc;
2099 
2100 	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);
2101 
2102 	spdk_bdev_free_io(bdev_io);
2103 
2104 	if (!success) {
2105 		parent->success = false;
2106 		read_tail_md_cb(parent);
2107 		return;
2108 	}
2109 
2110 	while (rq->iter.idx < rq->iter.count) {
2111 		/* Get metadata */
2112 		md = rq->entries[rq->iter.idx].io_md;
2113 		if (md->nv_cache.seq_id != chunk->md->seq_id) {
2114 			md->nv_cache.lba = FTL_LBA_INVALID;
2115 		}
2116 		/*
2117 		 * The P2L map contains effectively random data at this point (it holds arbitrary blocks
2118 		 * from a potentially unfilled tail md), so even LBA_INVALID needs to be set explicitly
2119 		 */
2120 
2121 		ftl_chunk_set_addr(chunk,  md->nv_cache.lba, addr + rq->iter.idx);
2122 		rq->iter.idx++;
2123 	}
2124 
2125 	if (cache_offset + len < chunk->offset + chunk_tail_md_offset(nv_cache)) {
2126 		cache_offset += len;
2127 		len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - cache_offset);
2128 		rq->iter.idx = 0;
2129 		rq->iter.count = len;
2130 
2131 		rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
2132 				nv_cache->cache_ioch,
2133 				rq->io_payload,
2134 				rq->io_md,
2135 				cache_offset, len,
2136 				read_open_chunk_cb,
2137 				rq);
2138 
2139 		if (rc) {
2140 			ftl_rq_del(rq);
2141 			parent->success = false;
2142 			read_tail_md_cb(parent);
2143 			return;
2144 		}
2145 	} else {
2146 		ftl_rq_del(rq);
2147 		restore_fill_tail_md(parent, chunk);
2148 	}
2149 }
2150 
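/*
 * Recover an open chunk by scanning its user data: reset the P2L map and read the
 * first xfer_size blocks; read_open_chunk_cb() continues the scan.
 */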
2151 static void
2152 restore_open_chunk(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *parent)
2153 {
2154 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
2155 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
2156 	struct ftl_rq *rq;
2157 	uint64_t addr;
2158 	uint64_t len = dev->xfer_size;
2159 	int rc;
2160 
2161 	/*
2162 	 * Prefill the P2L map with invalid LBAs before rebuilding it from user data.
2163 	 * TODO: needed because the tail md blocks (the P2L map itself) are also represented in the P2L map, not just the user data region
2164 	 */
2165 	memset(chunk->p2l_map.chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
2166 
2167 	/* Need to read user data, recalculate chunk's P2L and write tail md with it */
2168 	rq = ftl_rq_new(dev, dev->nv_cache.md_size);
2169 	if (!rq) {
2170 		parent->success = false;
2171 		read_tail_md_cb(parent);
2172 		return;
2173 	}
2174 
2175 	rq->owner.priv = parent;
2176 	rq->iter.idx = 0;
2177 	rq->iter.count = len;
2178 
2179 	addr = chunk->offset;
2180 
2181 	len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - addr);
2182 
2183 	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
2184 			nv_cache->cache_ioch,
2185 			rq->io_payload,
2186 			rq->io_md,
2187 			addr, len,
2188 			read_open_chunk_cb,
2189 			rq);
2190 
2191 	if (rc) {
2192 		ftl_rq_del(rq);
2193 		parent->success = false;
2194 		read_tail_md_cb(parent);
2195 	}
2196 }
2197 
2198 static void
2199 read_tail_md_cb(struct ftl_basic_rq *brq)
2200 {
2201 	brq->owner.cb(brq);
2202 }
2203 
2204 static int
2205 ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
2206 		       void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx)
2207 {
2208 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2209 	void *metadata;
2210 	int rc;
2211 
2212 	metadata = chunk->p2l_map.chunk_map;
2213 	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2214 	ftl_basic_rq_set_owner(brq, cb, cb_ctx);
2215 
2216 	brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2217 	rc = ftl_chunk_basic_rq_read(chunk, brq);
2218 
2219 	return rc;
2220 }
2221 
2222 struct restore_chunk_md_ctx {
2223 	ftl_chunk_md_cb cb;
2224 	void *cb_ctx;
2225 	int status;
2226 	uint64_t qd;
2227 	uint64_t id;
2228 };
2229 
2230 static inline bool
2231 is_chunk_count_valid(struct ftl_nv_cache *nv_cache)
2232 {
2233 	uint64_t chunk_count = 0;
2234 
2235 	chunk_count += nv_cache->chunk_open_count;
2236 	chunk_count += nv_cache->chunk_free_count;
2237 	chunk_count += nv_cache->chunk_full_count;
2238 	chunk_count += nv_cache->chunk_comp_count;
2239 	chunk_count += nv_cache->chunk_inactive_count;
2240 
2241 	return chunk_count == nv_cache->chunk_count;
2242 }
2243 
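/*
 * Per-chunk completion of the tail md walk: invoke the user callback on success,
 * record any error, release the P2L map and continue the management step.
 */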
2244 static void
2245 walk_tail_md_cb(struct ftl_basic_rq *brq)
2246 {
2247 	struct ftl_mngt_process *mngt = brq->owner.priv;
2248 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2249 	struct restore_chunk_md_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
2250 	int rc = 0;
2251 
2252 	if (brq->success) {
2253 		rc = ctx->cb(chunk, ctx->cb_ctx);
2254 	} else {
2255 		rc = -EIO;
2256 	}
2257 
2258 	if (rc) {
2259 		ctx->status = rc;
2260 	}
2261 	ctx->qd--;
2262 	chunk_free_p2l_map(chunk);
2263 	ftl_mngt_continue_step(mngt);
2264 }
2265 
2266 static void
2267 ftl_mngt_nv_cache_walk_tail_md(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2268 			       uint64_t seq_id, ftl_chunk_md_cb cb, void *cb_ctx)
2269 {
2270 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2271 	struct restore_chunk_md_ctx *ctx;
2272 
2273 	ctx = ftl_mngt_get_step_ctx(mngt);
2274 	if (!ctx) {
2275 		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*ctx))) {
2276 			ftl_mngt_fail_step(mngt);
2277 			return;
2278 		}
2279 		ctx = ftl_mngt_get_step_ctx(mngt);
2280 		assert(ctx);
2281 
2282 		ctx->cb = cb;
2283 		ctx->cb_ctx = cb_ctx;
2284 	}
2285 
2286 	/*
2287 	 * This function generates a high queue depth and will utilize ftl_mngt_continue_step during completions to make sure all chunks
2288 	 * are processed before returning an error (if any were found) or continuing on.
2289 	 */
2290 	if (0 == ctx->qd && ctx->id == nvc->chunk_count) {
2291 		if (!is_chunk_count_valid(nvc)) {
2292 			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2293 			assert(false);
2294 			ctx->status = -EINVAL;
2295 		}
2296 
2297 		if (ctx->status) {
2298 			ftl_mngt_fail_step(mngt);
2299 		} else {
2300 			ftl_mngt_next_step(mngt);
2301 		}
2302 		return;
2303 	}
2304 
2305 	while (ctx->id < nvc->chunk_count) {
2306 		struct ftl_nv_cache_chunk *chunk = &nvc->chunks[ctx->id];
2307 		int rc;
2308 
2309 		if (!chunk->recovery) {
2310 			/* This chunk is inactive or empty and not used in recovery */
2311 			ctx->id++;
2312 			continue;
2313 		}
2314 
2315 		if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
2316 			ctx->id++;
2317 			continue;
2318 		}
2319 
2320 		if (chunk_alloc_p2l_map(chunk)) {
2321 			/* No more free P2L maps, break and continue later */
2322 			break;
2323 		}
2324 		ctx->id++;
2325 
2326 		rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, walk_tail_md_cb, mngt);
2327 
2328 		if (0 == rc) {
2329 			ctx->qd++;
2330 		} else {
2331 			chunk_free_p2l_map(chunk);
2332 			ctx->status = rc;
2333 		}
2334 	}
2335 
2336 	if (0 == ctx->qd) {
2337 		/*
2338 		 * Zero QD can happen when all leftover chunks are in the free state.
2339 		 * Additionally, ftl_chunk_read_tail_md could fail starting with the first IO in a given batch.
2340 		 * To streamline all potential error handling (since many chunks are reading P2L at the same time),
2341 		 * we use ftl_mngt_continue_step to arrive at the same spot that checks for mngt step end (see beginning of function).
2342 		 */
2343 		ftl_mngt_continue_step(mngt);
2344 	}
2345 
2346 }
2347 
2348 void
2349 ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2350 			      ftl_chunk_md_cb cb, void *cb_ctx)
2351 {
2352 	ftl_mngt_nv_cache_walk_tail_md(dev, mngt, dev->sb->ckpt_seq_id, cb, cb_ctx);
2353 }
2354 
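/*
 * Callback for restoring the NVC metadata region: validate each chunk's version and
 * activity, move OPEN/CLOSED chunks off the free list and flag them for recovery.
 */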
2355 static void
2356 restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
2357 {
2358 	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
2359 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2360 	struct ftl_nv_cache_chunk *chunk;
2361 	uint64_t i;
2362 
2363 	if (status) {
2364 		/* Restore error, end step */
2365 		ftl_mngt_fail_step(mngt);
2366 		return;
2367 	}
2368 
2369 	for (i = 0; i < nvc->chunk_count; i++) {
2370 		chunk = &nvc->chunks[i];
2371 
2372 		if (false == nvc->nvc_type->ops.is_chunk_active(dev, chunk->offset) &&
2373 		    chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
2374 			status = -EINVAL;
2375 			break;
2376 		}
2377 
2378 		if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
2379 			status = -EINVAL;
2380 			break;
2381 		}
2382 
2383 		switch (chunk->md->state) {
2384 		case FTL_CHUNK_STATE_FREE:
2385 			break;
2386 		case FTL_CHUNK_STATE_OPEN:
2387 			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2388 			nvc->chunk_free_count--;
2389 
2390 			TAILQ_INSERT_TAIL(&nvc->chunk_open_list, chunk, entry);
2391 			nvc->chunk_open_count++;
2392 
2393 			/* Chunk is not empty, mark it to be recovered */
2394 			chunk->recovery = true;
2395 			break;
2396 		case FTL_CHUNK_STATE_CLOSED:
2397 			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2398 			nvc->chunk_free_count--;
2399 
2400 			TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2401 			nvc->chunk_full_count++;
2402 
2403 			/* Chunk is not empty, mark it to be recovered */
2404 			chunk->recovery = true;
2405 			break;
2406 		case FTL_CHUNK_STATE_INACTIVE:
2407 			break;
2408 		default:
2409 			status = -EINVAL;
2410 		}
2411 	}
2412 
2413 	if (status) {
2414 		ftl_mngt_fail_step(mngt);
2415 	} else {
2416 		ftl_mngt_next_step(mngt);
2417 	}
2418 }
2419 
2420 void
2421 ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2422 {
2423 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2424 
2425 	md->owner.cb_ctx = mngt;
2426 	md->cb = restore_chunk_state_cb;
2427 	ftl_md_restore(md);
2428 }
2429 
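/*
 * Final step of recovering a single open chunk: release the P2L map and, on success,
 * move the chunk to the full list with the write pointer set to a fully written chunk.
 */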
2430 static void
2431 recover_open_chunk_cb(struct ftl_basic_rq *brq)
2432 {
2433 	struct ftl_mngt_process *mngt = brq->owner.priv;
2434 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2435 	struct ftl_nv_cache *nvc = chunk->nv_cache;
2436 	struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
2437 
2438 	chunk_free_p2l_map(chunk);
2439 
2440 	if (!brq->success) {
2441 		FTL_ERRLOG(dev, "Recovery chunk ERROR, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2442 			   chunk->md->seq_id);
2443 		ftl_mngt_fail_step(mngt);
2444 		return;
2445 	}
2446 
2447 	FTL_NOTICELOG(dev, "Recovered chunk, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2448 		      chunk->md->seq_id);
2449 
2450 	TAILQ_REMOVE(&nvc->chunk_open_list, chunk, entry);
2451 	nvc->chunk_open_count--;
2452 
2453 	TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2454 	nvc->chunk_full_count++;
2455 
2456 	/* This is now a closed chunk */
2457 	chunk->md->write_pointer = nvc->chunk_blocks;
2458 	chunk->md->blocks_written = nvc->chunk_blocks;
2459 
2460 	ftl_mngt_continue_step(mngt);
2461 }
2462 
2463 void
2464 ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2465 {
2466 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2467 	struct ftl_nv_cache_chunk *chunk;
2468 	struct ftl_basic_rq *brq = ftl_mngt_get_step_ctx(mngt);
2469 
2470 	if (!brq) {
2471 		if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2472 			FTL_NOTICELOG(dev, "No open chunks to recover P2L\n");
2473 			ftl_mngt_next_step(mngt);
2474 			return;
2475 		}
2476 
2477 		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*brq))) {
2478 			ftl_mngt_fail_step(mngt);
2479 			return;
2480 		}
2481 		brq = ftl_mngt_get_step_ctx(mngt);
2482 		ftl_basic_rq_set_owner(brq, recover_open_chunk_cb, mngt);
2483 	}
2484 
2485 	if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2486 		if (!is_chunk_count_valid(nvc)) {
2487 			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2488 			ftl_mngt_fail_step(mngt);
2489 			return;
2490 		}
2491 
2492 		/*
2493 		 * Now that all chunks are loaded and closed, do the final step of restoring
2494 		 * the chunk state
2495 		 */
2496 		if (ftl_nv_cache_load_state(nvc)) {
2497 			ftl_mngt_fail_step(mngt);
2498 		} else {
2499 			ftl_mngt_next_step(mngt);
2500 		}
2501 	} else {
2502 		chunk = TAILQ_FIRST(&nvc->chunk_open_list);
2503 		if (chunk_alloc_p2l_map(chunk)) {
2504 			ftl_mngt_fail_step(mngt);
2505 			return;
2506 		}
2507 
2508 		brq->io.chunk = chunk;
2509 
2510 		FTL_NOTICELOG(dev, "Start recovering open chunk, offset = %"PRIu64", seq id %"PRIu64"\n",
2511 			      chunk->offset, chunk->md->seq_id);
2512 		restore_open_chunk(chunk, brq);
2513 	}
2514 }
2515 
2516 int
2517 ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache)
2518 {
2519 	/* chunk_current migrates to the closed state when closing; any others should already have been
2520 	 * moved to the free chunk list. We also need to wait for outstanding free-chunk md persists. */
2521 	return nv_cache->chunk_open_count == 0 && nv_cache->chunk_free_persist_count == 0;
2522 }
2523 
2524 void
2525 ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
2526 {
2527 	struct ftl_nv_cache_chunk *chunk;
2528 	uint64_t free_space;
2529 
2530 	nv_cache->halt = true;
2531 
2532 	/* Set chunks on the open list back to the free state since no user data has been written to them */
2533 	while (!TAILQ_EMPTY(&nv_cache->chunk_open_list)) {
2534 		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2535 
2536 		/* Chunks are moved between lists on metadata update submission, but state is changed
2537 		 * on completion. Breaking early in such a case to make sure all the necessary resources
2538 		 * will be freed (during next pass(es) of ftl_nv_cache_halt).
2539 		 */
2540 		if (chunk->md->state != FTL_CHUNK_STATE_OPEN) {
2541 			break;
2542 		}
2543 
2544 		TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
2545 		chunk_free_p2l_map(chunk);
2546 		ftl_nv_cache_chunk_md_initialize(chunk->md);
2547 		assert(nv_cache->chunk_open_count > 0);
2548 		nv_cache->chunk_open_count--;
2549 	}
2550 
2551 	/* Close the current chunk by skipping all unwritten blocks */
2552 	chunk = nv_cache->chunk_current;
2553 	if (chunk != NULL) {
2554 		nv_cache->chunk_current = NULL;
2555 		if (chunk_is_closed(chunk)) {
2556 			return;
2557 		}
2558 
2559 		free_space = chunk_get_free_space(nv_cache, chunk);
2560 		chunk->md->blocks_skipped = free_space;
2561 		chunk->md->blocks_written += free_space;
2562 		chunk->md->write_pointer += free_space;
2563 		ftl_chunk_close(chunk);
2564 	}
2565 }
2566 
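/*
 * Acquire a sequence id for trim: skip the current chunk's remaining free space (closing it
 * once all written blocks are accounted for) and return a seq id newer than its own; if there
 * is no current chunk, return the open chunk's seq id, or 0 when none is available.
 */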
2567 uint64_t
2568 ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache)
2569 {
2570 	struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
2571 	uint64_t seq_id, free_space;
2572 
2573 	if (!chunk) {
2574 		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2575 		if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
2576 			return chunk->md->seq_id;
2577 		} else {
2578 			return 0;
2579 		}
2580 	}
2581 
2582 	if (chunk_is_closed(chunk)) {
2583 		return 0;
2584 	}
2585 
2586 	seq_id = nv_cache->chunk_current->md->seq_id;
2587 	free_space = chunk_get_free_space(nv_cache, chunk);
2588 
2589 	chunk->md->blocks_skipped = free_space;
2590 	chunk->md->blocks_written += free_space;
2591 	chunk->md->write_pointer += free_space;
2592 	if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
2593 		ftl_chunk_close(chunk);
2594 	}
2595 	nv_cache->chunk_current = NULL;
2596 
2597 	seq_id++;
2598 	return seq_id;
2599 }
2600 
2601 static double
2602 ftl_nv_cache_get_chunk_utilization(struct ftl_nv_cache *nv_cache,
2603 				   struct ftl_nv_cache_chunk *chunk)
2604 {
2605 	double capacity = nv_cache->chunk_blocks;
2606 	double used = chunk->md->blocks_written + chunk->md->blocks_skipped;
2607 
2608 	return used / capacity;
2609 }
2610 
2611 static const char *
2612 ftl_nv_cache_get_chunk_state_name(struct ftl_nv_cache_chunk *chunk)
2613 {
2614 	static const char *names[] = {
2615 		"FREE", "OPEN", "CLOSED", "INACTIVE"
2616 	};
2617 
2618 	assert(chunk->md->state < SPDK_COUNTOF(names));
2619 	if (chunk->md->state < SPDK_COUNTOF(names)) {
2620 		return names[chunk->md->state];
2621 	} else {
2622 		assert(false);
2623 		return "?";
2624 	}
2625 }
2626 
2627 static void
2628 ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev, const struct ftl_property *property,
2629 			    struct spdk_json_write_ctx *w)
2630 {
2631 	uint64_t i;
2632 	struct ftl_nv_cache_chunk *chunk;
2633 
2634 	spdk_json_write_named_string(w, "type", dev->nv_cache.nvc_type->name);
2635 	spdk_json_write_named_array_begin(w, "chunks");
2636 	for (i = 0, chunk = dev->nv_cache.chunks; i < dev->nv_cache.chunk_count; i++, chunk++) {
2637 		spdk_json_write_object_begin(w);
2638 		spdk_json_write_named_uint64(w, "id", i);
2639 		spdk_json_write_named_string(w, "state", ftl_nv_cache_get_chunk_state_name(chunk));
2640 		spdk_json_write_named_double(w, "utilization",
2641 					     ftl_nv_cache_get_chunk_utilization(&dev->nv_cache, chunk));
2642 		spdk_json_write_object_end(w);
2643 	}
2644 	spdk_json_write_array_end(w);
2645 }
2646 
2647 void
2648 ftl_nv_cache_chunk_md_initialize(struct ftl_nv_cache_chunk_md *md)
2649 {
2650 	memset(md, 0, sizeof(*md));
2651 	md->version = FTL_NVC_VERSION_CURRENT;
2652 }
2653