xref: /spdk/lib/ftl/ftl_nv_cache.c (revision 307b8c112ffd90a26d53dd15fad67bd9038ef526)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 
7 #include "spdk/bdev.h"
8 #include "spdk/bdev_module.h"
9 #include "spdk/ftl.h"
10 #include "spdk/string.h"
11 
12 #include "ftl_nv_cache.h"
13 #include "ftl_nv_cache_io.h"
14 #include "ftl_core.h"
15 #include "ftl_band.h"
16 #include "utils/ftl_addr_utils.h"
17 #include "mngt/ftl_mngt.h"
18 
19 static inline uint64_t nvc_data_blocks(struct ftl_nv_cache *nv_cache) __attribute__((unused));
20 static struct ftl_nv_cache_compactor *compactor_alloc(struct spdk_ftl_dev *dev);
21 static void compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor);
22 static void compaction_process_ftl_done(struct ftl_rq *rq);
23 
24 static inline const struct ftl_layout_region *
25 nvc_data_region(struct ftl_nv_cache *nv_cache)
26 {
27 	struct spdk_ftl_dev *dev;
28 
29 	dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
30 	return &dev->layout.region[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
31 }
32 
33 static inline void
34 nvc_validate_md(struct ftl_nv_cache *nv_cache,
35 		struct ftl_nv_cache_chunk_md *chunk_md)
36 {
37 	struct ftl_md *md = nv_cache->md;
38 	void *buffer = ftl_md_get_buffer(md);
39 	uint64_t size = ftl_md_get_buffer_size(md);
40 	void *ptr = chunk_md;
41 
42 	if (ptr < buffer) {
43 		ftl_abort();
44 	}
45 
46 	ptr += sizeof(*chunk_md);
47 	if (ptr > buffer + size) {
48 		ftl_abort();
49 	}
50 }
51 
52 static inline uint64_t
53 nvc_data_offset(struct ftl_nv_cache *nv_cache)
54 {
55 	return nvc_data_region(nv_cache)->current.offset;
56 }
57 
58 static inline uint64_t
59 nvc_data_blocks(struct ftl_nv_cache *nv_cache)
60 {
61 	return nvc_data_region(nv_cache)->current.blocks;
62 }
63 
64 size_t
65 ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache)
66 {
67 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache,
68 				    struct spdk_ftl_dev, nv_cache);
69 	return spdk_divide_round_up(dev->layout.nvc.chunk_data_blocks * dev->layout.l2p.addr_size,
70 				    FTL_BLOCK_SIZE);
71 }
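/*
 * Illustration (hypothetical values, not taken from any particular configuration): with
 * chunk_data_blocks = 65536 and an L2P address size of 8 bytes, the tail metadata of a
 * chunk occupies 65536 * 8 / 4096 = 128 blocks after rounding up to FTL_BLOCK_SIZE.
 */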
72 
73 static size_t
74 nv_cache_p2l_map_pool_elem_size(const struct ftl_nv_cache *nv_cache)
75 {
76 	/* Map pool element holds the whole tail md */
77 	return nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE;
78 }
79 
80 static uint64_t
81 get_chunk_idx(struct ftl_nv_cache_chunk *chunk)
82 {
83 	struct ftl_nv_cache_chunk *first_chunk = chunk->nv_cache->chunks;
84 
85 	return (chunk->offset - first_chunk->offset) / chunk->nv_cache->chunk_blocks;
86 }
87 
88 int
89 ftl_nv_cache_init(struct spdk_ftl_dev *dev)
90 {
91 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
92 	struct ftl_nv_cache_chunk *chunk;
93 	struct ftl_nv_cache_chunk_md *md;
94 	struct ftl_nv_cache_compactor *compactor;
95 	uint64_t i, offset;
96 
97 	nv_cache->halt = true;
98 
99 	nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
100 	if (!nv_cache->md) {
101 		FTL_ERRLOG(dev, "No NV cache metadata object\n");
102 		return -1;
103 	}
104 
105 	nv_cache->md_pool = ftl_mempool_create(dev->conf.user_io_pool_size,
106 					       nv_cache->md_size * dev->xfer_size,
107 					       FTL_BLOCK_SIZE, SPDK_ENV_SOCKET_ID_ANY);
108 	if (!nv_cache->md_pool) {
109 		FTL_ERRLOG(dev, "Failed to initialize NV cache metadata pool\n");
110 		return -1;
111 	}
112 
113 	/*
114 	 * Initialize chunk info
115 	 */
116 	nv_cache->chunk_blocks = dev->layout.nvc.chunk_data_blocks;
117 	nv_cache->chunk_count = dev->layout.nvc.chunk_count;
118 	nv_cache->tail_md_chunk_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache);
119 
120 	/* Allocate chunks */
121 	nv_cache->chunks = calloc(nv_cache->chunk_count,
122 				  sizeof(nv_cache->chunks[0]));
123 	if (!nv_cache->chunks) {
124 		FTL_ERRLOG(dev, "Failed to initialize NV cache chunks\n");
125 		return -1;
126 	}
127 
128 	TAILQ_INIT(&nv_cache->chunk_free_list);
129 	TAILQ_INIT(&nv_cache->chunk_open_list);
130 	TAILQ_INIT(&nv_cache->chunk_full_list);
131 	TAILQ_INIT(&nv_cache->chunk_comp_list);
132 	TAILQ_INIT(&nv_cache->needs_free_persist_list);
133 
134 	/* First chunk metadata */
135 	md = ftl_md_get_buffer(nv_cache->md);
136 	if (!md) {
137 		FTL_ERRLOG(dev, "No NV cache metadata\n");
138 		return -1;
139 	}
140 
141 	nv_cache->chunk_free_count = nv_cache->chunk_count;
142 
143 	chunk = nv_cache->chunks;
144 	offset = nvc_data_offset(nv_cache);
145 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
146 		chunk->nv_cache = nv_cache;
147 		chunk->md = md;
148 		nvc_validate_md(nv_cache, md);
149 		chunk->offset = offset;
150 		offset += nv_cache->chunk_blocks;
151 		TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
152 	}
153 	assert(offset <= nvc_data_offset(nv_cache) + nvc_data_blocks(nv_cache));
154 
155 	/* Start compaction when the number of full chunks reaches the given % of all chunks */
156 	nv_cache->chunk_compaction_threshold = nv_cache->chunk_count *
157 					       dev->conf.nv_cache.chunk_compaction_threshold / 100;
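	/*
	 * Example (hypothetical values): with chunk_count = 1024 and a configured
	 * chunk_compaction_threshold of 80%, compaction starts once
	 * 1024 * 80 / 100 = 819 chunks are full.
	 */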
158 	TAILQ_INIT(&nv_cache->compactor_list);
159 	for (i = 0; i < FTL_NV_CACHE_NUM_COMPACTORS; i++) {
160 		compactor = compactor_alloc(dev);
161 
162 		if (!compactor) {
163 			FTL_ERRLOG(dev, "Cannot allocate compaction process\n");
164 			return -1;
165 		}
166 
167 		TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
168 	}
169 
170 #define FTL_MAX_OPEN_CHUNKS 2
171 	nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
172 						nv_cache_p2l_map_pool_elem_size(nv_cache),
173 						FTL_BLOCK_SIZE,
174 						SPDK_ENV_SOCKET_ID_ANY);
175 	if (!nv_cache->p2l_pool) {
176 		return -ENOMEM;
177 	}
178 
179 	/* One entry per open chunk */
180 	nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
181 				  sizeof(struct ftl_nv_cache_chunk_md),
182 				  FTL_BLOCK_SIZE,
183 				  SPDK_ENV_SOCKET_ID_ANY);
184 	if (!nv_cache->chunk_md_pool) {
185 		return -ENOMEM;
186 	}
187 
188 	/* Each compactor may be reading a different chunk which it needs to switch to the free state at the end,
189 	 * plus one backup entry each for processing highly invalidated chunks (if there's a backlog of chunks with
190 	 * extremely low, even zero, validity, the compactors can process them quickly and trigger many
191 	 * free-state updates at once). */
192 	nv_cache->free_chunk_md_pool = ftl_mempool_create(2 * FTL_NV_CACHE_NUM_COMPACTORS,
193 				       sizeof(struct ftl_nv_cache_chunk_md),
194 				       FTL_BLOCK_SIZE,
195 				       SPDK_ENV_SOCKET_ID_ANY);
196 	if (!nv_cache->free_chunk_md_pool) {
197 		return -ENOMEM;
198 	}
199 
200 	nv_cache->throttle.interval_tsc = FTL_NV_CACHE_THROTTLE_INTERVAL_MS *
201 					  (spdk_get_ticks_hz() / 1000);
202 	nv_cache->chunk_free_target = spdk_divide_round_up(nv_cache->chunk_count *
203 				      dev->conf.nv_cache.chunk_free_target,
204 				      100);
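	/*
	 * The free-chunk target computed above drives throttling. Example (hypothetical
	 * values): with chunk_count = 1024 and chunk_free_target = 5%, the throttle aims to
	 * keep at least spdk_divide_round_up(1024 * 5, 100) = 52 chunks free.
	 */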
205 	return 0;
206 }
207 
208 void
209 ftl_nv_cache_deinit(struct spdk_ftl_dev *dev)
210 {
211 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
212 	struct ftl_nv_cache_compactor *compactor;
213 
214 	while (!TAILQ_EMPTY(&nv_cache->compactor_list)) {
215 		compactor = TAILQ_FIRST(&nv_cache->compactor_list);
216 		TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
217 
218 		compactor_free(dev, compactor);
219 	}
220 
221 	ftl_mempool_destroy(nv_cache->md_pool);
222 	ftl_mempool_destroy(nv_cache->p2l_pool);
223 	ftl_mempool_destroy(nv_cache->chunk_md_pool);
224 	ftl_mempool_destroy(nv_cache->free_chunk_md_pool);
225 	nv_cache->md_pool = NULL;
226 	nv_cache->p2l_pool = NULL;
227 	nv_cache->chunk_md_pool = NULL;
228 	nv_cache->free_chunk_md_pool = NULL;
229 
230 	free(nv_cache->chunks);
231 	nv_cache->chunks = NULL;
232 }
233 
234 static uint64_t
235 chunk_get_free_space(struct ftl_nv_cache *nv_cache,
236 		     struct ftl_nv_cache_chunk *chunk)
237 {
238 	assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <=
239 	       nv_cache->chunk_blocks);
240 	return nv_cache->chunk_blocks - chunk->md->write_pointer -
241 	       nv_cache->tail_md_chunk_blocks;
242 }
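/*
 * Example (hypothetical values): with chunk_blocks = 65536, tail_md_chunk_blocks = 128 and
 * write_pointer = 1000, the chunk still has 65536 - 1000 - 128 = 64408 writable data blocks.
 */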
243 
244 static bool
245 chunk_is_closed(struct ftl_nv_cache_chunk *chunk)
246 {
247 	return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
248 }
249 
250 static void ftl_chunk_close(struct ftl_nv_cache_chunk *chunk);
251 
252 static uint64_t
253 ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io)
254 {
255 	uint64_t address = FTL_LBA_INVALID;
256 	uint64_t num_blocks = io->num_blocks;
257 	uint64_t free_space;
258 	struct ftl_nv_cache_chunk *chunk;
259 
260 	do {
261 		chunk = nv_cache->chunk_current;
262 		/* Chunk has been closed, so pick a new one */
263 		if (chunk && chunk_is_closed(chunk)) {
264 			chunk = NULL;
265 		}
266 
267 		if (!chunk) {
268 			chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
269 			if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
270 				TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
271 				nv_cache->chunk_current = chunk;
272 			} else {
273 				break;
274 			}
275 		}
276 
277 		free_space = chunk_get_free_space(nv_cache, chunk);
278 
279 		if (free_space >= num_blocks) {
280 			/* Enough space in chunk */
281 
282 			/* Calculate address in NV cache */
283 			address = chunk->offset + chunk->md->write_pointer;
284 
285 			/* Set chunk in IO */
286 			io->nv_cache_chunk = chunk;
287 
288 			/* Move write pointer */
289 			chunk->md->write_pointer += num_blocks;
290 			break;
291 		}
292 
293 		/* Not enough space in nv_cache_chunk */
294 		nv_cache->chunk_current = NULL;
295 
296 		if (0 == free_space) {
297 			continue;
298 		}
299 
300 		chunk->md->blocks_skipped = free_space;
301 		chunk->md->blocks_written += free_space;
302 		chunk->md->write_pointer += free_space;
303 
304 		if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
305 			ftl_chunk_close(chunk);
306 		}
307 	} while (1);
308 
309 	return address;
310 }
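/*
 * Note on the loop above: when the current chunk cannot hold the whole request, its remaining
 * space is accounted for as skipped blocks and the allocation retries on the next open chunk;
 * FTL_LBA_INVALID is returned only when no usable open chunk is available.
 */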
311 
312 void
313 ftl_nv_cache_fill_md(struct ftl_io *io)
314 {
315 	struct ftl_nv_cache_chunk *chunk = io->nv_cache_chunk;
316 	uint64_t i;
317 	union ftl_md_vss *metadata = io->md;
318 	uint64_t lba = ftl_io_get_lba(io, 0);
319 
320 	for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) {
321 		metadata->nv_cache.lba = lba;
322 		metadata->nv_cache.seq_id = chunk->md->seq_id;
323 	}
324 }
325 
326 uint64_t
327 chunk_tail_md_offset(struct ftl_nv_cache *nv_cache)
328 {
329 	return nv_cache->chunk_blocks - nv_cache->tail_md_chunk_blocks;
330 }
331 
332 static void
333 chunk_advance_blocks(struct ftl_nv_cache *nv_cache, struct ftl_nv_cache_chunk *chunk,
334 		     uint64_t advanced_blocks)
335 {
336 	chunk->md->blocks_written += advanced_blocks;
337 
338 	assert(chunk->md->blocks_written <= nv_cache->chunk_blocks);
339 
340 	if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
341 		ftl_chunk_close(chunk);
342 	}
343 }
344 
345 static uint64_t
346 chunk_user_blocks_written(struct ftl_nv_cache_chunk *chunk)
347 {
348 	return chunk->md->blocks_written - chunk->md->blocks_skipped -
349 	       chunk->nv_cache->tail_md_chunk_blocks;
350 }
351 
352 static bool
353 is_chunk_compacted(struct ftl_nv_cache_chunk *chunk)
354 {
355 	assert(chunk->md->blocks_written != 0);
356 
357 	if (chunk_user_blocks_written(chunk) == chunk->md->blocks_compacted) {
358 		return true;
359 	}
360 
361 	return false;
362 }
363 
364 static int
365 ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk)
366 {
367 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
368 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
369 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
370 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
371 
372 	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->chunk_md_pool);
373 
374 	if (!p2l_map->chunk_dma_md) {
375 		return -ENOMEM;
376 	}
377 
378 	memset(p2l_map->chunk_dma_md, 0, region->entry_size * FTL_BLOCK_SIZE);
379 	return 0;
380 }
381 
382 static void
383 ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk)
384 {
385 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
386 
387 	ftl_mempool_put(chunk->nv_cache->chunk_md_pool, p2l_map->chunk_dma_md);
388 	p2l_map->chunk_dma_md = NULL;
389 }
390 
391 static void
392 ftl_chunk_free(struct ftl_nv_cache_chunk *chunk)
393 {
394 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
395 
396 	/* Reset chunk */
397 	memset(chunk->md, 0, sizeof(*chunk->md));
398 
399 	TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry);
400 	nv_cache->chunk_free_persist_count++;
401 }
402 
403 static int
404 ftl_chunk_alloc_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
405 {
406 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
407 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
408 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
409 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
410 
411 	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->free_chunk_md_pool);
412 
413 	if (!p2l_map->chunk_dma_md) {
414 		return -ENOMEM;
415 	}
416 
417 	memset(p2l_map->chunk_dma_md, 0, region->entry_size * FTL_BLOCK_SIZE);
418 	return 0;
419 }
420 
421 static void
422 ftl_chunk_free_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
423 {
424 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
425 
426 	ftl_mempool_put(chunk->nv_cache->free_chunk_md_pool, p2l_map->chunk_dma_md);
427 	p2l_map->chunk_dma_md = NULL;
428 }
429 
430 static void
431 chunk_free_cb(int status, void *ctx)
432 {
433 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
434 
435 	if (spdk_likely(!status)) {
436 		struct ftl_nv_cache *nv_cache = chunk->nv_cache;
437 
438 		nv_cache->chunk_free_persist_count--;
439 		TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
440 		nv_cache->chunk_free_count++;
441 		nv_cache->chunk_full_count--;
442 		chunk->md->state = FTL_CHUNK_STATE_FREE;
443 		chunk->md->close_seq_id = 0;
444 		ftl_chunk_free_chunk_free_entry(chunk);
445 	} else {
446 #ifdef SPDK_FTL_RETRY_ON_ERROR
447 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
448 #else
449 		ftl_abort();
450 #endif
451 	}
452 }
453 
454 static void
455 ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
456 {
457 	int rc;
458 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
459 	struct ftl_p2l_map *p2l_map;
460 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
461 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
462 	struct ftl_nv_cache_chunk *tchunk, *chunk = NULL;
463 
464 	TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) {
465 		p2l_map = &chunk->p2l_map;
466 		rc = ftl_chunk_alloc_chunk_free_entry(chunk);
467 		if (rc) {
468 			break;
469 		}
470 
471 		TAILQ_REMOVE(&nv_cache->needs_free_persist_list, chunk, entry);
472 
473 		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
474 		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_FREE;
475 		p2l_map->chunk_dma_md->close_seq_id = 0;
476 		p2l_map->chunk_dma_md->p2l_map_checksum = 0;
477 
478 		ftl_md_persist_entry(md, get_chunk_idx(chunk), p2l_map->chunk_dma_md, NULL,
479 				     chunk_free_cb, chunk, &chunk->md_persist_entry_ctx);
480 	}
481 }
482 
483 static void
484 compaction_stats_update(struct ftl_nv_cache_chunk *chunk)
485 {
486 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
487 	struct compaction_bw_stats *compaction_bw = &nv_cache->compaction_recent_bw;
488 	double *ptr;
489 
490 	if (spdk_unlikely(chunk->compaction_length_tsc == 0)) {
491 		return;
492 	}
493 
494 	if (spdk_likely(compaction_bw->count == FTL_NV_CACHE_COMPACTION_SMA_N)) {
495 		ptr = compaction_bw->buf + compaction_bw->first;
496 		compaction_bw->first++;
497 		if (compaction_bw->first == FTL_NV_CACHE_COMPACTION_SMA_N) {
498 			compaction_bw->first = 0;
499 		}
500 		compaction_bw->sum -= *ptr;
501 	} else {
502 		ptr = compaction_bw->buf + compaction_bw->count;
503 		compaction_bw->count++;
504 	}
505 
506 	*ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc;
507 	chunk->compaction_length_tsc = 0;
508 
509 	compaction_bw->sum += *ptr;
510 	nv_cache->compaction_sma = compaction_bw->sum / compaction_bw->count;
511 }
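/*
 * The function above keeps a simple moving average of compaction bandwidth (bytes per TSC
 * tick) over up to FTL_NV_CACHE_COMPACTION_SMA_N recent chunk samples. Conceptually the
 * circular-buffer update is just (sketch, not part of the driver):
 *
 *   sum = sum - oldest_sample + new_sample;
 *   sma = sum / sample_count;
 */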
512 
513 static void
514 chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)
515 {
516 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
517 	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
518 
519 	chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
520 	chunk->compaction_start_tsc = tsc;
521 
522 	chunk->md->blocks_compacted += num_blocks;
523 	if (!is_chunk_compacted(chunk)) {
524 		return;
525 	}
526 
527 	/* Remove the chunk from the compaction list */
528 	TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry);
529 	nv_cache->chunk_comp_count--;
530 
531 	compaction_stats_update(chunk);
532 
533 	ftl_chunk_free(chunk);
534 }
535 
536 static bool
537 is_compaction_required(struct ftl_nv_cache *nv_cache)
538 {
539 	uint64_t full;
540 
541 	if (spdk_unlikely(nv_cache->halt)) {
542 		return false;
543 	}
544 
545 	full = nv_cache->chunk_full_count - nv_cache->compaction_active_count;
546 	if (full >= nv_cache->chunk_compaction_threshold) {
547 		return true;
548 	}
549 
550 	return false;
551 }
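/*
 * Example (hypothetical values): with the threshold computed in ftl_nv_cache_init() equal to
 * 819, compaction is considered required once the number of full chunks, less the number of
 * currently active compactors, reaches 819.
 */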
552 
553 static void compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor);
554 static void compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp);
555 
556 static void
557 _compaction_process_pin_lba(void *_comp)
558 {
559 	struct ftl_nv_cache_compactor *comp = _comp;
560 
561 	compaction_process_pin_lba(comp);
562 }
563 
564 static void
565 compaction_process_pin_lba_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
566 {
567 	struct ftl_nv_cache_compactor *comp = pin_ctx->cb_ctx;
568 	struct ftl_rq *rq = comp->rd;
569 
570 	if (status) {
571 		rq->iter.status = status;
572 		pin_ctx->lba = FTL_LBA_INVALID;
573 	}
574 
575 	if (--rq->iter.remaining == 0) {
576 		if (rq->iter.status) {
577 			/* unpin and try again */
578 			ftl_rq_unpin(rq);
579 			spdk_thread_send_msg(spdk_get_thread(), _compaction_process_pin_lba, comp);
580 			return;
581 		}
582 
583 		compaction_process_finish_read(comp);
584 	}
585 }
586 
587 static void
588 compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp)
589 {
590 	union ftl_md_vss *md;
591 	struct ftl_nv_cache_chunk *chunk = comp->rd->owner.priv;
592 	struct spdk_ftl_dev *dev = comp->rd->dev;
593 	uint64_t i;
594 	uint32_t count = comp->rd->iter.count;
595 	struct ftl_rq_entry *entry;
596 	struct ftl_l2p_pin_ctx *pin_ctx;
597 
598 	assert(comp->rd->iter.idx == 0);
599 	comp->rd->iter.remaining = count;
600 	comp->rd->iter.status = 0;
601 
602 	for (i = 0; i < count; i++) {
603 		entry = &comp->rd->entries[i];
604 		pin_ctx = &entry->l2p_pin_ctx;
605 		md = entry->io_md;
606 		if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
607 			ftl_l2p_pin_skip(dev, compaction_process_pin_lba_cb, comp, pin_ctx);
608 		} else {
609 			ftl_l2p_pin(dev, md->nv_cache.lba, 1, compaction_process_pin_lba_cb, comp, pin_ctx);
610 		}
611 	}
612 }
613 
614 static int compaction_submit_read(struct ftl_nv_cache_compactor *compactor, ftl_addr addr,
615 				  uint64_t num_blocks);
616 
617 static void
618 compaction_retry_read(void *_compactor)
619 {
620 	struct ftl_nv_cache_compactor *compactor = _compactor;
621 	struct ftl_rq *rq = compactor->rd;
622 	struct spdk_bdev *bdev;
623 	int ret;
624 
625 	ret = compaction_submit_read(compactor, rq->io.addr, rq->iter.count);
626 
627 	if (spdk_likely(!ret)) {
628 		return;
629 	}
630 
631 	if (ret == -ENOMEM) {
632 		bdev = spdk_bdev_desc_get_bdev(compactor->nv_cache->bdev_desc);
633 		compactor->bdev_io_wait.bdev = bdev;
634 		compactor->bdev_io_wait.cb_fn = compaction_retry_read;
635 		compactor->bdev_io_wait.cb_arg = compactor;
636 		spdk_bdev_queue_io_wait(bdev, compactor->nv_cache->cache_ioch, &compactor->bdev_io_wait);
637 	} else {
638 		ftl_abort();
639 	}
640 }
641 
642 static void
643 compaction_process_read_cb(struct spdk_bdev_io *bdev_io,
644 			   bool success, void *cb_arg)
645 {
646 	struct ftl_nv_cache_compactor *compactor = cb_arg;
647 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(compactor->nv_cache, struct spdk_ftl_dev, nv_cache);
648 
649 	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_CMP, bdev_io);
650 
651 	spdk_bdev_free_io(bdev_io);
652 
653 	if (!success) {
654 		/* retry */
655 		spdk_thread_send_msg(spdk_get_thread(), compaction_retry_read, compactor);
656 		return;
657 	}
658 
659 	compaction_process_pin_lba(compactor);
660 }
661 
662 static bool
663 is_chunk_to_read(struct ftl_nv_cache_chunk *chunk)
664 {
665 	assert(chunk->md->blocks_written != 0);
666 
667 	if (chunk_user_blocks_written(chunk) == chunk->md->read_pointer) {
668 		return false;
669 	}
670 
671 	return true;
672 }
673 
674 static struct ftl_nv_cache_chunk *
675 get_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
676 {
677 	struct ftl_nv_cache_chunk *chunk = NULL;
678 
679 	if (!TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
680 		chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
681 		if (is_chunk_to_read(chunk)) {
682 			return chunk;
683 		}
684 	}
685 
686 	if (!TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
687 		chunk = TAILQ_FIRST(&nv_cache->chunk_full_list);
688 		TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry);
689 
690 		assert(chunk->md->write_pointer);
691 	} else {
692 		return NULL;
693 	}
694 
695 	if (spdk_likely(chunk)) {
696 		assert(chunk->md->write_pointer != 0);
697 		TAILQ_INSERT_HEAD(&nv_cache->chunk_comp_list, chunk, entry);
698 		nv_cache->chunk_comp_count++;
699 	}
700 
701 	return chunk;
702 }
703 
704 static uint64_t
705 chunk_blocks_to_read(struct ftl_nv_cache_chunk *chunk)
706 {
707 	uint64_t blocks_written;
708 	uint64_t blocks_to_read;
709 
710 	assert(chunk->md->blocks_written >= chunk->md->blocks_skipped);
711 	blocks_written = chunk_user_blocks_written(chunk);
712 
713 	assert(blocks_written >= chunk->md->read_pointer);
714 	blocks_to_read = blocks_written - chunk->md->read_pointer;
715 
716 	return blocks_to_read;
717 }
718 
719 static void
720 compactor_deactivate(struct ftl_nv_cache_compactor *compactor)
721 {
722 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
723 
724 	nv_cache->compaction_active_count--;
725 	TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
726 }
727 
728 static int
729 compaction_submit_read(struct ftl_nv_cache_compactor *compactor, ftl_addr addr,
730 		       uint64_t num_blocks)
731 {
732 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
733 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
734 
735 	return ftl_nv_cache_bdev_readv_blocks_with_md(dev, nv_cache->bdev_desc,
736 			nv_cache->cache_ioch,
737 			compactor->rd->io_vec, num_blocks,
738 			compactor->rd->io_md,
739 			ftl_addr_to_nvc_offset(dev, addr), num_blocks,
740 			compaction_process_read_cb,
741 			compactor);
742 }
743 
744 static void
745 compaction_process_pad(struct ftl_nv_cache_compactor *compactor)
746 {
747 	struct ftl_rq *wr = compactor->wr;
748 	const uint64_t num_entries = wr->num_blocks;
749 	struct ftl_rq_entry *iter;
750 
751 	iter = &wr->entries[wr->iter.idx];
752 
753 	while (wr->iter.idx < num_entries) {
754 		iter->addr = FTL_ADDR_INVALID;
755 		iter->owner.priv = NULL;
756 		iter->lba = FTL_LBA_INVALID;
757 		iter->seq_id = 0;
758 		iter++;
759 		wr->iter.idx++;
760 	}
761 }
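/*
 * Padding fills the remaining write-request entries with FTL_ADDR_INVALID/FTL_LBA_INVALID
 * markers so the request can be submitted at full size even when nothing is left to compact;
 * compaction_process_ftl_done() skips such entries when updating the L2P.
 */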
762 
763 static void
764 compaction_process(struct ftl_nv_cache_compactor *compactor)
765 {
766 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
767 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache,
768 				   struct spdk_ftl_dev, nv_cache);
769 	struct ftl_nv_cache_chunk *chunk;
770 	uint64_t to_read, addr, begin, end, offset;
771 	int rc;
772 
773 	/* Check if all read blocks have been processed */
774 	assert(compactor->rd->iter.idx <= compactor->rd->iter.count);
775 	if (compactor->rd->iter.idx < compactor->rd->iter.count) {
776 		compaction_process_finish_read(compactor);
777 		return;
778 	}
779 
780 	/*
781 	 * Get currently handled chunk
782 	 */
783 	chunk = get_chunk_for_compaction(nv_cache);
784 	if (!chunk) {
785 		/* No chunks to compact, pad this request */
786 		compaction_process_pad(compactor);
787 		ftl_writer_queue_rq(&dev->writer_user, compactor->wr);
788 		return;
789 	}
790 
791 	chunk->compaction_start_tsc = spdk_thread_get_last_tsc(spdk_get_thread());
792 
793 	/*
794 	 * Get range of blocks to read
795 	 */
796 	to_read = chunk_blocks_to_read(chunk);
797 	assert(to_read > 0);
798 
799 	addr = ftl_addr_from_nvc_offset(dev, chunk->offset + chunk->md->read_pointer);
800 	begin = ftl_bitmap_find_first_set(dev->valid_map, addr, addr + to_read);
801 	if (begin != UINT64_MAX) {
802 		offset = spdk_min(begin - addr, to_read);
803 	} else {
804 		offset = to_read;
805 	}
806 
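	/*
	 * 'offset' is the number of leading blocks in the range whose data is no longer valid
	 * (not set in the valid map); they can be skipped without reading, advancing both the
	 * read pointer and the compaction progress.
	 */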
807 	if (offset) {
808 		chunk->md->read_pointer += offset;
809 		chunk_compaction_advance(chunk, offset);
810 		to_read -= offset;
811 		if (!to_read) {
812 			compactor_deactivate(compactor);
813 			return;
814 		}
815 	}
816 
817 	end = ftl_bitmap_find_first_clear(dev->valid_map, begin + 1, begin + to_read);
818 	if (end != UINT64_MAX) {
819 		to_read = end - begin;
820 	}
821 
822 	addr = begin;
823 	to_read = spdk_min(to_read, compactor->rd->num_blocks);
824 
825 	/* Read data and metadata from NV cache */
826 	rc = compaction_submit_read(compactor, addr, to_read);
827 	if (spdk_unlikely(rc)) {
828 		/* An error occurred, deactivate this compactor; it will retry
829 		 * in the next iteration
830 		 */
831 		compactor_deactivate(compactor);
832 		return;
833 	}
834 
835 	/* IO has started, initialize compaction */
836 	compactor->rd->owner.priv = chunk;
837 	compactor->rd->iter.idx = 0;
838 	compactor->rd->iter.count = to_read;
839 	compactor->rd->io.addr = addr;
840 
841 	/* Move read pointer in the chunk */
842 	chunk->md->read_pointer += to_read;
843 }
844 
845 static void
846 compaction_process_start(struct ftl_nv_cache_compactor *compactor)
847 {
848 	compactor->nv_cache->compaction_active_count++;
849 	compaction_process(compactor);
850 }
851 
852 static void
853 compaction_process_ftl_done(struct ftl_rq *rq)
854 {
855 	struct spdk_ftl_dev *dev = rq->dev;
856 	struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
857 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
858 	struct ftl_band *band = rq->io.band;
859 	struct ftl_rq_entry *entry;
860 	ftl_addr addr;
861 	uint64_t i;
862 
863 	if (spdk_unlikely(false == rq->success)) {
864 		/* IO error, retry writing */
865 #ifdef SPDK_FTL_RETRY_ON_ERROR
866 		ftl_writer_queue_rq(&dev->writer_user, rq);
867 		return;
868 #else
869 		ftl_abort();
870 #endif
871 	}
872 
873 	/* Update L2P table */
874 	addr = rq->io.addr;
875 	for (i = 0, entry = rq->entries; i < rq->num_blocks; i++, entry++) {
876 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
877 
878 		if (entry->lba == FTL_LBA_INVALID) {
879 			assert(entry->addr == FTL_ADDR_INVALID);
880 			addr = ftl_band_next_addr(band, addr, 1);
881 			continue;
882 		}
883 
884 		ftl_l2p_update_base(dev, entry->lba, addr, entry->addr);
885 		ftl_l2p_unpin(dev, entry->lba, 1);
886 
887 		chunk_compaction_advance(chunk, 1);
888 		addr = ftl_band_next_addr(band, addr, 1);
889 	}
890 
891 	compactor->wr->iter.idx = 0;
892 
893 	if (is_compaction_required(nv_cache)) {
894 		compaction_process(compactor);
895 	} else {
896 		compactor_deactivate(compactor);
897 	}
898 }
899 
900 static void
901 compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
902 {
903 	struct ftl_rq *wr = compactor->wr;
904 	struct ftl_rq *rd = compactor->rd;
905 	ftl_addr cache_addr = rd->io.addr;
906 	struct ftl_nv_cache_chunk *chunk = rd->owner.priv;
907 	struct spdk_ftl_dev *dev;
908 	struct ftl_rq_entry *iter;
909 	union ftl_md_vss *md;
910 	ftl_addr current_addr;
911 	const uint64_t num_entries = wr->num_blocks;
912 	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
913 
914 	chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
915 	chunk->compaction_start_tsc = tsc;
916 
917 	dev = SPDK_CONTAINEROF(compactor->nv_cache,
918 			       struct spdk_ftl_dev, nv_cache);
919 
920 	assert(wr->iter.idx < num_entries);
921 	assert(rd->iter.idx < rd->iter.count);
922 
923 	cache_addr += rd->iter.idx;
924 	iter = &wr->entries[wr->iter.idx];
925 
926 	while (wr->iter.idx < num_entries && rd->iter.idx < rd->iter.count) {
927 		/* Get metadata */
928 		md = rd->entries[rd->iter.idx].io_md;
929 		if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
930 			cache_addr++;
931 			rd->iter.idx++;
932 			chunk_compaction_advance(chunk, 1);
933 			continue;
934 		}
935 
936 		current_addr = ftl_l2p_get(dev, md->nv_cache.lba);
937 		if (current_addr == cache_addr) {
938 			/* Swap payload */
939 			ftl_rq_swap_payload(wr, wr->iter.idx, rd, rd->iter.idx);
940 
941 			/*
942 			 * The address is still the same, so we can continue compacting
943 			 * it back to the base FTL; set the number of valid entries
944 			 * within this batch
945 			 */
946 			iter->addr = current_addr;
947 			iter->owner.priv = chunk;
948 			iter->lba = md->nv_cache.lba;
949 			iter->seq_id = chunk->md->seq_id;
950 
951 			/* Advance within batch */
952 			iter++;
953 			wr->iter.idx++;
954 		} else {
955 			/* This address has already been invalidated, just skip this block */
956 			chunk_compaction_advance(chunk, 1);
957 			ftl_l2p_unpin(dev, md->nv_cache.lba, 1);
958 		}
959 
960 		/* Advance within reader */
961 		rd->iter.idx++;
962 		cache_addr++;
963 	}
964 
965 	if (num_entries == wr->iter.idx) {
966 		/*
967 		 * Request contains data to be placed on FTL, compact it
968 		 */
969 		ftl_writer_queue_rq(&dev->writer_user, wr);
970 	} else {
971 		if (is_compaction_required(compactor->nv_cache)) {
972 			compaction_process(compactor);
973 		} else {
974 			compactor_deactivate(compactor);
975 		}
976 	}
977 }
978 
979 static void
980 compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor)
981 {
982 	if (!compactor) {
983 		return;
984 	}
985 
986 	ftl_rq_del(compactor->wr);
987 	ftl_rq_del(compactor->rd);
988 	free(compactor);
989 }
990 
991 static struct ftl_nv_cache_compactor *
992 compactor_alloc(struct spdk_ftl_dev *dev)
993 {
994 	struct ftl_nv_cache_compactor *compactor;
995 
996 	compactor = calloc(1, sizeof(*compactor));
997 	if (!compactor) {
998 		goto error;
999 	}
1000 
1001 	/* Allocate helper request for writing */
1002 	compactor->wr = ftl_rq_new(dev, dev->md_size);
1003 	if (!compactor->wr) {
1004 		goto error;
1005 	}
1006 
1007 	/* Allocate helper request for reading */
1008 	compactor->rd = ftl_rq_new(dev, dev->nv_cache.md_size);
1009 	if (!compactor->rd) {
1010 		goto error;
1011 	}
1012 
1013 	compactor->nv_cache = &dev->nv_cache;
1014 	compactor->wr->owner.priv = compactor;
1015 	compactor->wr->owner.cb = compaction_process_ftl_done;
1016 	compactor->wr->owner.compaction = true;
1017 
1018 	return compactor;
1019 
1020 error:
1021 	compactor_free(dev, compactor);
1022 	return NULL;
1023 }
1024 
1025 static void
1026 ftl_nv_cache_submit_cb_done(struct ftl_io *io)
1027 {
1028 	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1029 
1030 	chunk_advance_blocks(nv_cache, io->nv_cache_chunk, io->num_blocks);
1031 	io->nv_cache_chunk = NULL;
1032 
1033 	ftl_mempool_put(nv_cache->md_pool, io->md);
1034 	ftl_io_complete(io);
1035 }
1036 
1037 static void
1038 ftl_nv_cache_l2p_update(struct ftl_io *io)
1039 {
1040 	struct spdk_ftl_dev *dev = io->dev;
1041 	ftl_addr next_addr = io->addr;
1042 	size_t i;
1043 
1044 	for (i = 0; i < io->num_blocks; ++i, ++next_addr) {
1045 		ftl_l2p_update_cache(dev, ftl_io_get_lba(io, i), next_addr, io->map[i]);
1046 	}
1047 
1048 	ftl_l2p_unpin(dev, io->lba, io->num_blocks);
1049 	ftl_nv_cache_submit_cb_done(io);
1050 }
1051 
1052 static void
1053 ftl_nv_cache_submit_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1054 {
1055 	struct ftl_io *io = cb_arg;
1056 
1057 	ftl_stats_bdev_io_completed(io->dev, FTL_STATS_TYPE_USER, bdev_io);
1058 
1059 	spdk_bdev_free_io(bdev_io);
1060 
1061 	if (spdk_unlikely(!success)) {
1062 		FTL_ERRLOG(io->dev, "Non-volatile cache write failed at %"PRIx64"\n",
1063 			   io->addr);
1064 		io->status = -EIO;
1065 		ftl_nv_cache_submit_cb_done(io);
1066 	} else {
1067 		ftl_nv_cache_l2p_update(io);
1068 	}
1069 }
1070 
1071 static void
1072 nv_cache_write(void *_io)
1073 {
1074 	struct ftl_io *io = _io;
1075 	struct spdk_ftl_dev *dev = io->dev;
1076 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1077 	int rc;
1078 
1079 	rc = ftl_nv_cache_bdev_writev_blocks_with_md(dev,
1080 			nv_cache->bdev_desc, nv_cache->cache_ioch,
1081 			io->iov, io->iov_cnt, io->md,
1082 			ftl_addr_to_nvc_offset(dev, io->addr), io->num_blocks,
1083 			ftl_nv_cache_submit_cb, io);
1084 	if (spdk_unlikely(rc)) {
1085 		if (rc == -ENOMEM) {
1086 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
1087 			io->bdev_io_wait.bdev = bdev;
1088 			io->bdev_io_wait.cb_fn = nv_cache_write;
1089 			io->bdev_io_wait.cb_arg = io;
1090 			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &io->bdev_io_wait);
1091 		} else {
1092 			ftl_abort();
1093 		}
1094 	}
1095 }
1096 
1097 static void
1098 ftl_nv_cache_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
1099 {
1100 	struct ftl_io *io = pin_ctx->cb_ctx;
1101 	size_t i;
1102 
1103 	if (spdk_unlikely(status != 0)) {
1104 		/* Retry on the internal L2P fault */
1105 		FTL_ERRLOG(dev, "Cannot pin LBA for NV cache write, failed at %"PRIx64"\n",
1106 			   io->addr);
1107 		io->status = -EAGAIN;
1108 		ftl_nv_cache_submit_cb_done(io);
1109 		return;
1110 	}
1111 
1112 	/* Remember previous l2p mapping to resolve conflicts in case of outstanding write-after-write */
1113 	for (i = 0; i < io->num_blocks; ++i) {
1114 		io->map[i] = ftl_l2p_get(dev, ftl_io_get_lba(io, i));
1115 	}
1116 
1117 	assert(io->iov_pos == 0);
1118 
1119 	ftl_trace_submission(io->dev, io, io->addr, io->num_blocks);
1120 
1121 	nv_cache_write(io);
1122 }
1123 
1124 bool
1125 ftl_nv_cache_write(struct ftl_io *io)
1126 {
1127 	struct spdk_ftl_dev *dev = io->dev;
1128 	uint64_t cache_offset;
1129 
1130 	io->md = ftl_mempool_get(dev->nv_cache.md_pool);
1131 	if (spdk_unlikely(!io->md)) {
1132 		return false;
1133 	}
1134 
1135 	/* Reserve space in the write buffer cache */
1136 	cache_offset = ftl_nv_cache_get_wr_buffer(&dev->nv_cache, io);
1137 	if (cache_offset == FTL_LBA_INVALID) {
1138 		/* No free space in NV cache, resubmit request */
1139 		ftl_mempool_put(dev->nv_cache.md_pool, io->md);
1140 		return false;
1141 	}
1142 	io->addr = ftl_addr_from_nvc_offset(dev, cache_offset);
1143 	io->nv_cache_chunk = dev->nv_cache.chunk_current;
1144 
1145 	ftl_nv_cache_fill_md(io);
1146 	ftl_l2p_pin(io->dev, io->lba, io->num_blocks,
1147 		    ftl_nv_cache_pin_cb, io,
1148 		    &io->l2p_pin_ctx);
1149 
1150 	dev->nv_cache.throttle.blocks_submitted += io->num_blocks;
1151 
1152 	return true;
1153 }
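/*
 * User write path summary: a VSS metadata buffer is taken from md_pool, space is reserved in
 * the current open chunk, per-block metadata (LBA and chunk sequence ID) is filled in and the
 * affected LBAs are pinned in the L2P; the bdev write and the L2P update then happen in
 * ftl_nv_cache_pin_cb() and ftl_nv_cache_submit_cb() above.
 */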
1154 
1155 int
1156 ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
1157 		  spdk_bdev_io_completion_cb cb, void *cb_arg)
1158 {
1159 	int rc;
1160 	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1161 
1162 	assert(ftl_addr_in_nvc(io->dev, addr));
1163 
1164 	rc = ftl_nv_cache_bdev_read_blocks_with_md(io->dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1165 			ftl_io_iovec_addr(io), NULL, ftl_addr_to_nvc_offset(io->dev, addr),
1166 			num_blocks, cb, cb_arg);
1167 
1168 	return rc;
1169 }
1170 
1171 bool
1172 ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache)
1173 {
1174 	struct ftl_nv_cache_compactor *compactor;
1175 
1176 	if (nv_cache->compaction_active_count) {
1177 		return false;
1178 	}
1179 
1180 	TAILQ_FOREACH(compactor, &nv_cache->compactor_list, entry) {
1181 		if (compactor->rd->iter.idx != 0 || compactor->wr->iter.idx != 0) {
1182 			return false;
1183 		}
1184 	}
1185 
1186 	if (nv_cache->chunk_open_count > 0) {
1187 		return false;
1188 	}
1189 
1190 	return true;
1191 }
1192 
1193 static void
1194 ftl_nv_cache_compaction_reset(struct ftl_nv_cache_compactor *compactor)
1195 {
1196 	struct ftl_rq *rd = compactor->rd;
1197 	struct ftl_rq *wr = compactor->wr;
1198 	uint64_t lba;
1199 	uint64_t i;
1200 
1201 	for (i = rd->iter.idx; i < rd->iter.count; i++) {
1202 		lba = ((union ftl_md_vss *)rd->entries[i].io_md)->nv_cache.lba;
1203 		if (lba != FTL_LBA_INVALID) {
1204 			ftl_l2p_unpin(rd->dev, lba, 1);
1205 		}
1206 	}
1207 
1208 	rd->iter.idx = 0;
1209 	rd->iter.count = 0;
1210 
1211 	for (i = 0; i < wr->iter.idx; i++) {
1212 		lba = wr->entries[i].lba;
1213 		assert(lba != FTL_LBA_INVALID);
1214 		ftl_l2p_unpin(wr->dev, lba, 1);
1215 	}
1216 
1217 	wr->iter.idx = 0;
1218 }
1219 
1220 void
1221 ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
1222 		      uint64_t offset, uint64_t lba)
1223 {
1224 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1225 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1226 
1227 	ftl_lba_store(dev, p2l_map->chunk_map, offset, lba);
1228 }
1229 
1230 uint64_t
1231 ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset)
1232 {
1233 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1234 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1235 
1236 	return ftl_lba_load(dev, p2l_map->chunk_map, offset);
1237 }
1238 
1239 static void
1240 ftl_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
1241 {
1242 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1243 	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1244 	uint64_t offset;
1245 
1246 	offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
1247 	ftl_chunk_map_set_lba(chunk, offset, lba);
1248 }
1249 
1250 struct ftl_nv_cache_chunk *
1251 ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
1252 {
1253 	struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks;
1254 	uint64_t chunk_idx;
1255 	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1256 
1257 	assert(chunk != NULL);
1258 	chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks;
1259 	chunk += chunk_idx;
1260 
1261 	return chunk;
1262 }
1263 
1264 void
1265 ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
1266 {
1267 	struct ftl_nv_cache_chunk *chunk;
1268 
1269 	chunk = ftl_nv_cache_get_chunk_from_addr(dev, addr);
1270 
1271 	assert(lba != FTL_LBA_INVALID);
1272 
1273 	ftl_chunk_set_addr(chunk, lba, addr);
1274 	ftl_bitmap_set(dev->valid_map, addr);
1275 }
1276 
1277 static void
1278 ftl_nv_cache_throttle_update(struct ftl_nv_cache *nv_cache)
1279 {
1280 	double err;
1281 	double modifier;
1282 
1283 	err = ((double)nv_cache->chunk_free_count - nv_cache->chunk_free_target) / nv_cache->chunk_count;
1284 	modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP * err;
1285 
1286 	if (modifier < FTL_NV_CACHE_THROTTLE_MODIFIER_MIN) {
1287 		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MIN;
1288 	} else if (modifier > FTL_NV_CACHE_THROTTLE_MODIFIER_MAX) {
1289 		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MAX;
1290 	}
1291 
1292 	if (spdk_unlikely(nv_cache->compaction_sma == 0 || nv_cache->compaction_active_count == 0)) {
1293 		nv_cache->throttle.blocks_submitted_limit = UINT64_MAX;
1294 	} else {
1295 		double blocks_per_interval = nv_cache->compaction_sma * nv_cache->throttle.interval_tsc /
1296 					     FTL_BLOCK_SIZE;
1297 		nv_cache->throttle.blocks_submitted_limit = blocks_per_interval * (1.0 + modifier);
1298 	}
1299 }
1300 
1301 static void
1302 ftl_nv_cache_process_throttle(struct ftl_nv_cache *nv_cache)
1303 {
1304 	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
1305 
1306 	if (spdk_unlikely(!nv_cache->throttle.start_tsc)) {
1307 		nv_cache->throttle.start_tsc = tsc;
1308 	} else if (tsc - nv_cache->throttle.start_tsc >= nv_cache->throttle.interval_tsc) {
1309 		ftl_nv_cache_throttle_update(nv_cache);
1310 		nv_cache->throttle.start_tsc = tsc;
1311 		nv_cache->throttle.blocks_submitted = 0;
1312 	}
1313 }
1314 
1315 static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk);
1316 
1317 void
1318 ftl_nv_cache_process(struct spdk_ftl_dev *dev)
1319 {
1320 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1321 
1322 	assert(dev->nv_cache.bdev_desc);
1323 
1324 	if (nv_cache->chunk_open_count < FTL_MAX_OPEN_CHUNKS && spdk_likely(!nv_cache->halt) &&
1325 	    !TAILQ_EMPTY(&nv_cache->chunk_free_list)) {
1326 		struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);
1327 		TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
1328 		TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
1329 		nv_cache->chunk_free_count--;
1330 		chunk->md->seq_id = ftl_get_next_seq_id(dev);
1331 		ftl_chunk_open(chunk);
1332 	}
1333 
1334 	if (is_compaction_required(nv_cache) && !TAILQ_EMPTY(&nv_cache->compactor_list)) {
1335 		struct ftl_nv_cache_compactor *comp =
1336 			TAILQ_FIRST(&nv_cache->compactor_list);
1337 
1338 		TAILQ_REMOVE(&nv_cache->compactor_list, comp, entry);
1339 
1340 		compaction_process_start(comp);
1341 	}
1342 
1343 	ftl_chunk_persist_free_state(nv_cache);
1344 
1345 	if (spdk_unlikely(nv_cache->halt)) {
1346 		struct ftl_nv_cache_compactor *compactor;
1347 
1348 		TAILQ_FOREACH(compactor, &nv_cache->compactor_list, entry) {
1349 			ftl_nv_cache_compaction_reset(compactor);
1350 		}
1351 	}
1352 
1353 	ftl_nv_cache_process_throttle(nv_cache);
1354 }
1355 
1356 static bool
1357 ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
1358 {
1359 	if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
1360 		return true;
1361 	} else {
1362 		return false;
1363 	}
1364 }
1365 
1366 bool
1367 ftl_nv_cache_throttle(struct spdk_ftl_dev *dev)
1368 {
1369 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1370 
1371 	if (dev->nv_cache.throttle.blocks_submitted >= nv_cache->throttle.blocks_submitted_limit ||
1372 	    ftl_nv_cache_full(nv_cache)) {
1373 		return true;
1374 	}
1375 
1376 	return false;
1377 }
1378 
1379 static void
1380 chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk)
1381 {
1382 
1383 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1384 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1385 
1386 	ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1387 	p2l_map->chunk_map = NULL;
1388 
1389 	ftl_chunk_free_md_entry(chunk);
1390 }
1391 
1392 int
1393 ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache)
1394 {
1395 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1396 	struct ftl_nv_cache_chunk *chunk;
1397 	int status = 0;
1398 	uint64_t i;
1399 
1400 	assert(nv_cache->chunk_open_count == 0);
1401 
1402 	if (nv_cache->compaction_active_count) {
1403 		FTL_ERRLOG(dev, "Cannot save NV cache state, compaction in progress\n");
1404 		return -EINVAL;
1405 	}
1406 
1407 	chunk = nv_cache->chunks;
1408 	if (!chunk) {
1409 		FTL_ERRLOG(dev, "Cannot save NV cache state, no NV cache metadata\n");
1410 		return -ENOMEM;
1411 	}
1412 
1413 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1414 		nvc_validate_md(nv_cache, chunk->md);
1415 
1416 		if (chunk->md->read_pointer)  {
1417 			/* Only full chunks can be compacted */
1418 			if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1419 				assert(0);
1420 				status = -EINVAL;
1421 				break;
1422 			}
1423 
1424 			/*
1425 			 * Chunk in the middle of compaction, start over after
1426 			 * load
1427 			 */
1428 			chunk->md->read_pointer = chunk->md->blocks_compacted = 0;
1429 		} else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
1430 			/* Full chunk */
1431 		} else if (0 == chunk->md->blocks_written) {
1432 			/* Empty chunk */
1433 		} else {
1434 			assert(0);
1435 			status = -EINVAL;
1436 			break;
1437 		}
1438 	}
1439 
1440 	if (status) {
1441 		FTL_ERRLOG(dev, "Cannot save NV cache state, inconsistent NV cache "
1442 			   "metadata\n");
1443 	}
1444 
1445 	return status;
1446 }
1447 
1448 static int
1449 sort_chunks_cmp(const void *a, const void *b)
1450 {
1451 	struct ftl_nv_cache_chunk *a_chunk = *(struct ftl_nv_cache_chunk **)a;
1452 	struct ftl_nv_cache_chunk *b_chunk = *(struct ftl_nv_cache_chunk **)b;
1453 
1454 	return a_chunk->md->seq_id - b_chunk->md->seq_id;
1455 }
1456 
1457 static int
1458 sort_chunks(struct ftl_nv_cache *nv_cache)
1459 {
1460 	struct ftl_nv_cache_chunk **chunks_list;
1461 	struct ftl_nv_cache_chunk *chunk;
1462 	uint32_t i;
1463 
1464 	if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
1465 		return 0;
1466 	}
1467 
1468 	chunks_list = calloc(nv_cache->chunk_full_count,
1469 			     sizeof(chunks_list[0]));
1470 	if (!chunks_list) {
1471 		return -ENOMEM;
1472 	}
1473 
1474 	i = 0;
1475 	TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) {
1476 		chunks_list[i] = chunk;
1477 		i++;
1478 	}
1479 	assert(i == nv_cache->chunk_full_count);
1480 
1481 	qsort(chunks_list, nv_cache->chunk_full_count, sizeof(chunks_list[0]),
1482 	      sort_chunks_cmp);
1483 
1484 	TAILQ_INIT(&nv_cache->chunk_full_list);
1485 	for (i = 0; i < nv_cache->chunk_full_count; i++) {
1486 		chunk = chunks_list[i];
1487 		TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1488 	}
1489 
1490 	free(chunks_list);
1491 	return 0;
1492 }
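/*
 * Sorting the full-chunk list by open sequence ID restores the original write order after a
 * restart, so compaction consumes the oldest chunks first.
 */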
1493 
1494 static int
1495 chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk)
1496 {
1497 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1498 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1499 
1500 	assert(p2l_map->ref_cnt == 0);
1501 	assert(p2l_map->chunk_map == NULL);
1502 
1503 	p2l_map->chunk_map = ftl_mempool_get(nv_cache->p2l_pool);
1504 
1505 	if (!p2l_map->chunk_map) {
1506 		return -ENOMEM;
1507 	}
1508 
1509 	if (ftl_chunk_alloc_md_entry(chunk)) {
1510 		ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1511 		p2l_map->chunk_map = NULL;
1512 		return -ENOMEM;
1513 	}
1514 
1515 	/* Set all P2L map entries to FTL_LBA_INVALID */
1516 	memset(p2l_map->chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
1517 
1518 	return 0;
1519 }
1520 
1521 int
1522 ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)
1523 {
1524 	struct ftl_nv_cache_chunk *chunk;
1525 	uint64_t chunks_number, offset, i;
1526 	int status = 0;
1527 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1528 
1529 	nv_cache->chunk_current = NULL;
1530 	TAILQ_INIT(&nv_cache->chunk_free_list);
1531 	TAILQ_INIT(&nv_cache->chunk_full_list);
1532 	nv_cache->chunk_full_count = nv_cache->chunk_free_count = 0;
1533 
1534 	assert(nv_cache->chunk_open_count == 0);
1535 	offset = nvc_data_offset(nv_cache);
1536 	chunk = nv_cache->chunks;
1537 	if (!chunk) {
1538 		FTL_ERRLOG(dev, "No NV cache metadata\n");
1539 		return -1;
1540 	}
1541 
1542 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1543 		chunk->nv_cache = nv_cache;
1544 		nvc_validate_md(nv_cache, chunk->md);
1545 
1546 		if (offset != chunk->offset) {
1547 			status = -EINVAL;
1548 			goto error;
1549 		}
1550 
1551 		if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
1552 			/* Chunk is full, move it to the full list */
1553 			TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1554 			nv_cache->chunk_full_count++;
1555 		} else if (0 == chunk->md->blocks_written) {
1556 			/* Chunk is empty, move it to the free list */
1557 			TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
1558 			nv_cache->chunk_free_count++;
1559 		} else {
1560 			status = -EINVAL;
1561 			goto error;
1562 		}
1563 
1564 		offset += nv_cache->chunk_blocks;
1565 	}
1566 
1567 	chunks_number = nv_cache->chunk_free_count + nv_cache->chunk_full_count;
1568 	assert(nv_cache->chunk_current == NULL);
1569 
1570 	if (chunks_number != nv_cache->chunk_count) {
1571 		FTL_ERRLOG(dev, "Inconsistent NV cache metadata\n");
1572 		status = -EINVAL;
1573 		goto error;
1574 	}
1575 
1576 	status = sort_chunks(nv_cache);
1577 	if (status) {
1578 		FTL_ERRLOG(dev, "FTL NV Cache: sorting chunks ERROR\n");
1579 	}
1580 
1581 	FTL_NOTICELOG(dev, "FTL NV Cache: full chunks = %lu, empty chunks = %lu\n",
1582 		      nv_cache->chunk_full_count, nv_cache->chunk_free_count);
1583 
1584 	if (0 == status) {
1585 		FTL_NOTICELOG(dev, "FTL NV Cache: state loaded successfully\n");
1586 	} else {
1587 		FTL_ERRLOG(dev, "FTL NV Cache: loading state ERROR\n");
1588 	}
1589 
1590 error:
1591 	return status;
1592 }
1593 
1594 void
1595 ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
1596 			    uint64_t *close_seq_id)
1597 {
1598 	uint64_t i, o_seq_id = 0, c_seq_id = 0;
1599 	struct ftl_nv_cache_chunk *chunk;
1600 
1601 	chunk = nv_cache->chunks;
1602 	assert(chunk);
1603 
1604 	/* Iterate over chunks and get their max open and close seq id */
1605 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1606 		o_seq_id = spdk_max(o_seq_id, chunk->md->seq_id);
1607 		c_seq_id = spdk_max(c_seq_id, chunk->md->close_seq_id);
1608 	}
1609 
1610 	*open_seq_id = o_seq_id;
1611 	*close_seq_id = c_seq_id;
1612 }
1613 
1614 typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status);
1615 
1616 static void
1617 write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1618 {
1619 	struct ftl_basic_rq *brq = arg;
1620 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1621 
1622 	ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1623 
1624 	brq->success = success;
1625 	if (spdk_likely(success)) {
1626 		chunk_advance_blocks(chunk->nv_cache, chunk, brq->num_blocks);
1627 	}
1628 
1629 	spdk_bdev_free_io(bdev_io);
1630 	brq->owner.cb(brq);
1631 }
1632 
1633 static void
1634 _ftl_chunk_basic_rq_write(void *_brq)
1635 {
1636 	struct ftl_basic_rq *brq = _brq;
1637 	struct ftl_nv_cache *nv_cache = brq->io.chunk->nv_cache;
1638 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1639 	int rc;
1640 
1641 	rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1642 			brq->io_payload, NULL, brq->io.addr,
1643 			brq->num_blocks, write_brq_end, brq);
1644 	if (spdk_unlikely(rc)) {
1645 		if (rc == -ENOMEM) {
1646 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
1647 			brq->io.bdev_io_wait.bdev = bdev;
1648 			brq->io.bdev_io_wait.cb_fn = _ftl_chunk_basic_rq_write;
1649 			brq->io.bdev_io_wait.cb_arg = brq;
1650 			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &brq->io.bdev_io_wait);
1651 		} else {
1652 			ftl_abort();
1653 		}
1654 	}
1655 }
1656 
1657 static void
1658 ftl_chunk_basic_rq_write(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1659 {
1660 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1661 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1662 
1663 	brq->io.chunk = chunk;
1664 	brq->success = false;
1665 
1666 	_ftl_chunk_basic_rq_write(brq);
1667 
1668 	chunk->md->write_pointer += brq->num_blocks;
1669 	dev->stats.io_activity_total += brq->num_blocks;
1670 }
1671 
1672 static void
1673 read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1674 {
1675 	struct ftl_basic_rq *brq = arg;
1676 
1677 	ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1678 
1679 	brq->success = success;
1680 
1681 	brq->owner.cb(brq);
1682 	spdk_bdev_free_io(bdev_io);
1683 }
1684 
1685 static int
1686 ftl_chunk_basic_rq_read(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1687 {
1688 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1689 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1690 	int rc;
1691 
1692 	brq->io.chunk = chunk;
1693 	brq->success = false;
1694 
1695 	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1696 			brq->io_payload, NULL, brq->io.addr, brq->num_blocks, read_brq_end, brq);
1697 
1698 	if (spdk_likely(!rc)) {
1699 		dev->stats.io_activity_total += brq->num_blocks;
1700 	}
1701 
1702 	return rc;
1703 }
1704 
1705 static void
1706 chunk_open_cb(int status, void *ctx)
1707 {
1708 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1709 
1710 	if (spdk_unlikely(status)) {
1711 #ifdef SPDK_FTL_RETRY_ON_ERROR
1712 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1713 		return;
1714 #else
1715 		ftl_abort();
1716 #endif
1717 	}
1718 
1719 	chunk->md->state = FTL_CHUNK_STATE_OPEN;
1720 }
1721 
1722 static void
1723 ftl_chunk_open(struct ftl_nv_cache_chunk *chunk)
1724 {
1725 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1726 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1727 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1728 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1729 
1730 	if (chunk_alloc_p2l_map(chunk)) {
1731 		assert(0);
1732 		/*
1733 		 * We control the number of open chunks and it must be consistent with the size of the
1734 		 * chunk P2L map pool
1735 		 */
1736 		ftl_abort();
1737 		return;
1738 	}
1739 
1740 	chunk->nv_cache->chunk_open_count++;
1741 
1742 	assert(chunk->md->write_pointer == 0);
1743 	assert(chunk->md->blocks_written == 0);
1744 
1745 	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1746 	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_OPEN;
1747 	p2l_map->chunk_dma_md->p2l_map_checksum = 0;
1748 
1749 	ftl_md_persist_entry(md, get_chunk_idx(chunk), p2l_map->chunk_dma_md,
1750 			     NULL, chunk_open_cb, chunk,
1751 			     &chunk->md_persist_entry_ctx);
1752 }
1753 
1754 static void
1755 chunk_close_cb(int status, void *ctx)
1756 {
1757 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1758 
1759 	assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);
1760 
1761 	if (spdk_likely(!status)) {
1762 		chunk->md->p2l_map_checksum = chunk->p2l_map.chunk_dma_md->p2l_map_checksum;
1763 		chunk_free_p2l_map(chunk);
1764 
1765 		assert(chunk->nv_cache->chunk_open_count > 0);
1766 		chunk->nv_cache->chunk_open_count--;
1767 
1768 		/* Chunk is full, move it to the full list */
1769 		TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
1770 		chunk->nv_cache->chunk_full_count++;
1771 
1772 		chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
1773 
1774 		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1775 	} else {
1776 #ifdef SPDK_FTL_RETRY_ON_ERROR
1777 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1778 #else
1779 		ftl_abort();
1780 #endif
1781 	}
1782 }
1783 
1784 static void
1785 chunk_map_write_cb(struct ftl_basic_rq *brq)
1786 {
1787 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1788 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1789 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1790 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1791 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1792 	uint32_t chunk_map_crc;
1793 
1794 	if (spdk_likely(brq->success)) {
1795 		chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
1796 						   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
1797 		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1798 		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
1799 		p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
1800 		ftl_md_persist_entry(md, get_chunk_idx(chunk), chunk->p2l_map.chunk_dma_md,
1801 				     NULL, chunk_close_cb, chunk,
1802 				     &chunk->md_persist_entry_ctx);
1803 	} else {
1804 #ifdef SPDK_FTL_RETRY_ON_ERROR
1805 		/* retry */
1806 		chunk->md->write_pointer -= brq->num_blocks;
1807 		ftl_chunk_basic_rq_write(chunk, brq);
1808 #else
1809 		ftl_abort();
1810 #endif
1811 	}
1812 }
1813 
1814 static void
1815 ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
1816 {
1817 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1818 	struct ftl_basic_rq *brq = &chunk->metadata_rq;
1819 	void *metadata = chunk->p2l_map.chunk_map;
1820 
1821 	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
1822 	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
1823 	ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk);
1824 
1825 	assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
1826 	brq->io.addr = chunk->offset + chunk->md->write_pointer;
1827 
1828 	ftl_chunk_basic_rq_write(chunk, brq);
1829 }
1830 
1831 static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
1832 				  void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx);
1833 static void read_tail_md_cb(struct ftl_basic_rq *brq);
1834 static void recover_open_chunk_cb(struct ftl_basic_rq *brq);
1835 
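/*
 * Persist completion used while recovering an open chunk. On success the P2L map checksum
 * and CLOSED state are reflected in the in-memory chunk metadata; on failure the parent
 * request is marked unsuccessful. Either way control returns through read_tail_md_cb.
 */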
1836 static void
1837 restore_chunk_close_cb(int status, void *ctx)
1838 {
1839 	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)ctx;
1840 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
1841 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1842 
1843 	if (spdk_unlikely(status)) {
1844 		parent->success = false;
1845 	} else {
1846 		chunk->md->p2l_map_checksum = p2l_map->chunk_dma_md->p2l_map_checksum;
1847 		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1848 	}
1849 
1850 	read_tail_md_cb(parent);
1851 }
1852 
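/*
 * Called once the reconstructed tail metadata of a recovered chunk has been written.
 * Restores the original owner callback, then on success computes the P2L map checksum
 * and persists the chunk metadata as CLOSED with the write pointer and blocks_written
 * set to the full chunk size.
 */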
1853 static void
1854 restore_fill_p2l_map_cb(struct ftl_basic_rq *parent)
1855 {
1856 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
1857 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1858 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1859 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1860 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1861 	uint32_t chunk_map_crc;
1862 
1863 	/* Set original callback */
1864 	ftl_basic_rq_set_owner(parent, recover_open_chunk_cb, parent->owner.priv);
1865 
1866 	if (spdk_unlikely(!parent->success)) {
1867 		read_tail_md_cb(parent);
1868 		return;
1869 	}
1870 
1871 	chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
1872 					   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
1873 	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1874 	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
1875 	p2l_map->chunk_dma_md->write_pointer = chunk->nv_cache->chunk_blocks;
1876 	p2l_map->chunk_dma_md->blocks_written = chunk->nv_cache->chunk_blocks;
1877 	p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
1878 
1879 	ftl_md_persist_entry(md, get_chunk_idx(chunk), p2l_map->chunk_dma_md, NULL,
1880 			     restore_chunk_close_cb, parent, &chunk->md_persist_entry_ctx);
1881 }
1882 
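/*
 * Write the reconstructed tail metadata (P2L map) of a recovered open chunk at its tail
 * offset, assigning a new close sequence id first.
 */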
1883 static void
1884 restore_fill_tail_md(struct ftl_basic_rq *parent, struct ftl_nv_cache_chunk *chunk)
1885 {
1886 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1887 	void *metadata;
1888 
1889 	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
1890 
1891 	metadata = chunk->p2l_map.chunk_map;
1892 	ftl_basic_rq_init(dev, parent, metadata, chunk->nv_cache->tail_md_chunk_blocks);
1893 	ftl_basic_rq_set_owner(parent, restore_fill_p2l_map_cb, parent->owner.priv);
1894 
1895 	parent->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
1896 	parent->io.chunk = chunk;
1897 
1898 	ftl_chunk_basic_rq_write(chunk, parent);
1899 }
1900 
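/*
 * Completion of a user data read issued while recovering an open chunk. The P2L map is
 * rebuilt from the per-block metadata (LBAs whose sequence id does not match the chunk are
 * invalidated), then either the next batch of blocks is read or, once the data region has
 * been covered, the reconstructed tail metadata is written out.
 */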
1901 static void
1902 read_open_chunk_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1903 {
1904 	struct ftl_rq *rq = (struct ftl_rq *)cb_arg;
1905 	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)rq->owner.priv;
1906 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
1907 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1908 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1909 	union ftl_md_vss *md;
1910 	uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
1911 	uint64_t len = bdev_io->u.bdev.num_blocks;
1912 	ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);
1913 	int rc;
1914 
1915 	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);
1916 
1917 	spdk_bdev_free_io(bdev_io);
1918 
1919 	if (!success) {
1920 		parent->success = false;
1921 		read_tail_md_cb(parent);
1922 		return;
1923 	}
1924 
1925 	while (rq->iter.idx < rq->iter.count) {
1926 		/* Get metadata */
1927 		md = rq->entries[rq->iter.idx].io_md;
1928 		if (md->nv_cache.seq_id != chunk->md->seq_id) {
1929 			md->nv_cache.lba = FTL_LBA_INVALID;
1930 		}
1931 		/*
1932 		 * The p2l map contains effectively random data at this point (since it contains arbitrary
1933 		 * blocks from a potentially unfilled tail md), so even LBA_INVALID needs to be set explicitly
1934 		 */
1935 
1936 		ftl_chunk_set_addr(chunk, md->nv_cache.lba, addr + rq->iter.idx);
1937 		rq->iter.idx++;
1938 	}
1939 
1940 	if (cache_offset + len < chunk->offset + chunk_tail_md_offset(nv_cache)) {
1941 		cache_offset += len;
1942 		len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - cache_offset);
1943 		rq->iter.idx = 0;
1944 		rq->iter.count = len;
1945 
1946 		rc = ftl_nv_cache_bdev_readv_blocks_with_md(dev, nv_cache->bdev_desc,
1947 				nv_cache->cache_ioch,
1948 				rq->io_vec, len,
1949 				rq->io_md,
1950 				cache_offset, len,
1951 				read_open_chunk_cb,
1952 				rq);
1953 
1954 		if (rc) {
1955 			ftl_rq_del(rq);
1956 			parent->success = false;
1957 			read_tail_md_cb(parent);
1958 			return;
1959 		}
1960 	} else {
1961 		ftl_rq_del(rq);
1962 		restore_fill_tail_md(parent, chunk);
1963 	}
1964 }
1965 
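/*
 * Start recovery of a single open chunk: reset the P2L map to invalid LBAs and issue the
 * first read of user data together with its metadata; read_open_chunk_cb rebuilds the map
 * from there.
 */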
1966 static void
1967 restore_open_chunk(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *parent)
1968 {
1969 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1970 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1971 	struct ftl_rq *rq;
1972 	uint64_t addr;
1973 	uint64_t len = dev->xfer_size;
1974 	int rc;
1975 
1976 	/*
1977 	 * We've just read the p2l map, prefill it with INVALID LBA.
1978 	 * TODO: this is needed because tail md blocks (the p2l map) are also represented in the p2l map, instead of just the user data region
1979 	 */
1980 	memset(chunk->p2l_map.chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
1981 
1982 	/* Need to read user data, recalculate chunk's P2L and write tail md with it */
1983 	rq = ftl_rq_new(dev, dev->nv_cache.md_size);
1984 	if (!rq) {
1985 		parent->success = false;
1986 		read_tail_md_cb(parent);
1987 		return;
1988 	}
1989 
1990 	rq->owner.priv = parent;
1991 	rq->iter.idx = 0;
1992 	rq->iter.count = len;
1993 
1994 	addr = chunk->offset;
1995 
1996 	len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - addr);
1997 
1998 	rc = ftl_nv_cache_bdev_readv_blocks_with_md(dev, nv_cache->bdev_desc,
1999 			nv_cache->cache_ioch,
2000 			rq->io_vec, len,
2001 			rq->io_md,
2002 			addr, len,
2003 			read_open_chunk_cb,
2004 			rq);
2005 
2006 	if (rc) {
2007 		ftl_rq_del(rq);
2008 		parent->success = false;
2009 		read_tail_md_cb(parent);
2010 	}
2011 }
2012 
2013 static void
2014 read_tail_md_cb(struct ftl_basic_rq *brq)
2015 {
2016 	brq->owner.cb(brq);
2017 }
2018 
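/*
 * Read a chunk's tail metadata (P2L map) from the cache device into the chunk's P2L map
 * buffer and invoke the given callback on completion.
 */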
2019 static int
2020 ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
2021 		       void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx)
2022 {
2023 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2024 	void *metadata;
2025 	int rc;
2026 
2027 	metadata = chunk->p2l_map.chunk_map;
2028 	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2029 	ftl_basic_rq_set_owner(brq, cb, cb_ctx);
2030 
2031 	brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2032 	rc = ftl_chunk_basic_rq_read(chunk, brq);
2033 
2034 	return rc;
2035 }
2036 
2037 struct restore_chunk_md_ctx {
2038 	ftl_chunk_md_cb cb;
2039 	void *cb_ctx;
2040 	int status;
2041 	uint64_t qd;
2042 	uint64_t id;
2043 };
2044 
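/* Sanity check that the per-state chunk counters add up to the total chunk count */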
2045 static inline bool
2046 is_chunk_count_valid(struct ftl_nv_cache *nv_cache)
2047 {
2048 	uint64_t chunk_count = 0;
2049 
2050 	chunk_count += nv_cache->chunk_open_count;
2051 	chunk_count += nv_cache->chunk_free_count;
2052 	chunk_count += nv_cache->chunk_full_count;
2053 	chunk_count += nv_cache->chunk_comp_count;
2054 
2055 	return chunk_count == nv_cache->chunk_count;
2056 }
2057 
2058 static void
2059 walk_tail_md_cb(struct ftl_basic_rq *brq)
2060 {
2061 	struct ftl_mngt_process *mngt = brq->owner.priv;
2062 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2063 	struct restore_chunk_md_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
2064 	int rc = 0;
2065 
2066 	if (brq->success) {
2067 		rc = ctx->cb(chunk, ctx->cb_ctx);
2068 	} else {
2069 		rc = -EIO;
2070 	}
2071 
2072 	if (rc) {
2073 		ctx->status = rc;
2074 	}
2075 	ctx->qd--;
2076 	chunk_free_p2l_map(chunk);
2077 	ftl_mngt_continue_step(mngt);
2078 }
2079 
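/*
 * Walk all chunks marked for recovery (skipping those already covered by the given seq id),
 * read each chunk's tail metadata and hand it to the provided callback. The walk is bounded
 * by P2L map availability and re-enters itself via ftl_mngt_continue_step until every chunk
 * has been processed.
 */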
2080 static void
2081 ftl_mngt_nv_cache_walk_tail_md(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2082 			       uint64_t seq_id, ftl_chunk_md_cb cb, void *cb_ctx)
2083 {
2084 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2085 	struct restore_chunk_md_ctx *ctx;
2086 
2087 	ctx = ftl_mngt_get_step_ctx(mngt);
2088 	if (!ctx) {
2089 		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*ctx))) {
2090 			ftl_mngt_fail_step(mngt);
2091 			return;
2092 		}
2093 		ctx = ftl_mngt_get_step_ctx(mngt);
2094 		assert(ctx);
2095 
2096 		ctx->cb = cb;
2097 		ctx->cb_ctx = cb_ctx;
2098 	}
2099 
2100 	/*
2101 	 * This function generates a high queue depth and will utilize ftl_mngt_continue_step during completions to make sure all chunks
2102 	 * are processed before returning an error (if any were found) or continuing on.
2103 	 */
2104 	if (0 == ctx->qd && ctx->id == nvc->chunk_count) {
2105 		if (!is_chunk_count_valid(nvc)) {
2106 			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2107 			assert(false);
2108 			ctx->status = -EINVAL;
2109 		}
2110 
2111 		if (ctx->status) {
2112 			ftl_mngt_fail_step(mngt);
2113 		} else {
2114 			ftl_mngt_next_step(mngt);
2115 		}
2116 		return;
2117 	}
2118 
2119 	while (ctx->id < nvc->chunk_count) {
2120 		struct ftl_nv_cache_chunk *chunk = &nvc->chunks[ctx->id];
2121 		int rc;
2122 
2123 		if (!chunk->recovery) {
2124 			/* This chunk is empty and not used in recovery */
2125 			ctx->id++;
2126 			continue;
2127 		}
2128 
2129 		if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
2130 			ctx->id++;
2131 			continue;
2132 		}
2133 
2134 		if (chunk_alloc_p2l_map(chunk)) {
2135 			/* No more free P2L maps, break and continue later */
2136 			break;
2137 		}
2138 		ctx->id++;
2139 
2140 		rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, walk_tail_md_cb, mngt);
2141 
2142 		if (0 == rc) {
2143 			ctx->qd++;
2144 		} else {
2145 			chunk_free_p2l_map(chunk);
2146 			ctx->status = rc;
2147 		}
2148 	}
2149 
2150 	if (0 == ctx->qd) {
2151 		/*
2152 		 * A queue depth of 0 can happen when all leftover chunks are in the free state.
2153 		 * Additionally, ftl_chunk_read_tail_md could fail starting with the first IO in a given batch.
2154 		 * To streamline all potential error handling (since many chunks read their P2L at the same time),
2155 		 * we use ftl_mngt_continue_step to arrive at the same spot that checks for the mngt step end (see beginning of function).
2156 		 */
2157 		ftl_mngt_continue_step(mngt);
2158 	}
2159 
2160 }
2161 
2162 void
2163 ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2164 			      ftl_chunk_md_cb cb, void *cb_ctx)
2165 {
2166 	ftl_mngt_nv_cache_walk_tail_md(dev, mngt, dev->sb->ckpt_seq_id, cb, cb_ctx);
2167 }
2168 
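/*
 * Completion of the NVC_MD restore. Re-sorts chunks from the free list onto the open or
 * full list according to the restored state and flags non-free chunks for recovery; an
 * unknown state fails the management step.
 */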
2169 static void
2170 restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
2171 {
2172 	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
2173 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2174 	struct ftl_nv_cache_chunk *chunk;
2175 	uint64_t i;
2176 
2177 	if (status) {
2178 		/* Restore error, end step */
2179 		ftl_mngt_fail_step(mngt);
2180 		return;
2181 	}
2182 
2183 	for (i = 0; i < nvc->chunk_count; i++) {
2184 		chunk = &nvc->chunks[i];
2185 
2186 		switch (chunk->md->state) {
2187 		case FTL_CHUNK_STATE_FREE:
2188 			break;
2189 		case FTL_CHUNK_STATE_OPEN:
2190 			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2191 			nvc->chunk_free_count--;
2192 
2193 			TAILQ_INSERT_TAIL(&nvc->chunk_open_list, chunk, entry);
2194 			nvc->chunk_open_count++;
2195 
2196 			/* Chunk is not empty, mark it to be recovered */
2197 			chunk->recovery = true;
2198 			break;
2199 		case FTL_CHUNK_STATE_CLOSED:
2200 			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2201 			nvc->chunk_free_count--;
2202 
2203 			TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2204 			nvc->chunk_full_count++;
2205 
2206 			/* Chunk is not empty, mark it to be recovered */
2207 			chunk->recovery = true;
2208 			break;
2209 		default:
2210 			status = -EINVAL;
2211 		}
2212 	}
2213 
2214 	if (status) {
2215 		ftl_mngt_fail_step(mngt);
2216 	} else {
2217 		ftl_mngt_next_step(mngt);
2218 	}
2219 }
2220 
2221 void
2222 ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2223 {
2224 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2225 
2226 	md->owner.cb_ctx = mngt;
2227 	md->cb = restore_chunk_state_cb;
2228 	ftl_md_restore(md);
2229 }
2230 
2231 static void
2232 recover_open_chunk_cb(struct ftl_basic_rq *brq)
2233 {
2234 	struct ftl_mngt_process *mngt = brq->owner.priv;
2235 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2236 	struct ftl_nv_cache *nvc = chunk->nv_cache;
2237 	struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
2238 
2239 	chunk_free_p2l_map(chunk);
2240 
2241 	if (!brq->success) {
2242 		FTL_ERRLOG(dev, "Recovery chunk ERROR, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2243 			   chunk->md->seq_id);
2244 		ftl_mngt_fail_step(mngt);
2245 		return;
2246 	}
2247 
2248 	FTL_NOTICELOG(dev, "Recovered chunk, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2249 		      chunk->md->seq_id);
2250 
2251 	TAILQ_REMOVE(&nvc->chunk_open_list, chunk, entry);
2252 	nvc->chunk_open_count--;
2253 
2254 	TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2255 	nvc->chunk_full_count++;
2256 
2257 	/* This is a closed chunk */
2258 	chunk->md->write_pointer = nvc->chunk_blocks;
2259 	chunk->md->blocks_written = nvc->chunk_blocks;
2260 
2261 	ftl_mngt_continue_step(mngt);
2262 }
2263 
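/*
 * Management step recovering open chunks one at a time. Each pass takes the first chunk
 * from the open list, allocates a P2L map and rebuilds it from user data; once the open
 * list is empty the chunk counters are validated and the cache state is reloaded.
 */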
2264 void
2265 ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2266 {
2267 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2268 	struct ftl_nv_cache_chunk *chunk;
2269 	struct ftl_basic_rq *brq = ftl_mngt_get_step_ctx(mngt);
2270 
2271 	if (!brq) {
2272 		if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2273 			FTL_NOTICELOG(dev, "No open chunks to recover P2L\n");
2274 			ftl_mngt_next_step(mngt);
2275 			return;
2276 		}
2277 
2278 		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*brq))) {
2279 			ftl_mngt_fail_step(mngt);
2280 			return;
2281 		}
2282 		brq = ftl_mngt_get_step_ctx(mngt);
2283 		ftl_basic_rq_set_owner(brq, recover_open_chunk_cb, mngt);
2284 	}
2285 
2286 	if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2287 		if (!is_chunk_count_valid(nvc)) {
2288 			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2289 			ftl_mngt_fail_step(mngt);
2290 			return;
2291 		}
2292 
2293 		/*
2294 		 * Now that all chunks are loaded and closed, do the final step of
2295 		 * restoring the chunk states
2296 		 */
2297 		if (ftl_nv_cache_load_state(nvc)) {
2298 			ftl_mngt_fail_step(mngt);
2299 		} else {
2300 			ftl_mngt_next_step(mngt);
2301 		}
2302 	} else {
2303 		chunk = TAILQ_FIRST(&nvc->chunk_open_list);
2304 		if (chunk_alloc_p2l_map(chunk)) {
2305 			ftl_mngt_fail_step(mngt);
2306 			return;
2307 		}
2308 
2309 		brq->io.chunk = chunk;
2310 
2311 		FTL_NOTICELOG(dev, "Start recovery open chunk, offset = %"PRIu64", seq id %"PRIu64"\n",
2312 			      chunk->offset, chunk->md->seq_id);
2313 		restore_open_chunk(chunk, brq);
2314 	}
2315 }
2316 
2317 int
2318 ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache)
2319 {
2320 	/* chunk_current migrates to the closed state when closing; any others should already have been
2321 	 * moved to the free chunk list. We also need to wait for outstanding free md requests */
2322 	return nv_cache->chunk_open_count == 0 && nv_cache->chunk_free_persist_count == 0;
2323 }
2324 
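/*
 * Halt the NV cache: return still-empty chunks from the open list to the free state and
 * close the current chunk by padding the unwritten part of its data region.
 */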
2325 void
2326 ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
2327 {
2328 	struct ftl_nv_cache_chunk *chunk;
2329 	uint64_t free_space;
2330 
2331 	nv_cache->halt = true;
2332 
2333 	/* Set chunks on the open list back to the free state since no user data has been written to them */
2334 	while (!TAILQ_EMPTY(&nv_cache->chunk_open_list)) {
2335 		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2336 
2337 		/* Chunks are moved between lists on metadata update submission, but state is changed
2338 		 * on completion. Break early in such a case to make sure all the necessary resources
2339 		 * will be freed (during the next pass(es) of ftl_nv_cache_halt).
2340 		 */
2341 		if (chunk->md->state != FTL_CHUNK_STATE_OPEN) {
2342 			break;
2343 		}
2344 
2345 		TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
2346 		chunk_free_p2l_map(chunk);
2347 		memset(chunk->md, 0, sizeof(*chunk->md));
2348 		assert(nv_cache->chunk_open_count > 0);
2349 		nv_cache->chunk_open_count--;
2350 	}
2351 
2352 	/* Close the current chunk by skipping all unwritten blocks */
2353 	chunk = nv_cache->chunk_current;
2354 	if (chunk != NULL) {
2355 		nv_cache->chunk_current = NULL;
2356 		if (chunk_is_closed(chunk)) {
2357 			return;
2358 		}
2359 
2360 		free_space = chunk_get_free_space(nv_cache, chunk);
2361 		chunk->md->blocks_skipped = free_space;
2362 		chunk->md->blocks_written += free_space;
2363 		chunk->md->write_pointer += free_space;
2364 		ftl_chunk_close(chunk);
2365 	}
2366 }
2367 
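/*
 * Obtain a sequence id that orders a trim against data already in the cache. If there is
 * no current chunk, the seq id of the next open chunk (or 0) is returned; otherwise the
 * current chunk is padded (and closed when full) and its seq id + 1 is returned.
 */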
2368 uint64_t
2369 ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache)
2370 {
2371 	struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
2372 	uint64_t seq_id, free_space;
2373 
2374 	if (!chunk) {
2375 		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2376 		if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
2377 			return chunk->md->seq_id;
2378 		} else {
2379 			return 0;
2380 		}
2381 	}
2382 
2383 	if (chunk_is_closed(chunk)) {
2384 		return 0;
2385 	}
2386 
2387 	seq_id = nv_cache->chunk_current->md->seq_id;
2388 	free_space = chunk_get_free_space(nv_cache, chunk);
2389 
2390 	chunk->md->blocks_skipped = free_space;
2391 	chunk->md->blocks_written += free_space;
2392 	chunk->md->write_pointer += free_space;
2393 	if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
2394 		ftl_chunk_close(chunk);
2395 	}
2396 	nv_cache->chunk_current = NULL;
2397 
2398 	seq_id++;
2399 	return seq_id;
2400 }
2401