xref: /spdk/lib/ftl/ftl_nv_cache.c (revision 12fbe739a31b09aff0d05f354d4f3bbef99afc55)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 
7 #include "spdk/bdev.h"
8 #include "spdk/bdev_module.h"
9 #include "spdk/ftl.h"
10 #include "spdk/string.h"
11 
12 #include "ftl_nv_cache.h"
13 #include "ftl_nv_cache_io.h"
14 #include "ftl_core.h"
15 #include "ftl_band.h"
16 #include "utils/ftl_addr_utils.h"
17 #include "mngt/ftl_mngt.h"
18 
19 static inline uint64_t nvc_data_blocks(struct ftl_nv_cache *nv_cache) __attribute__((unused));
20 static struct ftl_nv_cache_compactor *compactor_alloc(struct spdk_ftl_dev *dev);
21 static void compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor);
22 static void compaction_process_ftl_done(struct ftl_rq *rq);
23 static void compaction_process_read_entry(void *arg);
24 
25 static inline const struct ftl_layout_region *
26 nvc_data_region(struct ftl_nv_cache *nv_cache)
27 {
28 	struct spdk_ftl_dev *dev;
29 
30 	dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
31 	return &dev->layout.region[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
32 }
33 
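/*
 * Sanity-check that a chunk metadata entry lies entirely within the NV cache metadata
 * buffer; abort if the pointer falls outside of it.
 */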
34 static inline void
35 nvc_validate_md(struct ftl_nv_cache *nv_cache,
36 		struct ftl_nv_cache_chunk_md *chunk_md)
37 {
38 	struct ftl_md *md = nv_cache->md;
39 	void *buffer = ftl_md_get_buffer(md);
40 	uint64_t size = ftl_md_get_buffer_size(md);
41 	void *ptr = chunk_md;
42 
43 	if (ptr < buffer) {
44 		ftl_abort();
45 	}
46 
47 	ptr += sizeof(*chunk_md);
48 	if (ptr > buffer + size) {
49 		ftl_abort();
50 	}
51 }
52 
53 static inline uint64_t
54 nvc_data_offset(struct ftl_nv_cache *nv_cache)
55 {
56 	return nvc_data_region(nv_cache)->current.offset;
57 }
58 
59 static inline uint64_t
60 nvc_data_blocks(struct ftl_nv_cache *nv_cache)
61 {
62 	return nvc_data_region(nv_cache)->current.blocks;
63 }
64 
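/*
 * Number of blocks reserved at the end of each chunk for its tail metadata (the P2L map):
 * one L2P address entry per data block in the chunk, rounded up to whole blocks.
 */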
65 size_t
66 ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache)
67 {
68 	struct spdk_ftl_dev *dev =  SPDK_CONTAINEROF(nv_cache,
69 				    struct spdk_ftl_dev, nv_cache);
70 	return spdk_divide_round_up(dev->layout.nvc.chunk_data_blocks * dev->layout.l2p.addr_size,
71 				    FTL_BLOCK_SIZE);
72 }
73 
74 static size_t
75 nv_cache_p2l_map_pool_elem_size(const struct ftl_nv_cache *nv_cache)
76 {
77 	/* Map pool element holds the whole tail md */
78 	return nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE;
79 }
80 
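/* Translate a chunk's block offset into its index within the contiguous chunks array. */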
81 static uint64_t
82 get_chunk_idx(struct ftl_nv_cache_chunk *chunk)
83 {
84 	struct ftl_nv_cache_chunk *first_chunk = chunk->nv_cache->chunks;
85 
86 	return (chunk->offset - first_chunk->offset) / chunk->nv_cache->chunk_blocks;
87 }
88 
89 int
90 ftl_nv_cache_init(struct spdk_ftl_dev *dev)
91 {
92 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
93 	struct ftl_nv_cache_chunk *chunk;
94 	struct ftl_nv_cache_chunk_md *md;
95 	struct ftl_nv_cache_compactor *compactor;
96 	uint64_t i, offset;
97 
98 	nv_cache->halt = true;
99 
100 	nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
101 	if (!nv_cache->md) {
102 		FTL_ERRLOG(dev, "No NV cache metadata object\n");
103 		return -1;
104 	}
105 
106 	nv_cache->md_pool = ftl_mempool_create(dev->conf.user_io_pool_size,
107 					       nv_cache->md_size * dev->xfer_size,
108 					       FTL_BLOCK_SIZE, SPDK_ENV_SOCKET_ID_ANY);
109 	if (!nv_cache->md_pool) {
110 		FTL_ERRLOG(dev, "Failed to initialize NV cache metadata pool\n");
111 		return -1;
112 	}
113 
114 	/*
115 	 * Initialize chunk info
116 	 */
117 	nv_cache->chunk_blocks = dev->layout.nvc.chunk_data_blocks;
118 	nv_cache->chunk_count = dev->layout.nvc.chunk_count;
119 	nv_cache->tail_md_chunk_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache);
120 
121 	/* Allocate chunks */
122 	nv_cache->chunks = calloc(nv_cache->chunk_count,
123 				  sizeof(nv_cache->chunks[0]));
124 	if (!nv_cache->chunks) {
125 		FTL_ERRLOG(dev, "Failed to initialize NV cache chunks\n");
126 		return -1;
127 	}
128 
129 	TAILQ_INIT(&nv_cache->chunk_free_list);
130 	TAILQ_INIT(&nv_cache->chunk_open_list);
131 	TAILQ_INIT(&nv_cache->chunk_full_list);
132 	TAILQ_INIT(&nv_cache->chunk_comp_list);
133 	TAILQ_INIT(&nv_cache->needs_free_persist_list);
134 
135 	/* First chunk metadata */
136 	md = ftl_md_get_buffer(nv_cache->md);
137 	if (!md) {
138 		FTL_ERRLOG(dev, "No NV cache metadata\n");
139 		return -1;
140 	}
141 
142 	nv_cache->chunk_free_count = nv_cache->chunk_count;
143 
144 	chunk = nv_cache->chunks;
145 	offset = nvc_data_offset(nv_cache);
146 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
147 		chunk->nv_cache = nv_cache;
148 		chunk->md = md;
149 		nvc_validate_md(nv_cache, md);
150 		chunk->offset = offset;
151 		offset += nv_cache->chunk_blocks;
152 		TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
153 	}
154 	assert(offset <= nvc_data_offset(nv_cache) + nvc_data_blocks(nv_cache));
155 
156 	/* Start compaction when the number of full chunks exceeds the given % of all chunks */
157 	nv_cache->chunk_compaction_threshold = nv_cache->chunk_count *
158 					       dev->conf.nv_cache.chunk_compaction_threshold / 100;
159 	TAILQ_INIT(&nv_cache->compactor_list);
160 	for (i = 0; i < FTL_NV_CACHE_NUM_COMPACTORS; i++) {
161 		compactor = compactor_alloc(dev);
162 
163 		if (!compactor) {
164 			FTL_ERRLOG(dev, "Cannot allocate compaction process\n");
165 			return -1;
166 		}
167 
168 		TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
169 	}
170 
171 #define FTL_MAX_OPEN_CHUNKS 2
172 	nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
173 						nv_cache_p2l_map_pool_elem_size(nv_cache),
174 						FTL_BLOCK_SIZE,
175 						SPDK_ENV_SOCKET_ID_ANY);
176 	if (!nv_cache->p2l_pool) {
177 		return -ENOMEM;
178 	}
179 
180 	/* One entry per open chunk */
181 	nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
182 				  sizeof(struct ftl_nv_cache_chunk_md),
183 				  FTL_BLOCK_SIZE,
184 				  SPDK_ENV_SOCKET_ID_ANY);
185 	if (!nv_cache->chunk_md_pool) {
186 		return -ENOMEM;
187 	}
188 
189 	/* Each compactor can be reading a different chunk, whose state it will need to switch to free at the end,
190 	 * plus one backup entry each for processing high-invalidity chunks (if there's a backlog of chunks with
191 	 * extremely small, even zero, validity, the compactors can process them quickly and trigger a lot of
192 	 * transitions to the free state at once) */
193 	nv_cache->free_chunk_md_pool = ftl_mempool_create(2 * FTL_NV_CACHE_NUM_COMPACTORS,
194 				       sizeof(struct ftl_nv_cache_chunk_md),
195 				       FTL_BLOCK_SIZE,
196 				       SPDK_ENV_SOCKET_ID_ANY);
197 	if (!nv_cache->free_chunk_md_pool) {
198 		return -ENOMEM;
199 	}
200 
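	/* Convert the throttle interval from milliseconds to TSC ticks */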
201 	nv_cache->throttle.interval_tsc = FTL_NV_CACHE_THROTTLE_INTERVAL_MS *
202 					  (spdk_get_ticks_hz() / 1000);
203 	nv_cache->chunk_free_target = spdk_divide_round_up(nv_cache->chunk_count *
204 				      dev->conf.nv_cache.chunk_free_target,
205 				      100);
206 	return 0;
207 }
208 
209 void
210 ftl_nv_cache_deinit(struct spdk_ftl_dev *dev)
211 {
212 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
213 	struct ftl_nv_cache_compactor *compactor;
214 
215 	while (!TAILQ_EMPTY(&nv_cache->compactor_list)) {
216 		compactor = TAILQ_FIRST(&nv_cache->compactor_list);
217 		TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
218 
219 		compactor_free(dev, compactor);
220 	}
221 
222 	ftl_mempool_destroy(nv_cache->md_pool);
223 	ftl_mempool_destroy(nv_cache->p2l_pool);
224 	ftl_mempool_destroy(nv_cache->chunk_md_pool);
225 	ftl_mempool_destroy(nv_cache->free_chunk_md_pool);
226 	nv_cache->md_pool = NULL;
227 	nv_cache->p2l_pool = NULL;
228 	nv_cache->chunk_md_pool = NULL;
229 	nv_cache->free_chunk_md_pool = NULL;
230 
231 	free(nv_cache->chunks);
232 	nv_cache->chunks = NULL;
233 }
234 
235 static uint64_t
236 chunk_get_free_space(struct ftl_nv_cache *nv_cache,
237 		     struct ftl_nv_cache_chunk *chunk)
238 {
239 	assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <=
240 	       nv_cache->chunk_blocks);
241 	return nv_cache->chunk_blocks - chunk->md->write_pointer -
242 	       nv_cache->tail_md_chunk_blocks;
243 }
244 
245 static bool
246 chunk_is_closed(struct ftl_nv_cache_chunk *chunk)
247 {
248 	return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
249 }
250 
251 static void ftl_chunk_close(struct ftl_nv_cache_chunk *chunk);
252 
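/*
 * Reserve io->num_blocks blocks of space in the currently open chunk (taking a new chunk
 * from the open list when the current one has been closed) and advance its write pointer.
 * A chunk without enough room is padded out with skipped blocks and closed. Returns the
 * NV cache block address of the reservation, or FTL_LBA_INVALID if no open chunk is
 * available.
 */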
253 static uint64_t
254 ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io)
255 {
256 	uint64_t address = FTL_LBA_INVALID;
257 	uint64_t num_blocks = io->num_blocks;
258 	uint64_t free_space;
259 	struct ftl_nv_cache_chunk *chunk;
260 
261 	do {
262 		chunk = nv_cache->chunk_current;
263 		/* Chunk has been closed, so pick a new one */
264 		if (chunk && chunk_is_closed(chunk)) {
265 			chunk = NULL;
266 		}
267 
268 		if (!chunk) {
269 			chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
270 			if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
271 				TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
272 				nv_cache->chunk_current = chunk;
273 			} else {
274 				break;
275 			}
276 		}
277 
278 		free_space = chunk_get_free_space(nv_cache, chunk);
279 
280 		if (free_space >= num_blocks) {
281 			/* Enough space in chunk */
282 
283 			/* Calculate address in NV cache */
284 			address = chunk->offset + chunk->md->write_pointer;
285 
286 			/* Set chunk in IO */
287 			io->nv_cache_chunk = chunk;
288 
289 			/* Move write pointer */
290 			chunk->md->write_pointer += num_blocks;
291 			break;
292 		}
293 
294 		/* Not enough space in nv_cache_chunk */
295 		nv_cache->chunk_current = NULL;
296 
297 		if (0 == free_space) {
298 			continue;
299 		}
300 
301 		chunk->md->blocks_skipped = free_space;
302 		chunk->md->blocks_written += free_space;
303 		chunk->md->write_pointer += free_space;
304 
305 		if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
306 			ftl_chunk_close(chunk);
307 		}
308 	} while (1);
309 
310 	return address;
311 }
312 
313 void
314 ftl_nv_cache_fill_md(struct ftl_io *io)
315 {
316 	struct ftl_nv_cache_chunk *chunk = io->nv_cache_chunk;
317 	uint64_t i;
318 	union ftl_md_vss *metadata = io->md;
319 	uint64_t lba = ftl_io_get_lba(io, 0);
320 
321 	for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) {
322 		metadata->nv_cache.lba = lba;
323 		metadata->nv_cache.seq_id = chunk->md->seq_id;
324 	}
325 }
326 
327 uint64_t
328 chunk_tail_md_offset(struct ftl_nv_cache *nv_cache)
329 {
330 	return nv_cache->chunk_blocks - nv_cache->tail_md_chunk_blocks;
331 }
332 
333 static void
334 chunk_advance_blocks(struct ftl_nv_cache *nv_cache, struct ftl_nv_cache_chunk *chunk,
335 		     uint64_t advanced_blocks)
336 {
337 	chunk->md->blocks_written += advanced_blocks;
338 
339 	assert(chunk->md->blocks_written <= nv_cache->chunk_blocks);
340 
341 	if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
342 		ftl_chunk_close(chunk);
343 	}
344 }
345 
346 static uint64_t
347 chunk_user_blocks_written(struct ftl_nv_cache_chunk *chunk)
348 {
349 	return chunk->md->blocks_written - chunk->md->blocks_skipped -
350 	       chunk->nv_cache->tail_md_chunk_blocks;
351 }
352 
353 static bool
354 is_chunk_compacted(struct ftl_nv_cache_chunk *chunk)
355 {
356 	assert(chunk->md->blocks_written != 0);
357 
358 	if (chunk_user_blocks_written(chunk) == chunk->md->blocks_compacted) {
359 		return true;
360 	}
361 
362 	return false;
363 }
364 
365 static int
366 ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk)
367 {
368 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
369 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
370 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
371 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
372 
373 	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->chunk_md_pool);
374 
375 	if (!p2l_map->chunk_dma_md) {
376 		return -ENOMEM;
377 	}
378 
379 	memset(p2l_map->chunk_dma_md, 0, region->entry_size * FTL_BLOCK_SIZE);
380 	return 0;
381 }
382 
383 static void
384 ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk)
385 {
386 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
387 
388 	ftl_mempool_put(chunk->nv_cache->chunk_md_pool, p2l_map->chunk_dma_md);
389 	p2l_map->chunk_dma_md = NULL;
390 }
391 
392 static void
393 ftl_chunk_free(struct ftl_nv_cache_chunk *chunk)
394 {
395 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
396 
397 	/* Reset chunk */
398 	memset(chunk->md, 0, sizeof(*chunk->md));
399 
400 	TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry);
401 	nv_cache->chunk_free_persist_count++;
402 }
403 
404 static int
405 ftl_chunk_alloc_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
406 {
407 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
408 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
409 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
410 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
411 
412 	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->free_chunk_md_pool);
413 
414 	if (!p2l_map->chunk_dma_md) {
415 		return -ENOMEM;
416 	}
417 
418 	memset(p2l_map->chunk_dma_md, 0, region->entry_size * FTL_BLOCK_SIZE);
419 	return 0;
420 }
421 
422 static void
423 ftl_chunk_free_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
424 {
425 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
426 
427 	ftl_mempool_put(chunk->nv_cache->free_chunk_md_pool, p2l_map->chunk_dma_md);
428 	p2l_map->chunk_dma_md = NULL;
429 }
430 
431 static void
432 chunk_free_cb(int status, void *ctx)
433 {
434 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
435 
436 	if (spdk_likely(!status)) {
437 		struct ftl_nv_cache *nv_cache = chunk->nv_cache;
438 
439 		nv_cache->chunk_free_persist_count--;
440 		TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
441 		nv_cache->chunk_free_count++;
442 		nv_cache->chunk_full_count--;
443 		chunk->md->state = FTL_CHUNK_STATE_FREE;
444 		chunk->md->close_seq_id = 0;
445 		ftl_chunk_free_chunk_free_entry(chunk);
446 	} else {
447 #ifdef SPDK_FTL_RETRY_ON_ERROR
448 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
449 #else
450 		ftl_abort();
451 #endif
452 	}
453 }
454 
455 static void
456 ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
457 {
458 	int rc;
459 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
460 	struct ftl_p2l_map *p2l_map;
461 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
462 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
463 	struct ftl_nv_cache_chunk *tchunk, *chunk = NULL;
464 
465 	TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) {
466 		p2l_map = &chunk->p2l_map;
467 		rc = ftl_chunk_alloc_chunk_free_entry(chunk);
468 		if (rc) {
469 			break;
470 		}
471 
472 		TAILQ_REMOVE(&nv_cache->needs_free_persist_list, chunk, entry);
473 
474 		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
475 		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_FREE;
476 		p2l_map->chunk_dma_md->close_seq_id = 0;
477 		p2l_map->chunk_dma_md->p2l_map_checksum = 0;
478 
479 		ftl_md_persist_entry(md, get_chunk_idx(chunk), p2l_map->chunk_dma_md, NULL,
480 				     chunk_free_cb, chunk, &chunk->md_persist_entry_ctx);
481 	}
482 }
483 
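/*
 * Update the simple moving average of compaction bandwidth: the ring buffer holds the last
 * FTL_NV_CACHE_COMPACTION_SMA_N per-chunk samples of bytes compacted per TSC tick, and
 * compaction_sma is the mean of the collected samples.
 */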
484 static void
485 compaction_stats_update(struct ftl_nv_cache_chunk *chunk)
486 {
487 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
488 	struct compaction_bw_stats *compaction_bw = &nv_cache->compaction_recent_bw;
489 	double *ptr;
490 
491 	if (spdk_unlikely(chunk->compaction_length_tsc == 0)) {
492 		return;
493 	}
494 
495 	if (spdk_likely(compaction_bw->count == FTL_NV_CACHE_COMPACTION_SMA_N)) {
496 		ptr = compaction_bw->buf + compaction_bw->first;
497 		compaction_bw->first++;
498 		if (compaction_bw->first == FTL_NV_CACHE_COMPACTION_SMA_N) {
499 			compaction_bw->first = 0;
500 		}
501 		compaction_bw->sum -= *ptr;
502 	} else {
503 		ptr = compaction_bw->buf + compaction_bw->count;
504 		compaction_bw->count++;
505 	}
506 
507 	*ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc;
508 	chunk->compaction_length_tsc = 0;
509 
510 	compaction_bw->sum += *ptr;
511 	nv_cache->compaction_sma = compaction_bw->sum / compaction_bw->count;
512 }
513 
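/*
 * Account num_blocks of the chunk as compacted and add the elapsed time to its compaction
 * length. Once every user block written to the chunk has been compacted, the chunk is
 * removed from the compaction list, its bandwidth sample is recorded and it gets freed.
 */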
514 static void
515 chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)
516 {
517 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
518 	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
519 
520 	chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
521 	chunk->compaction_start_tsc = tsc;
522 
523 	chunk->md->blocks_compacted += num_blocks;
524 	assert(chunk->md->blocks_compacted <= chunk_user_blocks_written(chunk));
525 	if (!is_chunk_compacted(chunk)) {
526 		return;
527 	}
528 
529 	/* Remove chunk from compacted list */
530 	TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry);
531 	nv_cache->chunk_comp_count--;
532 
533 	compaction_stats_update(chunk);
534 
535 	ftl_chunk_free(chunk);
536 }
537 
538 static bool
539 is_compaction_required(struct ftl_nv_cache *nv_cache)
540 {
541 	if (spdk_unlikely(nv_cache->halt)) {
542 		return false;
543 	}
544 
545 	if (nv_cache->chunk_full_count >= nv_cache->chunk_compaction_threshold) {
546 		return true;
547 	}
548 
549 	return false;
550 }
551 
552 static void compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor);
553 static void compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp);
554 
555 static void
556 _compaction_process_pin_lba(void *_comp)
557 {
558 	struct ftl_nv_cache_compactor *comp = _comp;
559 
560 	compaction_process_pin_lba(comp);
561 }
562 
563 static void
564 compaction_process_pin_lba_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
565 {
566 	struct ftl_nv_cache_compactor *comp = pin_ctx->cb_ctx;
567 	struct ftl_rq *rq = comp->rq;
568 
569 	if (status) {
570 		rq->iter.status = status;
571 		pin_ctx->lba = FTL_LBA_INVALID;
572 	}
573 
574 	if (--rq->iter.remaining == 0) {
575 		if (rq->iter.status) {
576 			/* unpin and try again */
577 			ftl_rq_unpin(rq);
578 			spdk_thread_send_msg(spdk_get_thread(), _compaction_process_pin_lba, comp);
579 			return;
580 		}
581 
582 		compaction_process_finish_read(comp);
583 	}
584 }
585 
586 static void
587 compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp)
588 {
589 	struct ftl_rq *rq = comp->rq;
590 	struct spdk_ftl_dev *dev = rq->dev;
591 	struct ftl_rq_entry *entry;
592 
593 	assert(rq->iter.count);
594 	rq->iter.remaining = rq->iter.count;
595 	rq->iter.status = 0;
596 
597 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
598 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
599 		struct ftl_l2p_pin_ctx *pin_ctx = &entry->l2p_pin_ctx;
600 		union ftl_md_vss *md = entry->io_md;
601 
602 		if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
603 			ftl_l2p_pin_skip(dev, compaction_process_pin_lba_cb, comp, pin_ctx);
604 		} else {
605 			ftl_l2p_pin(dev, md->nv_cache.lba, 1, compaction_process_pin_lba_cb, comp, pin_ctx);
606 		}
607 	}
608 }
609 
610 static void
611 compaction_process_read_entry_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
612 {
613 	struct ftl_rq_entry *entry = arg;
614 	struct ftl_rq *rq = ftl_rq_from_entry(entry);
615 	struct spdk_ftl_dev *dev = rq->dev;
616 	struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
617 
618 	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_CMP, bdev_io);
619 
620 	spdk_bdev_free_io(bdev_io);
621 
622 	if (!success) {
623 		/* retry */
624 		spdk_thread_send_msg(spdk_get_thread(), compaction_process_read_entry, entry);
625 		return;
626 	}
627 
628 	assert(rq->iter.remaining >= entry->bdev_io.num_blocks);
629 	rq->iter.remaining -= entry->bdev_io.num_blocks;
630 	if (0 == rq->iter.remaining) {
631 		/* All IOs processed, go to the next phase - pinning */
632 		compaction_process_pin_lba(compactor);
633 	}
634 }
635 
636 static void
637 compaction_process_read_entry(void *arg)
638 {
639 	struct ftl_rq_entry *entry = arg;
640 	struct ftl_rq *rq = ftl_rq_from_entry(entry);
641 	struct spdk_ftl_dev *dev = rq->dev;
642 
643 	int rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, dev->nv_cache.bdev_desc,
644 			dev->nv_cache.cache_ioch, entry->io_payload, entry->io_md,
645 			entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
646 			compaction_process_read_entry_cb, entry);
647 
648 	if (spdk_unlikely(rc)) {
649 		if (rc == -ENOMEM) {
650 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
651 			entry->bdev_io.wait_entry.bdev = bdev;
652 			entry->bdev_io.wait_entry.cb_fn = compaction_process_read_entry;
653 			entry->bdev_io.wait_entry.cb_arg = entry;
654 			spdk_bdev_queue_io_wait(bdev, dev->nv_cache.cache_ioch, &entry->bdev_io.wait_entry);
655 		} else {
656 			ftl_abort();
657 		}
658 	}
659 
660 	dev->stats.io_activity_total += entry->bdev_io.num_blocks;
661 }
662 
663 static bool
664 is_chunk_to_read(struct ftl_nv_cache_chunk *chunk)
665 {
666 	assert(chunk->md->blocks_written != 0);
667 
668 	if (chunk_user_blocks_written(chunk) == chunk->md->read_pointer) {
669 		return false;
670 	}
671 
672 	return true;
673 }
674 
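/*
 * Pick a chunk to compact: prefer the chunk already at the head of the compaction list if
 * it still has blocks to read, otherwise promote the oldest full chunk onto the compaction
 * list.
 */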
675 static struct ftl_nv_cache_chunk *
676 get_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
677 {
678 	struct ftl_nv_cache_chunk *chunk = NULL;
679 
680 	if (!TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
681 		chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
682 		if (is_chunk_to_read(chunk)) {
683 			return chunk;
684 		}
685 	}
686 
687 	if (!TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
688 		chunk = TAILQ_FIRST(&nv_cache->chunk_full_list);
689 		TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry);
690 
691 		assert(chunk->md->write_pointer);
692 	} else {
693 		return NULL;
694 	}
695 
696 	if (spdk_likely(chunk)) {
697 		assert(chunk->md->write_pointer != 0);
698 		TAILQ_INSERT_HEAD(&nv_cache->chunk_comp_list, chunk, entry);
699 		nv_cache->chunk_comp_count++;
700 	}
701 
702 	return chunk;
703 }
704 
705 static uint64_t
706 chunk_blocks_to_read(struct ftl_nv_cache_chunk *chunk)
707 {
708 	uint64_t blocks_written;
709 	uint64_t blocks_to_read;
710 
711 	assert(chunk->md->blocks_written >= chunk->md->blocks_skipped);
712 	blocks_written = chunk_user_blocks_written(chunk);
713 
714 	assert(blocks_written >= chunk->md->read_pointer);
715 	blocks_to_read = blocks_written - chunk->md->read_pointer;
716 
717 	return blocks_to_read;
718 }
719 
720 static void
721 compactor_deactivate(struct ftl_nv_cache_compactor *compactor)
722 {
723 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
724 
725 	compactor->rq->iter.count = 0;
726 	assert(nv_cache->compaction_active_count);
727 	nv_cache->compaction_active_count--;
728 	TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
729 }
730 
731 static void
732 compaction_process_invalidate_entry(struct ftl_rq_entry *entry)
733 {
734 	entry->addr = FTL_ADDR_INVALID;
735 	entry->lba = FTL_LBA_INVALID;
736 	entry->seq_id = 0;
737 	entry->owner.priv = NULL;
738 }
739 
740 static void
741 compaction_process_pad(struct ftl_nv_cache_compactor *compactor, uint64_t idx)
742 {
743 	struct ftl_rq *rq = compactor->rq;
744 	struct ftl_rq_entry *entry;
745 
746 	assert(idx < rq->num_blocks);
747 	FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[idx], entry, rq->num_blocks) {
748 		compaction_process_invalidate_entry(entry);
749 	}
750 }
751 
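/*
 * Issue the cache reads for all entries of the request, merging runs of physically
 * contiguous addresses into single bdev reads.
 */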
752 static void
753 compaction_process_read(struct ftl_nv_cache_compactor *compactor)
754 {
755 	struct ftl_rq *rq = compactor->rq;
756 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
757 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
758 	struct ftl_rq_entry *entry, *io;
759 
760 	assert(rq->iter.count);
761 	rq->iter.remaining = rq->iter.count;
762 
763 	io = rq->entries;
764 	io->bdev_io.num_blocks = 1;
765 	io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
766 	FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[1], entry, rq->iter.count) {
767 		if (entry->addr == io->addr + io->bdev_io.num_blocks) {
768 			io->bdev_io.num_blocks++;
769 		} else {
770 			compaction_process_read_entry(io);
771 			io = entry;
772 			io->bdev_io.num_blocks = 1;
773 			io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
774 		}
775 	}
776 	compaction_process_read_entry(io);
777 }
778 
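/*
 * Find the next block of the chunk that still holds valid data according to the valid map.
 * Blocks that are already invalid are skipped and accounted as compacted right away.
 * Returns FTL_ADDR_INVALID when the chunk has nothing left to read.
 */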
779 static ftl_addr
780 compaction_chunk_read_pos(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk)
781 {
782 	ftl_addr start, pos;
783 	uint64_t skip, to_read = chunk_blocks_to_read(chunk);
784 
785 	if (0 == to_read) {
786 		return FTL_ADDR_INVALID;
787 	}
788 
789 	start = ftl_addr_from_nvc_offset(dev, chunk->offset + chunk->md->read_pointer);
790 	pos = ftl_bitmap_find_first_set(dev->valid_map, start, start + to_read - 1);
791 
792 	if (pos == UINT64_MAX) {
793 		chunk->md->read_pointer += to_read;
794 		chunk_compaction_advance(chunk, to_read);
795 		return FTL_ADDR_INVALID;
796 	}
797 
798 	assert(pos >= start);
799 	skip = pos - start;
800 	if (skip) {
801 		chunk->md->read_pointer += skip;
802 		chunk_compaction_advance(chunk, skip);
803 	}
804 
805 	return pos;
806 }
807 
808 static bool
809 compaction_entry_read_pos(struct ftl_nv_cache *nv_cache, struct ftl_rq_entry *entry)
810 {
811 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
812 	struct ftl_nv_cache_chunk *chunk = NULL;
813 	ftl_addr addr = FTL_ADDR_INVALID;
814 
815 	while (!chunk) {
816 		/* Get currently handled chunk */
817 		chunk = get_chunk_for_compaction(nv_cache);
818 		if (!chunk) {
819 			return false;
820 		}
821 		chunk->compaction_start_tsc = spdk_thread_get_last_tsc(spdk_get_thread());
822 
823 		/* Get next read position in chunk */
824 		addr = compaction_chunk_read_pos(dev, chunk);
825 		if (FTL_ADDR_INVALID == addr) {
826 			chunk = NULL;
827 		}
828 	}
829 
830 	assert(FTL_ADDR_INVALID != addr);
831 
832 	/* Set entry address info and chunk */
833 	entry->addr = addr;
834 	entry->owner.priv = chunk;
835 
836 	/* Move read pointer in the chunk */
837 	chunk->md->read_pointer++;
838 
839 	return true;
840 }
841 
842 static void
843 compaction_process_start(struct ftl_nv_cache_compactor *compactor)
844 {
845 	struct ftl_rq *rq = compactor->rq;
846 	struct ftl_nv_cache *nv_cache = compactor->nv_cache;
847 	struct ftl_rq_entry *entry;
848 
849 	assert(0 == compactor->rq->iter.count);
850 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->num_blocks) {
851 		if (!compaction_entry_read_pos(nv_cache, entry)) {
852 			compaction_process_pad(compactor, entry->index);
853 			break;
854 		}
855 		rq->iter.count++;
856 	}
857 
858 	if (rq->iter.count) {
859 		/* Schedule Read IOs */
860 		compaction_process_read(compactor);
861 	} else {
862 		compactor_deactivate(compactor);
863 	}
864 }
865 
866 static void
867 compaction_process(struct ftl_nv_cache *nv_cache)
868 {
869 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
870 	struct ftl_nv_cache_compactor *compactor;
871 
872 	if (!is_compaction_required(nv_cache)) {
873 		return;
874 	}
875 
876 	compactor = TAILQ_FIRST(&nv_cache->compactor_list);
877 	if (!compactor) {
878 		return;
879 	}
880 
881 	TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
882 	compactor->nv_cache->compaction_active_count++;
883 	compaction_process_start(compactor);
884 	ftl_add_io_activity(dev);
885 }
886 
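/*
 * Completion of the write that relocated compacted data onto the base device: update the
 * L2P from the old cache addresses to the new band addresses, unpin the LBAs and return
 * the compactor to the idle list.
 */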
887 static void
888 compaction_process_ftl_done(struct ftl_rq *rq)
889 {
890 	struct spdk_ftl_dev *dev = rq->dev;
891 	struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
892 	struct ftl_band *band = rq->io.band;
893 	struct ftl_rq_entry *entry;
894 	ftl_addr addr;
895 
896 	if (spdk_unlikely(false == rq->success)) {
897 		/* IO error, retry writing */
898 #ifdef SPDK_FTL_RETRY_ON_ERROR
899 		ftl_writer_queue_rq(&dev->writer_user, rq);
900 		return;
901 #else
902 		ftl_abort();
903 #endif
904 	}
905 
906 	assert(rq->iter.count);
907 
908 	/* Update L2P table */
909 	addr = rq->io.addr;
910 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
911 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
912 
913 		if (entry->lba != FTL_LBA_INVALID) {
914 			ftl_l2p_update_base(dev, entry->lba, addr, entry->addr);
915 			ftl_l2p_unpin(dev, entry->lba, 1);
916 			chunk_compaction_advance(chunk, 1);
917 		} else {
918 			assert(entry->addr == FTL_ADDR_INVALID);
919 		}
920 
921 		addr = ftl_band_next_addr(band, addr, 1);
922 		compaction_process_invalidate_entry(entry);
923 	}
924 
925 	compactor_deactivate(compactor);
926 }
927 
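/*
 * Called once all cache reads of the request have completed and the LBAs are pinned.
 * Entries whose metadata is stale or whose L2P mapping has changed since the data was
 * written are dropped; if anything valid remains, the request is queued to the user
 * writer to be relocated onto the base device.
 */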
928 static void
929 compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
930 {
931 	struct ftl_rq *rq = compactor->rq;
932 	struct spdk_ftl_dev *dev = rq->dev;
933 	struct ftl_rq_entry *entry;
934 	ftl_addr current_addr;
935 	uint64_t skip = 0;
936 
937 	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
938 		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
939 		union ftl_md_vss *md = entry->io_md;
940 
941 		if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
942 			skip++;
943 			compaction_process_invalidate_entry(entry);
944 			chunk_compaction_advance(chunk, 1);
945 			continue;
946 		}
947 
948 		current_addr = ftl_l2p_get(dev, md->nv_cache.lba);
949 		if (current_addr == entry->addr) {
950 			entry->lba = md->nv_cache.lba;
951 			entry->seq_id = chunk->md->seq_id;
952 		} else {
953 			/* This address has already been invalidated, just skip this block */
954 			chunk_compaction_advance(chunk, 1);
955 			ftl_l2p_unpin(dev, md->nv_cache.lba, 1);
956 			compaction_process_invalidate_entry(entry);
957 			skip++;
958 		}
959 	}
960 
961 	if (skip < rq->iter.count) {
962 		/*
963 		 * Request contains data to be placed on FTL, compact it
964 		 */
965 		ftl_writer_queue_rq(&dev->writer_user, rq);
966 	} else {
967 		compactor_deactivate(compactor);
968 	}
969 }
970 
971 static void
972 compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor)
973 {
974 	if (!compactor) {
975 		return;
976 	}
977 
978 	ftl_rq_del(compactor->rq);
979 	free(compactor);
980 }
981 
982 static struct ftl_nv_cache_compactor *
983 compactor_alloc(struct spdk_ftl_dev *dev)
984 {
985 	struct ftl_nv_cache_compactor *compactor;
986 	struct ftl_rq_entry *entry;
987 
988 	compactor = calloc(1, sizeof(*compactor));
989 	if (!compactor) {
990 		goto error;
991 	}
992 
993 	/* Allocate a helper request used for reading */
994 	compactor->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
995 	if (!compactor->rq) {
996 		goto error;
997 	}
998 
999 	compactor->nv_cache = &dev->nv_cache;
1000 	compactor->rq->owner.priv = compactor;
1001 	compactor->rq->owner.cb = compaction_process_ftl_done;
1002 	compactor->rq->owner.compaction = true;
1003 
1004 	FTL_RQ_ENTRY_LOOP(compactor->rq, entry, compactor->rq->num_blocks) {
1005 		compaction_process_invalidate_entry(entry);
1006 	}
1007 
1008 	return compactor;
1009 
1010 error:
1011 	compactor_free(dev, compactor);
1012 	return NULL;
1013 }
1014 
1015 static void
1016 ftl_nv_cache_submit_cb_done(struct ftl_io *io)
1017 {
1018 	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1019 
1020 	chunk_advance_blocks(nv_cache, io->nv_cache_chunk, io->num_blocks);
1021 	io->nv_cache_chunk = NULL;
1022 
1023 	ftl_mempool_put(nv_cache->md_pool, io->md);
1024 	ftl_io_complete(io);
1025 }
1026 
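/* Point the L2P at the freshly written cache blocks, then unpin the LBAs and complete the IO. */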
1027 static void
1028 ftl_nv_cache_l2p_update(struct ftl_io *io)
1029 {
1030 	struct spdk_ftl_dev *dev = io->dev;
1031 	ftl_addr next_addr = io->addr;
1032 	size_t i;
1033 
1034 	for (i = 0; i < io->num_blocks; ++i, ++next_addr) {
1035 		ftl_l2p_update_cache(dev, ftl_io_get_lba(io, i), next_addr, io->map[i]);
1036 	}
1037 
1038 	ftl_l2p_unpin(dev, io->lba, io->num_blocks);
1039 	ftl_nv_cache_submit_cb_done(io);
1040 }
1041 
1042 static void
1043 ftl_nv_cache_submit_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1044 {
1045 	struct ftl_io *io = cb_arg;
1046 
1047 	ftl_stats_bdev_io_completed(io->dev, FTL_STATS_TYPE_USER, bdev_io);
1048 
1049 	spdk_bdev_free_io(bdev_io);
1050 
1051 	if (spdk_unlikely(!success)) {
1052 		FTL_ERRLOG(io->dev, "Non-volatile cache write failed at %"PRIx64"\n",
1053 			   io->addr);
1054 		io->status = -EIO;
1055 		ftl_nv_cache_submit_cb_done(io);
1056 	} else {
1057 		ftl_nv_cache_l2p_update(io);
1058 	}
1059 }
1060 
1061 static void
1062 nv_cache_write(void *_io)
1063 {
1064 	struct ftl_io *io = _io;
1065 	struct spdk_ftl_dev *dev = io->dev;
1066 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1067 	int rc;
1068 
1069 	rc = spdk_bdev_writev_blocks_with_md(nv_cache->bdev_desc, nv_cache->cache_ioch,
1070 					     io->iov, io->iov_cnt, io->md,
1071 					     ftl_addr_to_nvc_offset(dev, io->addr), io->num_blocks,
1072 					     ftl_nv_cache_submit_cb, io);
1073 	if (spdk_unlikely(rc)) {
1074 		if (rc == -ENOMEM) {
1075 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
1076 			io->bdev_io_wait.bdev = bdev;
1077 			io->bdev_io_wait.cb_fn = nv_cache_write;
1078 			io->bdev_io_wait.cb_arg = io;
1079 			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &io->bdev_io_wait);
1080 		} else {
1081 			ftl_abort();
1082 		}
1083 	}
1084 }
1085 
1086 static void
1087 ftl_nv_cache_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
1088 {
1089 	struct ftl_io *io = pin_ctx->cb_ctx;
1090 	size_t i;
1091 
1092 	if (spdk_unlikely(status != 0)) {
1093 		/* Retry on the internal L2P fault */
1094 		FTL_ERRLOG(dev, "L2P pin failed for NV cache write at %"PRIx64"\n",
1095 			   io->addr);
1096 		io->status = -EAGAIN;
1097 		ftl_nv_cache_submit_cb_done(io);
1098 		return;
1099 	}
1100 
1101 	/* Remember previous l2p mapping to resolve conflicts in case of outstanding write-after-write */
1102 	for (i = 0; i < io->num_blocks; ++i) {
1103 		io->map[i] = ftl_l2p_get(dev, ftl_io_get_lba(io, i));
1104 	}
1105 
1106 	assert(io->iov_pos == 0);
1107 
1108 	ftl_trace_submission(io->dev, io, io->addr, io->num_blocks);
1109 
1110 	nv_cache_write(io);
1111 }
1112 
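/*
 * Submit a user write to the NV cache: reserve buffer space in an open chunk, fill the
 * per-block metadata, pin the affected L2P range and issue the bdev write from the pin
 * callback. Returns false when the metadata pool or the cache itself is out of space,
 * in which case the caller is expected to resubmit the request.
 */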
1113 bool
1114 ftl_nv_cache_write(struct ftl_io *io)
1115 {
1116 	struct spdk_ftl_dev *dev = io->dev;
1117 	uint64_t cache_offset;
1118 
1119 	io->md = ftl_mempool_get(dev->nv_cache.md_pool);
1120 	if (spdk_unlikely(!io->md)) {
1121 		return false;
1122 	}
1123 
1124 	/* Reserve area on the write buffer cache */
1125 	cache_offset = ftl_nv_cache_get_wr_buffer(&dev->nv_cache, io);
1126 	if (cache_offset == FTL_LBA_INVALID) {
1127 		/* No free space in NV cache, resubmit request */
1128 		ftl_mempool_put(dev->nv_cache.md_pool, io->md);
1129 		return false;
1130 	}
1131 	io->addr = ftl_addr_from_nvc_offset(dev, cache_offset);
1132 	io->nv_cache_chunk = dev->nv_cache.chunk_current;
1133 
1134 	ftl_nv_cache_fill_md(io);
1135 	ftl_l2p_pin(io->dev, io->lba, io->num_blocks,
1136 		    ftl_nv_cache_pin_cb, io,
1137 		    &io->l2p_pin_ctx);
1138 
1139 	dev->nv_cache.throttle.blocks_submitted += io->num_blocks;
1140 
1141 	return true;
1142 }
1143 
1144 int
1145 ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
1146 		  spdk_bdev_io_completion_cb cb, void *cb_arg)
1147 {
1148 	int rc;
1149 	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1150 
1151 	assert(ftl_addr_in_nvc(io->dev, addr));
1152 
1153 	rc = ftl_nv_cache_bdev_read_blocks_with_md(io->dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1154 			ftl_io_iovec_addr(io), NULL, ftl_addr_to_nvc_offset(io->dev, addr),
1155 			num_blocks, cb, cb_arg);
1156 
1157 	return rc;
1158 }
1159 
1160 bool
1161 ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache)
1162 {
1163 	if (nv_cache->compaction_active_count) {
1164 		return false;
1165 	}
1166 
1167 	if (nv_cache->chunk_open_count > 0) {
1168 		return false;
1169 	}
1170 
1171 	return true;
1172 }
1173 
1174 void
1175 ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
1176 		      uint64_t offset, uint64_t lba)
1177 {
1178 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1179 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1180 
1181 	ftl_lba_store(dev, p2l_map->chunk_map, offset, lba);
1182 }
1183 
1184 uint64_t
1185 ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset)
1186 {
1187 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1188 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1189 
1190 	return ftl_lba_load(dev, p2l_map->chunk_map, offset);
1191 }
1192 
1193 static void
1194 ftl_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
1195 {
1196 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1197 	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1198 	uint64_t offset;
1199 
1200 	offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
1201 	ftl_chunk_map_set_lba(chunk, offset, lba);
1202 }
1203 
1204 struct ftl_nv_cache_chunk *
1205 ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
1206 {
1207 	struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks;
1208 	uint64_t chunk_idx;
1209 	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1210 
1211 	assert(chunk != NULL);
1212 	chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks;
1213 	chunk += chunk_idx;
1214 
1215 	return chunk;
1216 }
1217 
1218 void
1219 ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
1220 {
1221 	struct ftl_nv_cache_chunk *chunk;
1222 
1223 	chunk = ftl_nv_cache_get_chunk_from_addr(dev, addr);
1224 
1225 	assert(lba != FTL_LBA_INVALID);
1226 
1227 	ftl_chunk_set_addr(chunk, lba, addr);
1228 	ftl_bitmap_set(dev->valid_map, addr);
1229 }
1230 
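/*
 * Proportional throttle controller. The error term is the normalized distance of the free
 * chunk count from its target:
 *     err = (chunk_free_count - chunk_free_target) / chunk_count
 * and the per-interval write budget follows the compaction bandwidth moving average:
 *     limit = compaction_sma * interval_tsc / FTL_BLOCK_SIZE * (1.0 + clamp(KP * err))
 * so user writes are slowed down when free chunks run low and relaxed when there is slack.
 * While no compaction bandwidth estimate is available, the budget is left unlimited.
 */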
1231 static void
1232 ftl_nv_cache_throttle_update(struct ftl_nv_cache *nv_cache)
1233 {
1234 	double err;
1235 	double modifier;
1236 
1237 	err = ((double)nv_cache->chunk_free_count - nv_cache->chunk_free_target) / nv_cache->chunk_count;
1238 	modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP * err;
1239 
1240 	if (modifier < FTL_NV_CACHE_THROTTLE_MODIFIER_MIN) {
1241 		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MIN;
1242 	} else if (modifier > FTL_NV_CACHE_THROTTLE_MODIFIER_MAX) {
1243 		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MAX;
1244 	}
1245 
1246 	if (spdk_unlikely(nv_cache->compaction_sma == 0 || nv_cache->compaction_active_count == 0)) {
1247 		nv_cache->throttle.blocks_submitted_limit = UINT64_MAX;
1248 	} else {
1249 		double blocks_per_interval = nv_cache->compaction_sma * nv_cache->throttle.interval_tsc /
1250 					     FTL_BLOCK_SIZE;
1251 		nv_cache->throttle.blocks_submitted_limit = blocks_per_interval * (1.0 + modifier);
1252 	}
1253 }
1254 
1255 static void
1256 ftl_nv_cache_process_throttle(struct ftl_nv_cache *nv_cache)
1257 {
1258 	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
1259 
1260 	if (spdk_unlikely(!nv_cache->throttle.start_tsc)) {
1261 		nv_cache->throttle.start_tsc = tsc;
1262 	} else if (tsc - nv_cache->throttle.start_tsc >= nv_cache->throttle.interval_tsc) {
1263 		ftl_nv_cache_throttle_update(nv_cache);
1264 		nv_cache->throttle.start_tsc = tsc;
1265 		nv_cache->throttle.blocks_submitted = 0;
1266 	}
1267 }
1268 
1269 static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk);
1270 
1271 void
1272 ftl_nv_cache_process(struct spdk_ftl_dev *dev)
1273 {
1274 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1275 
1276 	assert(dev->nv_cache.bdev_desc);
1277 
1278 	if (nv_cache->chunk_open_count < FTL_MAX_OPEN_CHUNKS && spdk_likely(!nv_cache->halt) &&
1279 	    !TAILQ_EMPTY(&nv_cache->chunk_free_list)) {
1280 		struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);
1281 		TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
1282 		TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
1283 		nv_cache->chunk_free_count--;
1284 		chunk->md->seq_id = ftl_get_next_seq_id(dev);
1285 		ftl_chunk_open(chunk);
1286 		ftl_add_io_activity(dev);
1287 	}
1288 
1289 	compaction_process(nv_cache);
1290 	ftl_chunk_persist_free_state(nv_cache);
1291 	ftl_nv_cache_process_throttle(nv_cache);
1292 }
1293 
1294 static bool
1295 ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
1296 {
1297 	if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
1298 		return true;
1299 	} else {
1300 		return false;
1301 	}
1302 }
1303 
1304 bool
1305 ftl_nv_cache_throttle(struct spdk_ftl_dev *dev)
1306 {
1307 	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1308 
1309 	if (dev->nv_cache.throttle.blocks_submitted >= nv_cache->throttle.blocks_submitted_limit ||
1310 	    ftl_nv_cache_full(nv_cache)) {
1311 		return true;
1312 	}
1313 
1314 	return false;
1315 }
1316 
1317 static void
1318 chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk)
1319 {
1320 
1321 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1322 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1323 
1324 	ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1325 	p2l_map->chunk_map = NULL;
1326 
1327 	ftl_chunk_free_md_entry(chunk);
1328 }
1329 
1330 int
1331 ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache)
1332 {
1333 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1334 	struct ftl_nv_cache_chunk *chunk;
1335 	int status = 0;
1336 	uint64_t i;
1337 
1338 	assert(nv_cache->chunk_open_count == 0);
1339 
1340 	if (nv_cache->compaction_active_count) {
1341 		FTL_ERRLOG(dev, "Cannot save NV cache state, compaction in progress\n");
1342 		return -EINVAL;
1343 	}
1344 
1345 	chunk = nv_cache->chunks;
1346 	if (!chunk) {
1347 		FTL_ERRLOG(dev, "Cannot save NV cache state, no NV cache metadata\n");
1348 		return -ENOMEM;
1349 	}
1350 
1351 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1352 		nvc_validate_md(nv_cache, chunk->md);
1353 
1354 		if (chunk->md->read_pointer) {
1355 			/* Only full chunks can be compacted */
1356 			if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1357 				assert(0);
1358 				status = -EINVAL;
1359 				break;
1360 			}
1361 
1362 			/*
1363 			 * Chunk in the middle of compaction, start over after
1364 			 * load
1365 			 */
1366 			chunk->md->read_pointer = chunk->md->blocks_compacted = 0;
1367 		} else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
1368 			/* Full chunk */
1369 		} else if (0 == chunk->md->blocks_written) {
1370 			/* Empty chunk */
1371 		} else {
1372 			assert(0);
1373 			status = -EINVAL;
1374 			break;
1375 		}
1376 	}
1377 
1378 	if (status) {
1379 		FTL_ERRLOG(dev, "Cannot save NV cache state, inconsistent NV cache "
1380 			   "metadata\n");
1381 	}
1382 
1383 	return status;
1384 }
1385 
1386 static int
1387 sort_chunks_cmp(const void *a, const void *b)
1388 {
1389 	struct ftl_nv_cache_chunk *a_chunk = *(struct ftl_nv_cache_chunk **)a;
1390 	struct ftl_nv_cache_chunk *b_chunk = *(struct ftl_nv_cache_chunk **)b;
1391 
1392 	return a_chunk->md->seq_id - b_chunk->md->seq_id;
1393 }
1394 
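/*
 * Rebuild the full chunk list ordered by ascending seq_id, so that after a restart the
 * chunks are compacted in the order they were written.
 */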
1395 static int
1396 sort_chunks(struct ftl_nv_cache *nv_cache)
1397 {
1398 	struct ftl_nv_cache_chunk **chunks_list;
1399 	struct ftl_nv_cache_chunk *chunk;
1400 	uint32_t i;
1401 
1402 	if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
1403 		return 0;
1404 	}
1405 
1406 	chunks_list = calloc(nv_cache->chunk_full_count,
1407 			     sizeof(chunks_list[0]));
1408 	if (!chunks_list) {
1409 		return -ENOMEM;
1410 	}
1411 
1412 	i = 0;
1413 	TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) {
1414 		chunks_list[i] = chunk;
1415 		i++;
1416 	}
1417 	assert(i == nv_cache->chunk_full_count);
1418 
1419 	qsort(chunks_list, nv_cache->chunk_full_count, sizeof(chunks_list[0]),
1420 	      sort_chunks_cmp);
1421 
1422 	TAILQ_INIT(&nv_cache->chunk_full_list);
1423 	for (i = 0; i < nv_cache->chunk_full_count; i++) {
1424 		chunk = chunks_list[i];
1425 		TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1426 	}
1427 
1428 	free(chunks_list);
1429 	return 0;
1430 }
1431 
1432 static int
1433 chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk)
1434 {
1435 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1436 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1437 
1438 	assert(p2l_map->ref_cnt == 0);
1439 	assert(p2l_map->chunk_map == NULL);
1440 
1441 	p2l_map->chunk_map = ftl_mempool_get(nv_cache->p2l_pool);
1442 
1443 	if (!p2l_map->chunk_map) {
1444 		return -ENOMEM;
1445 	}
1446 
1447 	if (ftl_chunk_alloc_md_entry(chunk)) {
1448 		ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1449 		p2l_map->chunk_map = NULL;
1450 		return -ENOMEM;
1451 	}
1452 
1453 	/* Set the P2L to FTL_LBA_INVALID */
1454 	memset(p2l_map->chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
1455 
1456 	return 0;
1457 }
1458 
1459 int
1460 ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)
1461 {
1462 	struct ftl_nv_cache_chunk *chunk;
1463 	uint64_t chunks_number, offset, i;
1464 	int status = 0;
1465 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1466 
1467 	nv_cache->chunk_current = NULL;
1468 	TAILQ_INIT(&nv_cache->chunk_free_list);
1469 	TAILQ_INIT(&nv_cache->chunk_full_list);
1470 	nv_cache->chunk_full_count = nv_cache->chunk_free_count = 0;
1471 
1472 	assert(nv_cache->chunk_open_count == 0);
1473 	offset = nvc_data_offset(nv_cache);
1474 	chunk = nv_cache->chunks;
1475 	if (!chunk) {
1476 		FTL_ERRLOG(dev, "No NV cache metadata\n");
1477 		return -1;
1478 	}
1479 
1480 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1481 		chunk->nv_cache = nv_cache;
1482 		nvc_validate_md(nv_cache, chunk->md);
1483 
1484 		if (offset != chunk->offset) {
1485 			status = -EINVAL;
1486 			goto error;
1487 		}
1488 
1489 		if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
1490 			/* Chunk is full, move it to the full list */
1491 			TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1492 			nv_cache->chunk_full_count++;
1493 		} else if (0 == chunk->md->blocks_written) {
1494 			/* Chunk is empty, move it to the free list */
1495 			TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
1496 			nv_cache->chunk_free_count++;
1497 		} else {
1498 			status = -EINVAL;
1499 			goto error;
1500 		}
1501 
1502 		offset += nv_cache->chunk_blocks;
1503 	}
1504 
1505 	chunks_number = nv_cache->chunk_free_count + nv_cache->chunk_full_count;
1506 	assert(nv_cache->chunk_current == NULL);
1507 
1508 	if (chunks_number != nv_cache->chunk_count) {
1509 		FTL_ERRLOG(dev, "Inconsistent NV cache metadata\n");
1510 		status = -EINVAL;
1511 		goto error;
1512 	}
1513 
1514 	status = sort_chunks(nv_cache);
1515 	if (status) {
1516 		FTL_ERRLOG(dev, "FTL NV Cache: sorting chunks ERROR\n");
1517 	}
1518 
1519 	FTL_NOTICELOG(dev, "FTL NV Cache: full chunks = %lu, empty chunks = %lu\n",
1520 		      nv_cache->chunk_full_count, nv_cache->chunk_free_count);
1521 
1522 	if (0 == status) {
1523 		FTL_NOTICELOG(dev, "FTL NV Cache: state loaded successfully\n");
1524 	} else {
1525 		FTL_ERRLOG(dev, "FTL NV Cache: loading state ERROR\n");
1526 	}
1527 
1528 error:
1529 	return status;
1530 }
1531 
1532 void
1533 ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
1534 			    uint64_t *close_seq_id)
1535 {
1536 	uint64_t i, o_seq_id = 0, c_seq_id = 0;
1537 	struct ftl_nv_cache_chunk *chunk;
1538 
1539 	chunk = nv_cache->chunks;
1540 	assert(chunk);
1541 
1542 	/* Iterate over chunks and get their max open and close seq id */
1543 	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1544 		o_seq_id = spdk_max(o_seq_id, chunk->md->seq_id);
1545 		c_seq_id = spdk_max(c_seq_id, chunk->md->close_seq_id);
1546 	}
1547 
1548 	*open_seq_id = o_seq_id;
1549 	*close_seq_id = c_seq_id;
1550 }
1551 
1552 typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status);
1553 
1554 static void
1555 write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1556 {
1557 	struct ftl_basic_rq *brq = arg;
1558 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1559 
1560 	ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1561 
1562 	brq->success = success;
1563 	if (spdk_likely(success)) {
1564 		chunk_advance_blocks(chunk->nv_cache, chunk, brq->num_blocks);
1565 	}
1566 
1567 	spdk_bdev_free_io(bdev_io);
1568 	brq->owner.cb(brq);
1569 }
1570 
1571 static void
1572 _ftl_chunk_basic_rq_write(void *_brq)
1573 {
1574 	struct ftl_basic_rq *brq = _brq;
1575 	struct ftl_nv_cache *nv_cache = brq->io.chunk->nv_cache;
1576 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1577 	int rc;
1578 
1579 	rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1580 			brq->io_payload, NULL, brq->io.addr,
1581 			brq->num_blocks, write_brq_end, brq);
1582 	if (spdk_unlikely(rc)) {
1583 		if (rc == -ENOMEM) {
1584 			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
1585 			brq->io.bdev_io_wait.bdev = bdev;
1586 			brq->io.bdev_io_wait.cb_fn = _ftl_chunk_basic_rq_write;
1587 			brq->io.bdev_io_wait.cb_arg = brq;
1588 			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &brq->io.bdev_io_wait);
1589 		} else {
1590 			ftl_abort();
1591 		}
1592 	}
1593 }
1594 
1595 static void
1596 ftl_chunk_basic_rq_write(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1597 {
1598 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1599 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1600 
1601 	brq->io.chunk = chunk;
1602 	brq->success = false;
1603 
1604 	_ftl_chunk_basic_rq_write(brq);
1605 
1606 	chunk->md->write_pointer += brq->num_blocks;
1607 	dev->stats.io_activity_total += brq->num_blocks;
1608 }
1609 
1610 static void
1611 read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1612 {
1613 	struct ftl_basic_rq *brq = arg;
1614 
1615 	ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1616 
1617 	brq->success = success;
1618 
1619 	brq->owner.cb(brq);
1620 	spdk_bdev_free_io(bdev_io);
1621 }
1622 
1623 static int
1624 ftl_chunk_basic_rq_read(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1625 {
1626 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1627 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1628 	int rc;
1629 
1630 	brq->io.chunk = chunk;
1631 	brq->success = false;
1632 
1633 	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1634 			brq->io_payload, NULL, brq->io.addr, brq->num_blocks, read_brq_end, brq);
1635 
1636 	if (spdk_likely(!rc)) {
1637 		dev->stats.io_activity_total += brq->num_blocks;
1638 	}
1639 
1640 	return rc;
1641 }
1642 
1643 static void
1644 chunk_open_cb(int status, void *ctx)
1645 {
1646 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1647 
1648 	if (spdk_unlikely(status)) {
1649 #ifdef SPDK_FTL_RETRY_ON_ERROR
1650 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1651 		return;
1652 #else
1653 		ftl_abort();
1654 #endif
1655 	}
1656 
1657 	chunk->md->state = FTL_CHUNK_STATE_OPEN;
1658 }
1659 
1660 static void
1661 ftl_chunk_open(struct ftl_nv_cache_chunk *chunk)
1662 {
1663 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1664 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1665 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1666 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1667 
1668 	if (chunk_alloc_p2l_map(chunk)) {
1669 		assert(0);
1670 		/*
1671 		 * We control the number of open chunks, so it must stay consistent with the size
1672 		 * of the chunk P2L map pool
1673 		 */
1674 		ftl_abort();
1675 		return;
1676 	}
1677 
1678 	chunk->nv_cache->chunk_open_count++;
1679 
1680 	assert(chunk->md->write_pointer == 0);
1681 	assert(chunk->md->blocks_written == 0);
1682 
1683 	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1684 	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_OPEN;
1685 	p2l_map->chunk_dma_md->p2l_map_checksum = 0;
1686 
1687 	ftl_md_persist_entry(md, get_chunk_idx(chunk), p2l_map->chunk_dma_md,
1688 			     NULL, chunk_open_cb, chunk,
1689 			     &chunk->md_persist_entry_ctx);
1690 }
1691 
1692 static void
1693 chunk_close_cb(int status, void *ctx)
1694 {
1695 	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1696 
1697 	assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);
1698 
1699 	if (spdk_likely(!status)) {
1700 		chunk->md->p2l_map_checksum = chunk->p2l_map.chunk_dma_md->p2l_map_checksum;
1701 		chunk_free_p2l_map(chunk);
1702 
1703 		assert(chunk->nv_cache->chunk_open_count > 0);
1704 		chunk->nv_cache->chunk_open_count--;
1705 
1706 		/* Chunk is full, move it to the full list */
1707 		TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
1708 		chunk->nv_cache->chunk_full_count++;
1709 
1710 		chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
1711 
1712 		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1713 	} else {
1714 #ifdef SPDK_FTL_RETRY_ON_ERROR
1715 		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1716 #else
1717 		ftl_abort();
1718 #endif
1719 	}
1720 }
1721 
1722 static void
1723 chunk_map_write_cb(struct ftl_basic_rq *brq)
1724 {
1725 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1726 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1727 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1728 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1729 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1730 	uint32_t chunk_map_crc;
1731 
1732 	if (spdk_likely(brq->success)) {
1733 		chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
1734 						   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
1735 		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1736 		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
1737 		p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
1738 		ftl_md_persist_entry(md, get_chunk_idx(chunk), chunk->p2l_map.chunk_dma_md,
1739 				     NULL, chunk_close_cb, chunk,
1740 				     &chunk->md_persist_entry_ctx);
1741 	} else {
1742 #ifdef SPDK_FTL_RETRY_ON_ERROR
1743 		/* retry */
1744 		chunk->md->write_pointer -= brq->num_blocks;
1745 		ftl_chunk_basic_rq_write(chunk, brq);
1746 #else
1747 		ftl_abort();
1748 #endif
1749 	}
1750 }
1751 
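/*
 * Closing a chunk is a two-step process: first the in-memory P2L map is written out as the
 * chunk's tail metadata, then (in chunk_map_write_cb/chunk_close_cb) the chunk metadata
 * entry is persisted with the CLOSED state and the P2L map checksum.
 */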
1752 static void
1753 ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
1754 {
1755 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1756 	struct ftl_basic_rq *brq = &chunk->metadata_rq;
1757 	void *metadata = chunk->p2l_map.chunk_map;
1758 
1759 	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
1760 	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
1761 	ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk);
1762 
1763 	assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
1764 	brq->io.addr = chunk->offset + chunk->md->write_pointer;
1765 
1766 	ftl_chunk_basic_rq_write(chunk, brq);
1767 }
1768 
1769 static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
1770 				  void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx);
1771 static void read_tail_md_cb(struct ftl_basic_rq *brq);
1772 static void recover_open_chunk_cb(struct ftl_basic_rq *brq);
1773 
1774 static void
1775 restore_chunk_close_cb(int status, void *ctx)
1776 {
1777 	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)ctx;
1778 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
1779 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1780 
1781 	if (spdk_unlikely(status)) {
1782 		parent->success = false;
1783 	} else {
1784 		chunk->md->p2l_map_checksum = p2l_map->chunk_dma_md->p2l_map_checksum;
1785 		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1786 	}
1787 
1788 	read_tail_md_cb(parent);
1789 }
1790 
1791 static void
1792 restore_fill_p2l_map_cb(struct ftl_basic_rq *parent)
1793 {
1794 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
1795 	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1796 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1797 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1798 	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1799 	uint32_t chunk_map_crc;
1800 
1801 	/* Set original callback */
1802 	ftl_basic_rq_set_owner(parent, recover_open_chunk_cb, parent->owner.priv);
1803 
1804 	if (spdk_unlikely(!parent->success)) {
1805 		read_tail_md_cb(parent);
1806 		return;
1807 	}
1808 
1809 	chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
1810 					   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
1811 	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1812 	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
1813 	p2l_map->chunk_dma_md->write_pointer = chunk->nv_cache->chunk_blocks;
1814 	p2l_map->chunk_dma_md->blocks_written = chunk->nv_cache->chunk_blocks;
1815 	p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
1816 
1817 	ftl_md_persist_entry(md, get_chunk_idx(chunk), p2l_map->chunk_dma_md, NULL,
1818 			     restore_chunk_close_cb, parent, &chunk->md_persist_entry_ctx);
1819 }
1820 
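/*
 * Write the rebuilt tail metadata (P2L map) of an open chunk being recovered.
 * restore_fill_p2l_map_cb() continues the close sequence on completion.
 */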
1821 static void
1822 restore_fill_tail_md(struct ftl_basic_rq *parent, struct ftl_nv_cache_chunk *chunk)
1823 {
1824 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1825 	void *metadata;
1826 
1827 	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
1828 
1829 	metadata = chunk->p2l_map.chunk_map;
1830 	ftl_basic_rq_init(dev, parent, metadata, chunk->nv_cache->tail_md_chunk_blocks);
1831 	ftl_basic_rq_set_owner(parent, restore_fill_p2l_map_cb, parent->owner.priv);
1832 
1833 	parent->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
1834 	parent->io.chunk = chunk;
1835 
1836 	ftl_chunk_basic_rq_write(chunk, parent);
1837 }
1838 
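/*
 * Completion of a user data read issued by restore_open_chunk(). Rebuilds the
 * P2L map entries from the per-block VSS metadata and keeps reading in xfer_size
 * batches until the whole data area of the chunk has been scanned, then writes
 * the rebuilt tail metadata.
 */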
1839 static void
1840 read_open_chunk_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1841 {
1842 	struct ftl_rq *rq = (struct ftl_rq *)cb_arg;
1843 	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)rq->owner.priv;
1844 	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
1845 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1846 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1847 	union ftl_md_vss *md;
1848 	uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
1849 	uint64_t len = bdev_io->u.bdev.num_blocks;
1850 	ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);
1851 	int rc;
1852 
1853 	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);
1854 
1855 	spdk_bdev_free_io(bdev_io);
1856 
1857 	if (!success) {
1858 		parent->success = false;
1859 		read_tail_md_cb(parent);
1860 		return;
1861 	}
1862 
1863 	while (rq->iter.idx < rq->iter.count) {
1864 		/* Get metadata */
1865 		md = rq->entries[rq->iter.idx].io_md;
1866 		if (md->nv_cache.seq_id != chunk->md->seq_id) {
1867 			md->nv_cache.lba = FTL_LBA_INVALID;
1868 		}
1869 		/*
1870 		 * The P2L map contains effectively random data at this point (it holds arbitrary
1871 		 * blocks from a tail md that may never have been filled), so even FTL_LBA_INVALID
1872 		 * needs to be set explicitly
1873 		 */
1874 		ftl_chunk_set_addr(chunk, md->nv_cache.lba, addr + rq->iter.idx);
1875 		rq->iter.idx++;
1876 	}
1877 
1878 	if (cache_offset + len < chunk->offset + chunk_tail_md_offset(nv_cache)) {
1879 		cache_offset += len;
1880 		len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - cache_offset);
1881 		rq->iter.idx = 0;
1882 		rq->iter.count = len;
1883 
1884 		rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
1885 				nv_cache->cache_ioch,
1886 				rq->io_payload,
1887 				rq->io_md,
1888 				cache_offset, len,
1889 				read_open_chunk_cb,
1890 				rq);
1891 
1892 		if (rc) {
1893 			ftl_rq_del(rq);
1894 			parent->success = false;
1895 			read_tail_md_cb(parent);
1896 			return;
1897 		}
1898 	} else {
1899 		ftl_rq_del(rq);
1900 		restore_fill_tail_md(parent, chunk);
1901 	}
1902 }
1903 
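/*
 * Rebuild the P2L map of an open chunk during recovery by reading back its user
 * data (along with the VSS metadata) in xfer_size batches; once the scan is done
 * the tail metadata is written and the chunk gets closed.
 */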
1904 static void
1905 restore_open_chunk(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *parent)
1906 {
1907 	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1908 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1909 	struct ftl_rq *rq;
1910 	uint64_t addr;
1911 	uint64_t len = dev->xfer_size;
1912 	int rc;
1913 
1914 	/*
1915 	 * Prefill the P2L map with FTL_LBA_INVALID before rebuilding it from user data
1916 	 * TODO this is needed because the tail md blocks (the P2L map itself) are also represented in the P2L map, instead of just the user data region
1917 	 */
1918 	memset(chunk->p2l_map.chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
1919 
1920 	/* Need to read user data, recalculate chunk's P2L and write tail md with it */
1921 	rq = ftl_rq_new(dev, dev->nv_cache.md_size);
1922 	if (!rq) {
1923 		parent->success = false;
1924 		read_tail_md_cb(parent);
1925 		return;
1926 	}
1927 
1928 	rq->owner.priv = parent;
1929 	rq->iter.idx = 0;
1930 	rq->iter.count = len;
1931 
1932 	addr = chunk->offset;
1933 
1934 	len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - addr);
1935 
1936 	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
1937 			nv_cache->cache_ioch,
1938 			rq->io_payload,
1939 			rq->io_md,
1940 			addr, len,
1941 			read_open_chunk_cb,
1942 			rq);
1943 
1944 	if (rc) {
1945 		ftl_rq_del(rq);
1946 		parent->success = false;
1947 		read_tail_md_cb(parent);
1948 	}
1949 }
1950 
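/* Trampoline handing a completed basic rq back to its owner callback */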
1951 static void
1952 read_tail_md_cb(struct ftl_basic_rq *brq)
1953 {
1954 	brq->owner.cb(brq);
1955 }
1956 
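/*
 * Read the chunk's tail metadata (P2L map) into its P2L map buffer and invoke
 * cb on completion.
 */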
1957 static int
1958 ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
1959 		       void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx)
1960 {
1961 	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1962 	void *metadata;
1963 	int rc;
1964 
1965 	metadata = chunk->p2l_map.chunk_map;
1966 	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
1967 	ftl_basic_rq_set_owner(brq, cb, cb_ctx);
1968 
1969 	brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
1970 	rc = ftl_chunk_basic_rq_read(chunk, brq);
1971 
1972 	return rc;
1973 }
1974 
1975 struct restore_chunk_md_ctx {
1976 	ftl_chunk_md_cb cb;
1977 	void *cb_ctx;
1978 	int status;
1979 	uint64_t qd;
1980 	uint64_t id;
1981 };
1982 
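/* Sanity check: the per-state chunk counters must add up to the total chunk count */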
1983 static inline bool
1984 is_chunk_count_valid(struct ftl_nv_cache *nv_cache)
1985 {
1986 	uint64_t chunk_count = 0;
1987 
1988 	chunk_count += nv_cache->chunk_open_count;
1989 	chunk_count += nv_cache->chunk_free_count;
1990 	chunk_count += nv_cache->chunk_full_count;
1991 	chunk_count += nv_cache->chunk_comp_count;
1992 
1993 	return chunk_count == nv_cache->chunk_count;
1994 }
1995 
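/*
 * Completion of a single tail metadata read issued by ftl_mngt_nv_cache_walk_tail_md().
 * Invokes the user callback on success, records any error, releases the chunk's P2L map
 * and re-enters the walk via ftl_mngt_continue_step().
 */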
1996 static void
1997 walk_tail_md_cb(struct ftl_basic_rq *brq)
1998 {
1999 	struct ftl_mngt_process *mngt = brq->owner.priv;
2000 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2001 	struct restore_chunk_md_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
2002 	int rc = 0;
2003 
2004 	if (brq->success) {
2005 		rc = ctx->cb(chunk, ctx->cb_ctx);
2006 	} else {
2007 		rc = -EIO;
2008 	}
2009 
2010 	if (rc) {
2011 		ctx->status = rc;
2012 	}
2013 	ctx->qd--;
2014 	chunk_free_p2l_map(chunk);
2015 	ftl_mngt_continue_step(mngt);
2016 }
2017 
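/*
 * Walk all chunks marked for recovery (optionally skipping those closed at or before
 * seq_id), read their tail metadata and invoke cb for each. Multiple reads are kept in
 * flight, bounded by the free P2L map pool; the step finishes only when all reads have
 * completed and every chunk has been visited.
 */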
2018 static void
2019 ftl_mngt_nv_cache_walk_tail_md(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2020 			       uint64_t seq_id, ftl_chunk_md_cb cb, void *cb_ctx)
2021 {
2022 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2023 	struct restore_chunk_md_ctx *ctx;
2024 
2025 	ctx = ftl_mngt_get_step_ctx(mngt);
2026 	if (!ctx) {
2027 		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*ctx))) {
2028 			ftl_mngt_fail_step(mngt);
2029 			return;
2030 		}
2031 		ctx = ftl_mngt_get_step_ctx(mngt);
2032 		assert(ctx);
2033 
2034 		ctx->cb = cb;
2035 		ctx->cb_ctx = cb_ctx;
2036 	}
2037 
2038 	/*
2039 	 * This function generates a high queue depth and will utilize ftl_mngt_continue_step during completions to make sure all chunks
2040 	 * are processed before returning an error (if any were found) or continuing on.
2041 	 */
2042 	if (0 == ctx->qd && ctx->id == nvc->chunk_count) {
2043 		if (!is_chunk_count_valid(nvc)) {
2044 			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2045 			assert(false);
2046 			ctx->status = -EINVAL;
2047 		}
2048 
2049 		if (ctx->status) {
2050 			ftl_mngt_fail_step(mngt);
2051 		} else {
2052 			ftl_mngt_next_step(mngt);
2053 		}
2054 		return;
2055 	}
2056 
2057 	while (ctx->id < nvc->chunk_count) {
2058 		struct ftl_nv_cache_chunk *chunk = &nvc->chunks[ctx->id];
2059 		int rc;
2060 
2061 		if (!chunk->recovery) {
2062 			/* This chunk is empty and not used in recovery */
2063 			ctx->id++;
2064 			continue;
2065 		}
2066 
2067 		if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
2068 			ctx->id++;
2069 			continue;
2070 		}
2071 
2072 		if (chunk_alloc_p2l_map(chunk)) {
2073 			/* No more free P2L maps, break and continue later */
2074 			break;
2075 		}
2076 		ctx->id++;
2077 
2078 		rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, walk_tail_md_cb, mngt);
2079 
2080 		if (0 == rc) {
2081 			ctx->qd++;
2082 		} else {
2083 			chunk_free_p2l_map(chunk);
2084 			ctx->status = rc;
2085 		}
2086 	}
2087 
2088 	if (0 == ctx->qd) {
2089 		/*
2090 		 * A queue depth of zero can happen when all leftover chunks are in the free state.
2091 		 * Additionally, ftl_chunk_read_tail_md could fail starting with the first IO in a given batch.
2092 		 * To streamline all potential error handling (since many chunks read their P2L at the same time),
2093 		 * we're using ftl_mngt_continue_step to arrive at the same spot that checks for mngt step end (see beginning of function).
2094 		 */
2095 		ftl_mngt_continue_step(mngt);
2096 	}
2097 
2098 }
2099 
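/*
 * Restore the L2P from the NV cache by walking the chunks' tail metadata, skipping
 * chunks already covered by the last checkpoint.
 */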
2100 void
2101 ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2102 			      ftl_chunk_md_cb cb, void *cb_ctx)
2103 {
2104 	ftl_mngt_nv_cache_walk_tail_md(dev, mngt, dev->sb->ckpt_seq_id, cb, cb_ctx);
2105 }
2106 
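/*
 * Completion of ftl_md_restore() for the chunk metadata region. Distributes chunks
 * onto the open/full lists according to their persisted state and marks non-empty
 * chunks for recovery.
 */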
2107 static void
2108 restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
2109 {
2110 	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
2111 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2112 	struct ftl_nv_cache_chunk *chunk;
2113 	uint64_t i;
2114 
2115 	if (status) {
2116 		/* Restore error, end step */
2117 		ftl_mngt_fail_step(mngt);
2118 		return;
2119 	}
2120 
2121 	for (i = 0; i < nvc->chunk_count; i++) {
2122 		chunk = &nvc->chunks[i];
2123 
2124 		switch (chunk->md->state) {
2125 		case FTL_CHUNK_STATE_FREE:
2126 			break;
2127 		case FTL_CHUNK_STATE_OPEN:
2128 			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2129 			nvc->chunk_free_count--;
2130 
2131 			TAILQ_INSERT_TAIL(&nvc->chunk_open_list, chunk, entry);
2132 			nvc->chunk_open_count++;
2133 
2134 			/* Chunk is not empty, mark it to be recovered */
2135 			chunk->recovery = true;
2136 			break;
2137 		case FTL_CHUNK_STATE_CLOSED:
2138 			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2139 			nvc->chunk_free_count--;
2140 
2141 			TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2142 			nvc->chunk_full_count++;
2143 
2144 			/* Chunk is not empty, mark it to be recovered */
2145 			chunk->recovery = true;
2146 			break;
2147 		default:
2148 			status = -EINVAL;
2149 		}
2150 	}
2151 
2152 	if (status) {
2153 		ftl_mngt_fail_step(mngt);
2154 	} else {
2155 		ftl_mngt_next_step(mngt);
2156 	}
2157 }
2158 
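/* Management step: reload the chunk metadata region and rebuild the chunk lists from it */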
2159 void
2160 ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2161 {
2162 	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2163 
2164 	md->owner.cb_ctx = mngt;
2165 	md->cb = restore_chunk_state_cb;
2166 	ftl_md_restore(md);
2167 }
2168 
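/*
 * Completion of recovering a single open chunk. On success the chunk is moved from
 * the open list to the full list and marked as fully written; the step is then
 * re-entered to pick up the next open chunk.
 */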
2169 static void
2170 recover_open_chunk_cb(struct ftl_basic_rq *brq)
2171 {
2172 	struct ftl_mngt_process *mngt = brq->owner.priv;
2173 	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2174 	struct ftl_nv_cache *nvc = chunk->nv_cache;
2175 	struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
2176 
2177 	chunk_free_p2l_map(chunk);
2178 
2179 	if (!brq->success) {
2180 		FTL_ERRLOG(dev, "Recovery chunk ERROR, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2181 			   chunk->md->seq_id);
2182 		ftl_mngt_fail_step(mngt);
2183 		return;
2184 	}
2185 
2186 	FTL_NOTICELOG(dev, "Recovered chunk, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2187 		      chunk->md->seq_id);
2188 
2189 	TAILQ_REMOVE(&nvc->chunk_open_list, chunk, entry);
2190 	nvc->chunk_open_count--;
2191 
2192 	TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2193 	nvc->chunk_full_count++;
2194 
2195 	/* The chunk is now closed */
2196 	chunk->md->write_pointer = nvc->chunk_blocks;
2197 	chunk->md->blocks_written = nvc->chunk_blocks;
2198 
2199 	ftl_mngt_continue_step(mngt);
2200 }
2201 
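/*
 * Management step recovering chunks found open during startup. Chunks are taken from
 * the open list one at a time, their P2L maps are rebuilt from user data and the chunks
 * closed; once the list is empty the NV cache state is loaded and the step completes.
 */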
2202 void
2203 ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2204 {
2205 	struct ftl_nv_cache *nvc = &dev->nv_cache;
2206 	struct ftl_nv_cache_chunk *chunk;
2207 	struct ftl_basic_rq *brq = ftl_mngt_get_step_ctx(mngt);
2208 
2209 	if (!brq) {
2210 		if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2211 			FTL_NOTICELOG(dev, "No open chunks to recover P2L\n");
2212 			ftl_mngt_next_step(mngt);
2213 			return;
2214 		}
2215 
2216 		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*brq))) {
2217 			ftl_mngt_fail_step(mngt);
2218 			return;
2219 		}
2220 		brq = ftl_mngt_get_step_ctx(mngt);
2221 		ftl_basic_rq_set_owner(brq, recover_open_chunk_cb, mngt);
2222 	}
2223 
2224 	if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2225 		if (!is_chunk_count_valid(nvc)) {
2226 			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2227 			ftl_mngt_fail_step(mngt);
2228 			return;
2229 		}
2230 
2231 		/*
2232 		 * Now that all chunks are loaded and closed, do the final step of
2233 		 * restoring the chunk state
2234 		 */
2235 		if (ftl_nv_cache_load_state(nvc)) {
2236 			ftl_mngt_fail_step(mngt);
2237 		} else {
2238 			ftl_mngt_next_step(mngt);
2239 		}
2240 	} else {
2241 		chunk = TAILQ_FIRST(&nvc->chunk_open_list);
2242 		if (chunk_alloc_p2l_map(chunk)) {
2243 			ftl_mngt_fail_step(mngt);
2244 			return;
2245 		}
2246 
2247 		brq->io.chunk = chunk;
2248 
2249 		FTL_NOTICELOG(dev, "Start recovery open chunk, offset = %"PRIu64", seq id %"PRIu64"\n",
2250 			      chunk->offset, chunk->md->seq_id);
2251 		restore_open_chunk(chunk, brq);
2252 	}
2253 }
2254 
2255 int
2256 ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache)
2257 {
2258 	/* chunk_current migrates to the closed state while being closed; any other chunks should
2259 	 * already have been moved to the free chunk list. Also need to wait for free md requests */
2260 	return nv_cache->chunk_open_count == 0 && nv_cache->chunk_free_persist_count == 0;
2261 }
2262 
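/*
 * Halt the NV cache: return untouched open chunks to the free state and close the
 * chunk currently being written to by padding its remaining blocks.
 */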
2263 void
2264 ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
2265 {
2266 	struct ftl_nv_cache_chunk *chunk;
2267 	uint64_t free_space;
2268 
2269 	nv_cache->halt = true;
2270 
2271 	/* Set chunks on the open list back to the free state since no user data has been written to them */
2272 	while (!TAILQ_EMPTY(&nv_cache->chunk_open_list)) {
2273 		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2274 
2275 		/* Chunks are moved between lists on metadata update submission, but their state only
2276 		 * changes on completion. Break early in that case so that all the necessary resources
2277 		 * are freed (during the next pass(es) of ftl_nv_cache_halt).
2278 		 */
2279 		if (chunk->md->state != FTL_CHUNK_STATE_OPEN) {
2280 			break;
2281 		}
2282 
2283 		TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
2284 		chunk_free_p2l_map(chunk);
2285 		memset(chunk->md, 0, sizeof(*chunk->md));
2286 		assert(nv_cache->chunk_open_count > 0);
2287 		nv_cache->chunk_open_count--;
2288 	}
2289 
2290 	/* Close the current chunk by skipping all unwritten blocks */
2291 	chunk = nv_cache->chunk_current;
2292 	if (chunk != NULL) {
2293 		nv_cache->chunk_current = NULL;
2294 		if (chunk_is_closed(chunk)) {
2295 			return;
2296 		}
2297 
2298 		free_space = chunk_get_free_space(nv_cache, chunk);
2299 		chunk->md->blocks_skipped = free_space;
2300 		chunk->md->blocks_written += free_space;
2301 		chunk->md->write_pointer += free_space;
2302 		ftl_chunk_close(chunk);
2303 	}
2304 }
2305 
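/*
 * Acquire a sequence id for a trim operation. If a chunk is currently being written to,
 * its remaining space is padded, the chunk is closed once full and the next sequence id
 * is returned; otherwise the sequence id of the first open chunk (or 0 when none is
 * available) is used.
 */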
2306 uint64_t
2307 ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache)
2308 {
2309 	struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
2310 	uint64_t seq_id, free_space;
2311 
2312 	if (!chunk) {
2313 		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2314 		if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
2315 			return chunk->md->seq_id;
2316 		} else {
2317 			return 0;
2318 		}
2319 	}
2320 
2321 	if (chunk_is_closed(chunk)) {
2322 		return 0;
2323 	}
2324 
2325 	seq_id = nv_cache->chunk_current->md->seq_id;
2326 	free_space = chunk_get_free_space(nv_cache, chunk);
2327 
2328 	chunk->md->blocks_skipped = free_space;
2329 	chunk->md->blocks_written += free_space;
2330 	chunk->md->write_pointer += free_space;
2331 	if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
2332 		ftl_chunk_close(chunk);
2333 	}
2334 	nv_cache->chunk_current = NULL;
2335 
2336 	seq_id++;
2337 	return seq_id;
2338 }
2339