Lines Matching defs:chunk
77 get_chunk_idx(struct ftl_nv_cache_chunk *chunk)
79 struct ftl_nv_cache_chunk *first_chunk = chunk->nv_cache->chunks;
81 return (chunk->offset - first_chunk->offset) / chunk->nv_cache->chunk_blocks;
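get_chunk_idx() above derives a chunk's index purely from its offset: the distance from the first chunk, divided by the fixed chunk size in blocks. A minimal standalone sketch of that arithmetic, using a simplified stand-in struct and made-up geometry rather than the real ftl_nv_cache types:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the real ftl_nv_cache structure (assumption). */
struct nv_cache_sketch {
	uint64_t chunk_blocks;	/* blocks per chunk */
	uint64_t first_offset;	/* offset of the first chunk */
};

static uint64_t
sketch_get_chunk_idx(const struct nv_cache_sketch *nvc, uint64_t chunk_offset)
{
	/* Same arithmetic as get_chunk_idx(): distance from the first chunk,
	 * divided by the fixed chunk size in blocks. */
	return (chunk_offset - nvc->first_offset) / nvc->chunk_blocks;
}

int
main(void)
{
	struct nv_cache_sketch nvc = { .chunk_blocks = 1024, .first_offset = 2048 };

	/* A chunk starting at block 2048 + 3 * 1024 is chunk index 3. */
	printf("idx = %"PRIu64"\n", sketch_get_chunk_idx(&nvc, 2048 + 3 * 1024));
	return 0;
}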
120 /* Move the dummy region along with the active chunk */
133 /* Move to the next chunk */
148 /* Scrub the next chunk */
172 /* Setup a dummy region for the first chunk */
204 /* Scrub the first chunk */
219 struct ftl_nv_cache_chunk *chunk;
241 * Initialize chunk info
262 /* First chunk metadata */
269 chunk = nv_cache->chunks;
271 for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
272 chunk->nv_cache = nv_cache;
273 chunk->md = md;
274 chunk->md->version = FTL_NVC_VERSION_CURRENT;
276 chunk->offset = offset;
279 if (nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset)) {
281 TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
283 chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
285 TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
313 /* One entry per open chunk */
322 /* Each compactor can be reading a different chunk, which it needs to switch to the free state at the end,
383 struct ftl_nv_cache_chunk *chunk)
385 assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <=
387 return nv_cache->chunk_blocks - chunk->md->write_pointer -
392 chunk_is_closed(struct ftl_nv_cache_chunk *chunk)
394 return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
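chunk_get_free_space() reserves room for the tail metadata at the end of every chunk, while chunk_is_closed() treats a chunk as closed only once its write pointer covers the whole chunk. A hedged sketch of both checks, assuming 1024-block chunks with 2 tail metadata blocks (the real values come from nv_cache and chunk->md):

#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed example geometry: 1024-block chunks, 2 blocks of tail metadata. */
#define SKETCH_CHUNK_BLOCKS	1024
#define SKETCH_TAIL_MD_BLOCKS	2

static uint64_t
sketch_free_space(uint64_t write_pointer)
{
	/* User data may only grow up to the start of the tail metadata. */
	assert(write_pointer + SKETCH_TAIL_MD_BLOCKS <= SKETCH_CHUNK_BLOCKS);
	return SKETCH_CHUNK_BLOCKS - write_pointer - SKETCH_TAIL_MD_BLOCKS;
}

static bool
sketch_is_closed(uint64_t write_pointer)
{
	/* Closed means the write pointer covers the whole chunk, tail metadata included. */
	return write_pointer == SKETCH_CHUNK_BLOCKS;
}

int
main(void)
{
	printf("free = %"PRIu64", closed = %d\n",
	       sketch_free_space(1000), sketch_is_closed(SKETCH_CHUNK_BLOCKS));
	return 0;
}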
397 static void ftl_chunk_close(struct ftl_nv_cache_chunk *chunk);
405 struct ftl_nv_cache_chunk *chunk;
408 chunk = nv_cache->chunk_current;
410 if (chunk && chunk_is_closed(chunk)) {
411 chunk = NULL;
414 if (!chunk) {
415 chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
416 if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
417 TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
418 nv_cache->chunk_current = chunk;
424 free_space = chunk_get_free_space(nv_cache, chunk);
427 /* Enough space in chunk */
430 address = chunk->offset + chunk->md->write_pointer;
432 /* Set chunk in IO */
433 io->nv_cache_chunk = chunk;
436 chunk->md->write_pointer += num_blocks;
451 chunk->md->blocks_skipped = free_space;
452 chunk->md->blocks_written += free_space;
453 chunk->md->write_pointer += free_space;
455 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
456 ftl_chunk_close(chunk);
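The allocation path above either carves num_blocks out of the current chunk or, when the remaining space is too small, writes the remainder off as skipped blocks and closes the chunk. A condensed sketch of that decision, with a simplified chunk record standing in for ftl_nv_cache_chunk and assumed geometry:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_BLOCKS	1024	/* assumed chunk size in blocks */
#define TAIL_MD_BLOCKS	2	/* assumed tail metadata size */

struct chunk_sketch {
	uint64_t offset;	/* first block of the chunk */
	uint64_t write_pointer;
	uint64_t blocks_written;
	uint64_t blocks_skipped;
	bool closed;
};

/* Try to allocate num_blocks; returns the cache address or UINT64_MAX. */
static uint64_t
sketch_alloc(struct chunk_sketch *chunk, uint64_t num_blocks)
{
	uint64_t free_space = CHUNK_BLOCKS - chunk->write_pointer - TAIL_MD_BLOCKS;

	if (free_space >= num_blocks) {
		/* Enough space: the address is chunk offset plus write pointer. */
		uint64_t addr = chunk->offset + chunk->write_pointer;

		chunk->write_pointer += num_blocks;
		return addr;
	}

	/* Not enough space: account the remainder as skipped and close the chunk. */
	chunk->blocks_skipped = free_space;
	chunk->blocks_written += free_space;
	chunk->write_pointer += free_space;
	if (chunk->blocks_written == CHUNK_BLOCKS - TAIL_MD_BLOCKS) {
		/* The real code calls ftl_chunk_close() here, which appends the tail metadata. */
		chunk->closed = true;
	}
	return UINT64_MAX;
}

int
main(void)
{
	struct chunk_sketch chunk = {
		.offset = 4096, .write_pointer = 1020, .blocks_written = 1020,
	};

	/* Only 2 user blocks remain (1024 - 1020 - 2), so an 8-block request
	 * falls through to the skip-and-close path. */
	uint64_t addr = sketch_alloc(&chunk, 8);

	printf("addr = %"PRIu64", skipped = %"PRIu64", closing = %d\n",
	       addr, chunk.blocks_skipped, chunk.closed);
	return 0;
}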
466 struct ftl_nv_cache_chunk *chunk = io->nv_cache_chunk;
473 metadata->nv_cache.seq_id = chunk->md->seq_id;
484 chunk_advance_blocks(struct ftl_nv_cache *nv_cache, struct ftl_nv_cache_chunk *chunk,
487 chunk->md->blocks_written += advanced_blocks;
489 assert(chunk->md->blocks_written <= nv_cache->chunk_blocks);
491 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
492 ftl_chunk_close(chunk);
497 chunk_user_blocks_written(struct ftl_nv_cache_chunk *chunk)
499 return chunk->md->blocks_written - chunk->md->blocks_skipped -
500 chunk->nv_cache->tail_md_chunk_blocks;
504 is_chunk_compacted(struct ftl_nv_cache_chunk *chunk)
506 assert(chunk->md->blocks_written != 0);
508 if (chunk_user_blocks_written(chunk) == chunk->md->blocks_compacted) {
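Compaction of a chunk is complete when the compacted block count catches up with the user data it holds, i.e. everything written minus the skipped padding and the tail metadata. A small sketch of that accounting under the same assumed geometry:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_BLOCKS	1024	/* assumed */
#define TAIL_MD_BLOCKS	2	/* assumed */

static uint64_t
sketch_user_blocks_written(uint64_t blocks_written, uint64_t blocks_skipped)
{
	/* Everything written, minus the skip padding and the tail metadata. */
	return blocks_written - blocks_skipped - TAIL_MD_BLOCKS;
}

static bool
sketch_is_compacted(uint64_t blocks_written, uint64_t blocks_skipped,
		    uint64_t blocks_compacted)
{
	return sketch_user_blocks_written(blocks_written, blocks_skipped) == blocks_compacted;
}

int
main(void)
{
	/* A full 1024-block chunk with 3 skipped blocks holds 1019 user blocks. */
	printf("user blocks = %"PRIu64", compacted = %d\n",
	       sketch_user_blocks_written(CHUNK_BLOCKS, 3),
	       sketch_is_compacted(CHUNK_BLOCKS, 3, 1019));
	return 0;
}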
516 ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk)
518 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
519 struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
532 ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk)
534 struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
536 ftl_mempool_put(chunk->nv_cache->chunk_md_pool, p2l_map->chunk_dma_md);
540 static void chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk);
543 ftl_chunk_free(struct ftl_nv_cache_chunk *chunk)
545 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
547 /* Reset chunk */
548 ftl_nv_cache_chunk_md_initialize(chunk->md);
550 TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry);
555 ftl_chunk_alloc_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
557 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
558 struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
570 ftl_chunk_free_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
572 struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
574 ftl_mempool_put(chunk->nv_cache->free_chunk_md_pool, p2l_map->chunk_dma_md);
581 struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
584 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
587 TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
590 chunk->md->state = FTL_CHUNK_STATE_FREE;
591 chunk->md->close_seq_id = 0;
592 ftl_chunk_free_chunk_free_entry(chunk);
595 ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
609 struct ftl_nv_cache_chunk *tchunk, *chunk = NULL;
612 TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) {
613 p2l_map = &chunk->p2l_map;
614 rc = ftl_chunk_alloc_chunk_free_entry(chunk);
619 TAILQ_REMOVE(&nv_cache->needs_free_persist_list, chunk, entry);
621 memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
626 ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
627 chunk_free_cb, chunk, &chunk->md_persist_entry_ctx);
632 compaction_stats_update(struct ftl_nv_cache_chunk *chunk)
634 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
638 if (spdk_unlikely(chunk->compaction_length_tsc == 0)) {
654 *ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc;
655 chunk->compaction_length_tsc = 0;
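The statistic stored above is bytes moved per TSC tick: compacted blocks times the block size, divided by the accumulated compaction interval. A worked sketch assuming a 4 KiB block size and a made-up TSC interval:

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 4096	/* assumed FTL block size in bytes */

int
main(void)
{
	uint64_t blocks_compacted = 1019;	/* user blocks of one chunk */
	uint64_t length_tsc = 52000000;		/* made-up elapsed TSC ticks */

	/* Same ratio as compaction_stats_update(): bytes moved per TSC tick. */
	double bw = (double)blocks_compacted * BLOCK_SIZE / length_tsc;

	printf("compaction bandwidth = %.3f bytes/tick\n", bw);
	return 0;
}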
662 chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)
664 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
667 chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
668 chunk->compaction_start_tsc = tsc;
670 chunk->md->blocks_compacted += num_blocks;
671 assert(chunk->md->blocks_compacted <= chunk_user_blocks_written(chunk));
672 if (!is_chunk_compacted(chunk)) {
676 /* Remove chunk from compacted list */
677 TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry);
680 compaction_stats_update(chunk);
682 chunk_free_p2l_map(chunk);
684 ftl_chunk_free(chunk);
825 is_chunk_to_read(struct ftl_nv_cache_chunk *chunk)
827 assert(chunk->md->blocks_written != 0);
829 if (chunk_user_blocks_written(chunk) == chunk->md->read_pointer) {
839 struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
840 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
844 read_chunk_p2l_map(chunk);
850 TAILQ_INSERT_TAIL(&nv_cache->chunk_comp_list, chunk, entry);
853 static int chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk);
854 static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
860 struct ftl_nv_cache_chunk *chunk = arg;
863 if (chunk_alloc_p2l_map(chunk)) {
867 rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, read_chunk_p2l_map_cb, NULL);
870 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
872 struct spdk_bdev_io_wait_entry *wait_entry = &chunk->metadata_rq.io.bdev_io_wait;
876 wait_entry->cb_arg = chunk;
887 struct ftl_nv_cache_chunk *chunk = NULL;
893 chunk = TAILQ_FIRST(&nv_cache->chunk_full_list);
894 TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry);
895 assert(chunk->md->write_pointer);
898 read_chunk_p2l_map(chunk);
905 struct ftl_nv_cache_chunk *chunk = NULL;
911 chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
912 if (!is_chunk_to_read(chunk)) {
916 return chunk;
920 chunk_blocks_to_read(struct ftl_nv_cache_chunk *chunk)
925 assert(chunk->md->blocks_written >= chunk->md->blocks_skipped);
926 blocks_written = chunk_user_blocks_written(chunk);
928 assert(blocks_written >= chunk->md->read_pointer);
929 blocks_to_read = blocks_written - chunk->md->read_pointer;
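During compaction, the amount left to read from a chunk is the user data written so far minus the read pointer. A sketch of that computation under the assumed geometry used above:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TAIL_MD_BLOCKS 2	/* assumed */

static uint64_t
sketch_blocks_to_read(uint64_t blocks_written, uint64_t blocks_skipped, uint64_t read_pointer)
{
	uint64_t user_blocks;

	assert(blocks_written >= blocks_skipped);

	/* User data only: written blocks minus the skip padding and tail metadata. */
	user_blocks = blocks_written - blocks_skipped - TAIL_MD_BLOCKS;

	assert(user_blocks >= read_pointer);
	return user_blocks - read_pointer;
}

int
main(void)
{
	/* 1024 blocks written, none skipped, 700 already read back: 322 remain. */
	printf("to read = %"PRIu64"\n", sketch_blocks_to_read(1024, 0, 700));
	return 0;
}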
994 compaction_chunk_read_pos(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk)
997 uint64_t skip, to_read = chunk_blocks_to_read(chunk);
1003 start = ftl_addr_from_nvc_offset(dev, chunk->offset + chunk->md->read_pointer);
1007 chunk->md->read_pointer += to_read;
1008 chunk_compaction_advance(chunk, to_read);
1015 chunk->md->read_pointer += skip;
1016 chunk_compaction_advance(chunk, skip);
1026 struct ftl_nv_cache_chunk *chunk = NULL;
1029 while (!chunk) {
1030 /* Get currently handled chunk */
1031 chunk = get_chunk_for_compaction(nv_cache);
1032 if (!chunk) {
1035 chunk->compaction_start_tsc = spdk_thread_get_last_tsc(spdk_get_thread());
1037 /* Get next read position in chunk */
1038 addr = compaction_chunk_read_pos(dev, chunk);
1040 chunk = NULL;
1046 /* Set entry address info and chunk */
1048 entry->owner.priv = chunk;
1049 entry->lba = ftl_chunk_map_get_lba(chunk, chunk->md->read_pointer);
1051 /* Move read pointer in the chunk */
1052 chunk->md->read_pointer++;
1134 struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
1139 chunk_compaction_advance(chunk, 1);
1161 struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
1167 chunk_compaction_advance(chunk, 1);
1173 entry->seq_id = chunk->md->seq_id;
1179 chunk_compaction_advance(chunk, 1);
1363 ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
1366 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1367 struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1373 ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset)
1375 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1376 struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1382 ftl_nv_cache_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
1384 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1388 offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
1389 ftl_chunk_map_set_lba(chunk, offset, lba);
1395 struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks;
1399 assert(chunk != NULL);
1400 chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks;
1401 chunk += chunk_idx;
1403 return chunk;
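Resolving a cache address is a two-step computation visible above: integer division by the chunk size selects the chunk, and the remainder relative to that chunk's start is the slot in its P2L map. A minimal sketch with an assumed layout:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_BLOCKS	1024	/* assumed chunk size in blocks */
#define FIRST_OFFSET	2048	/* assumed offset of the first chunk */

int
main(void)
{
	uint64_t cache_offset = FIRST_OFFSET + 5 * CHUNK_BLOCKS + 37;

	/* Division by the chunk size selects the chunk... */
	uint64_t chunk_idx = (cache_offset - FIRST_OFFSET) / CHUNK_BLOCKS;
	/* ...and the distance from that chunk's start is the slot in its P2L map. */
	uint64_t chunk_offset = FIRST_OFFSET + chunk_idx * CHUNK_BLOCKS;
	uint64_t map_slot = (cache_offset - chunk_offset) % CHUNK_BLOCKS;

	printf("chunk %"PRIu64", slot %"PRIu64"\n", chunk_idx, map_slot);
	return 0;
}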
1409 struct ftl_nv_cache_chunk *chunk;
1411 chunk = ftl_nv_cache_get_chunk_from_addr(dev, addr);
1415 ftl_nv_cache_chunk_set_addr(chunk, lba, addr);
1457 static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk);
1468 struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);
1469 TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
1470 TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
1472 chunk->md->seq_id = ftl_get_next_seq_id(dev);
1473 ftl_chunk_open(chunk);
1510 chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk)
1512 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1513 struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1518 ftl_chunk_free_md_entry(chunk);
1525 struct ftl_nv_cache_chunk *chunk;
1536 chunk = nv_cache->chunks;
1537 if (!chunk) {
1542 for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1543 nvc_validate_md(nv_cache, chunk->md);
1545 if (chunk->md->read_pointer) {
1547 if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1557 chunk->md->read_pointer = chunk->md->blocks_compacted = 0;
1558 } else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
1559 /* Full chunk */
1560 } else if (0 == chunk->md->blocks_written) {
1561 /* Empty chunk */
1590 struct ftl_nv_cache_chunk *chunk;
1604 TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) {
1605 chunks_list[i] = chunk;
1615 chunk = chunks_list[i];
1616 TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1624 chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk)
1626 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1627 struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1638 if (ftl_chunk_alloc_md_entry(chunk)) {
1654 struct ftl_nv_cache_chunk *chunk;
1681 chunk = nv_cache->chunks;
1682 for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1683 active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1685 if (chunk->md->state == FTL_CHUNK_STATE_FREE) {
1687 chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
1689 } else if (chunk->md->state == FTL_CHUNK_STATE_INACTIVE) {
1691 chunk->md->state = FTL_CHUNK_STATE_FREE;
1697 chunk = nv_cache->chunks;
1698 for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1699 chunk->nv_cache = nv_cache;
1700 nvc_validate_md(nv_cache, chunk->md);
1702 if (offset != chunk->offset) {
1707 if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
1712 active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1714 if (chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
1720 switch (chunk->md->state) {
1722 if (chunk->md->blocks_written || chunk->md->write_pointer) {
1727 TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
1736 if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1741 TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1745 TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
1750 FTL_ERRLOG(dev, "Invalid chunk state\n");
1782 * upgrade. Recalculate the thresholds that depend on active chunk count.
1794 struct ftl_nv_cache_chunk *chunk;
1796 chunk = nv_cache->chunks;
1797 assert(chunk);
1800 for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1801 o_seq_id = spdk_max(o_seq_id, chunk->md->seq_id);
1802 c_seq_id = spdk_max(c_seq_id, chunk->md->close_seq_id);
1809 typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status);
1815 struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1821 chunk_advance_blocks(chunk->nv_cache, chunk, brq->num_blocks);
1832 struct ftl_nv_cache *nv_cache = brq->io.chunk->nv_cache;
1853 ftl_chunk_basic_rq_write(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1855 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1858 brq->io.chunk = chunk;
1863 chunk->md->write_pointer += brq->num_blocks;
1881 ftl_chunk_basic_rq_read(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1883 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1887 brq->io.chunk = chunk;
1903 struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1907 ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1914 chunk->md->state = FTL_CHUNK_STATE_OPEN;
1918 ftl_chunk_open(struct ftl_nv_cache_chunk *chunk)
1920 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1921 struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1925 if (chunk_alloc_p2l_map(chunk)) {
1928 * We control the number of open chunks and it shall be consistent with the size of chunk
1935 chunk->nv_cache->chunk_open_count++;
1937 assert(chunk->md->write_pointer == 0);
1938 assert(chunk->md->blocks_written == 0);
1941 dev->nv_cache.nvc_type->ops.on_chunk_open(dev, chunk);
1944 memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1948 ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md,
1949 NULL, chunk_open_cb, chunk,
1950 &chunk->md_persist_entry_ctx);
1956 struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1957 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1960 assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);
1963 chunk->md->p2l_map_checksum = chunk->p2l_map.chunk_dma_md->p2l_map_checksum;
1964 chunk_free_p2l_map(chunk);
1966 assert(chunk->nv_cache->chunk_open_count > 0);
1967 chunk->nv_cache->chunk_open_count--;
1970 TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
1971 chunk->nv_cache->chunk_full_count++;
1973 chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
1975 chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1977 nv_cache->nvc_type->ops.on_chunk_closed(dev, chunk);
1981 ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1991 struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1992 struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1993 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2000 chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
2001 memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
2004 ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk->p2l_map.chunk_dma_md,
2005 NULL, chunk_close_cb, chunk,
2006 &chunk->md_persist_entry_ctx);
2010 chunk->md->write_pointer -= brq->num_blocks;
2011 ftl_chunk_basic_rq_write(chunk, brq);
2019 ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
2021 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2022 struct ftl_basic_rq *brq = &chunk->metadata_rq;
2023 void *metadata = chunk->p2l_map.chunk_map;
2025 chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
2026 ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2027 ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk);
2029 assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
2030 brq->io.addr = chunk->offset + chunk->md->write_pointer;
2032 ftl_chunk_basic_rq_write(chunk, brq);
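ftl_chunk_close() writes the in-memory P2L map as tail metadata into the last blocks of the chunk itself, starting at the write pointer, which at that point equals the tail metadata offset. A short sketch of the address arithmetic only, under assumed geometry:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_BLOCKS	1024	/* assumed */
#define TAIL_MD_BLOCKS	2	/* assumed */

int
main(void)
{
	uint64_t chunk_offset = 4096;				/* assumed chunk start */
	uint64_t tail_md_offset = CHUNK_BLOCKS - TAIL_MD_BLOCKS;
	uint64_t write_pointer = tail_md_offset;		/* precondition at close time */

	/* The tail metadata request starts right where the user data ends. */
	assert(write_pointer == tail_md_offset);
	printf("tail md written at block %"PRIu64", %d blocks long\n",
	       chunk_offset + write_pointer, TAIL_MD_BLOCKS);
	return 0;
}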
2036 ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
2039 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2043 metadata = chunk->p2l_map.chunk_map;
2044 ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2047 brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2048 rc = ftl_chunk_basic_rq_read(chunk, brq);
2079 struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2084 rc = ctx->cb(chunk, ctx->cb_ctx);
2093 chunk_free_p2l_map(chunk);
2123 FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2137 struct ftl_nv_cache_chunk *chunk = &nvc->chunks[ctx->id];
2140 if (!chunk->recovery) {
2141 /* This chunk is inactive or empty and not used in recovery */
2146 if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
2151 if (chunk_alloc_p2l_map(chunk)) {
2157 rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, walk_tail_md_cb, mngt);
2162 chunk_free_p2l_map(chunk);
2191 struct ftl_nv_cache_chunk *chunk;
2201 chunk = &nvc->chunks[i];
2203 if (false == nvc->nvc_type->ops.is_chunk_active(dev, chunk->offset) &&
2204 chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
2209 if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
2214 switch (chunk->md->state) {
2218 TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2221 TAILQ_INSERT_TAIL(&nvc->chunk_open_list, chunk, entry);
2225 chunk->recovery = true;
2228 TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2231 TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2235 chunk->recovery = true;
2262 struct ftl_nv_cache_chunk *chunk;
2272 ctx->chunk = TAILQ_FIRST(&nvc->chunk_open_list);
2274 FTL_NOTICELOG(dev, "Starting recovery of open chunk, offset = %"PRIu64", seq id %"PRIu64"\n",
2275 ctx->chunk->offset, ctx->chunk->md->seq_id);
2277 if (chunk_alloc_p2l_map(ctx->chunk)) {
2300 struct ftl_nv_cache_chunk *chunk = ctx->chunk;
2302 void *p2l_map = chunk->p2l_map.chunk_map;
2304 ftl_basic_rq_init(dev, rq, p2l_map, chunk->nv_cache->tail_md_chunk_blocks);
2307 rq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2308 ftl_chunk_basic_rq_write(chunk, rq);
2316 struct ftl_nv_cache_chunk *chunk = ctx->chunk;
2317 struct ftl_nv_cache *nvc = chunk->nv_cache;
2321 *chunk->md = *chunk->p2l_map.chunk_dma_md;
2323 FTL_NOTICELOG(dev, "Recovered chunk, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2324 chunk->md->seq_id);
2326 TAILQ_REMOVE(&nvc->chunk_open_list, chunk, entry);
2329 TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2342 struct ftl_nv_cache_chunk *chunk = ctx->chunk;
2343 struct ftl_nv_cache_chunk_md *chunk_md = chunk->p2l_map.chunk_dma_md;
2346 *chunk_md = *chunk->md;
2349 chunk_md->write_pointer = chunk->nv_cache->chunk_blocks;
2350 chunk_md->blocks_written = chunk->nv_cache->chunk_blocks;
2351 chunk_md->p2l_map_checksum = spdk_crc32c_update(chunk->p2l_map.chunk_map,
2352 chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
2354 ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk_md, NULL,
2356 &chunk->md_persist_entry_ctx);
2363 struct ftl_nv_cache *nvc = ctx->chunk->nv_cache;
2365 nvc->nvc_type->ops.recover_open_chunk(dev, mngt, ctx->chunk);
2372 struct ftl_nv_cache_chunk *chunk = ctx->chunk;
2374 if (chunk->p2l_map.chunk_map) {
2375 chunk_free_p2l_map(ctx->chunk);
2381 .name = "Recover open chunk",
2399 .name = "Chunk recovery, close chunk",
2429 FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2455 * moved to free chunk list. Also need to wait for free md requests */
2462 struct ftl_nv_cache_chunk *chunk;
2469 chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2475 if (chunk->md->state != FTL_CHUNK_STATE_OPEN) {
2479 TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
2480 chunk_free_p2l_map(chunk);
2481 ftl_nv_cache_chunk_md_initialize(chunk->md);
2486 /* Close the current chunk by skipping all unwritten blocks */
2487 chunk = nv_cache->chunk_current;
2488 if (chunk != NULL) {
2490 if (chunk_is_closed(chunk)) {
2494 free_space = chunk_get_free_space(nv_cache, chunk);
2495 chunk->md->blocks_skipped = free_space;
2496 chunk->md->blocks_written += free_space;
2497 chunk->md->write_pointer += free_space;
2498 ftl_chunk_close(chunk);
2505 struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
2508 if (!chunk) {
2509 chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2510 if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
2511 return chunk->md->seq_id;
2517 if (chunk_is_closed(chunk)) {
2522 free_space = chunk_get_free_space(nv_cache, chunk);
2524 chunk->md->blocks_skipped = free_space;
2525 chunk->md->blocks_written += free_space;
2526 chunk->md->write_pointer += free_space;
2527 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
2528 ftl_chunk_close(chunk);
2538 struct ftl_nv_cache_chunk *chunk)
2541 double used = chunk->md->blocks_written + chunk->md->blocks_skipped;
2547 ftl_nv_cache_get_chunk_state_name(struct ftl_nv_cache_chunk *chunk)
2553 assert(chunk->md->state < SPDK_COUNTOF(names));
2554 if (chunk->md->state < SPDK_COUNTOF(names)) {
2555 return names[chunk->md->state];
2567 struct ftl_nv_cache_chunk *chunk;
2571 for (i = 0, chunk = dev->nv_cache.chunks; i < dev->nv_cache.chunk_count; i++, chunk++) {
2574 spdk_json_write_named_string(w, "state", ftl_nv_cache_get_chunk_state_name(chunk));
2576 ftl_nv_cache_get_chunk_utilization(&dev->nv_cache, chunk));