Lines Matching defs:nv_cache

20 static inline uint64_t nvc_data_blocks(struct ftl_nv_cache *nv_cache) __attribute__((unused));
30 nvc_validate_md(struct ftl_nv_cache *nv_cache,
33 struct ftl_md *md = nv_cache->md;
49 nvc_data_offset(struct ftl_nv_cache *nv_cache)
55 nvc_data_blocks(struct ftl_nv_cache *nv_cache)
57 return nv_cache->chunk_blocks * nv_cache->chunk_count;
61 ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache)
63 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache,
64 struct spdk_ftl_dev, nv_cache);
70 nv_cache_p2l_map_pool_elem_size(const struct ftl_nv_cache *nv_cache)
73 return nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE;
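
Note: the element size on line 73 is just one chunk's tail metadata region, tail_md_chunk_blocks logical blocks of FTL_BLOCK_SIZE bytes each, so each p2l_pool element can hold a full per-chunk P2L map. A minimal sketch of that sizing, assuming the usual 4 KiB FTL block and an illustrative two-block tail (values are not taken from the source):

    /* Sketch only: sizes are illustrative, not taken from the source. */
    #include <stdint.h>
    #include <stdio.h>

    #define FTL_BLOCK_SIZE 4096ULL  /* assumed 4 KiB logical block */

    int main(void)
    {
        uint64_t tail_md_chunk_blocks = 2;  /* hypothetical tail metadata size per chunk */
        uint64_t elem_size = tail_md_chunk_blocks * FTL_BLOCK_SIZE;

        /* One p2l_pool element == one chunk's tail P2L map (8 KiB here). */
        printf("p2l pool element size: %llu bytes\n", (unsigned long long)elem_size);
        return 0;
    }
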
79 struct ftl_nv_cache_chunk *first_chunk = chunk->nv_cache->chunks;
81 return (chunk->offset - first_chunk->offset) / chunk->nv_cache->chunk_blocks;
87 struct ftl_nv_cache *nvc = &dev->nv_cache;
92 dev->conf.nv_cache.chunk_compaction_threshold /
99 dev->conf.nv_cache.chunk_free_target,
116 if (dev->nv_cache.nvc_type->ops.is_chunk_active(dev, scrub_ctx->reg_chunk.current.offset)) {
150 vss.nv_cache.lba = FTL_ADDR_INVALID;
181 scrub_ctx->reg_chunk.vss_blksz = dev->nv_cache.md_size;
182 scrub_ctx->reg_chunk.bdev_desc = dev->nv_cache.bdev_desc;
183 scrub_ctx->reg_chunk.ioch = dev->nv_cache.cache_ioch;
206 vss.nv_cache.lba = FTL_ADDR_INVALID;
218 struct ftl_nv_cache *nv_cache = &dev->nv_cache;
224 nv_cache->halt = true;
226 nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
227 if (!nv_cache->md) {
232 nv_cache->md_pool = ftl_mempool_create(dev->conf.user_io_pool_size,
233 nv_cache->md_size * dev->xfer_size,
235 if (!nv_cache->md_pool) {
243 nv_cache->chunk_blocks = dev->layout.nvc.chunk_data_blocks;
244 nv_cache->chunk_count = dev->layout.nvc.chunk_count;
245 nv_cache->tail_md_chunk_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache);
248 nv_cache->chunks = calloc(nv_cache->chunk_count,
249 sizeof(nv_cache->chunks[0]));
250 if (!nv_cache->chunks) {
255 TAILQ_INIT(&nv_cache->chunk_free_list);
256 TAILQ_INIT(&nv_cache->chunk_open_list);
257 TAILQ_INIT(&nv_cache->chunk_full_list);
258 TAILQ_INIT(&nv_cache->chunk_comp_list);
259 TAILQ_INIT(&nv_cache->chunk_inactive_list);
260 TAILQ_INIT(&nv_cache->needs_free_persist_list);
263 md = ftl_md_get_buffer(nv_cache->md);
269 chunk = nv_cache->chunks;
270 offset = nvc_data_offset(nv_cache);
271 for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
272 chunk->nv_cache = nv_cache;
275 nvc_validate_md(nv_cache, md);
277 offset += nv_cache->chunk_blocks;
279 if (nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset)) {
280 nv_cache->chunk_free_count++;
281 TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
284 nv_cache->chunk_inactive_count++;
285 TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
288 assert(nv_cache->chunk_free_count + nv_cache->chunk_inactive_count == nv_cache->chunk_count);
289 assert(offset <= nvc_data_offset(nv_cache) + nvc_data_blocks(nv_cache));
291 TAILQ_INIT(&nv_cache->compactor_list);
300 TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
305 nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS + FTL_MAX_COMPACTED_CHUNKS,
306 nv_cache_p2l_map_pool_elem_size(nv_cache),
309 if (!nv_cache->p2l_pool) {
314 nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS + FTL_MAX_COMPACTED_CHUNKS,
318 if (!nv_cache->chunk_md_pool) {
326 nv_cache->free_chunk_md_pool = ftl_mempool_create(2 * FTL_NV_CACHE_NUM_COMPACTORS,
330 if (!nv_cache->free_chunk_md_pool) {
338 nv_cache->throttle.interval_tsc = FTL_NV_CACHE_THROTTLE_INTERVAL_MS *
340 nv_cache->chunk_free_target = spdk_divide_round_up(nv_cache->chunk_count *
341 dev->conf.nv_cache.chunk_free_target,
344 if (nv_cache->nvc_type->ops.init) {
345 return nv_cache->nvc_type->ops.init(dev);
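
Note: lines 269-289 walk the chunk array once during init: each chunk gets its base offset (advancing by chunk_blocks), lands on either the free or the inactive list depending on is_chunk_active(), and the asserts on lines 288-289 pin the resulting accounting. A condensed sketch of that pass under stated assumptions: the chunk struct is a stand-in, and is_active() is a placeholder for nvc_type->ops.is_chunk_active():

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct chunk { uint64_t offset; };  /* stand-in for struct ftl_nv_cache_chunk */

    /* Placeholder for nv_cache->nvc_type->ops.is_chunk_active(). */
    static bool is_active(uint64_t offset) { return offset % 3 != 0; }

    /* Recovers a chunk's index from its offset, as on line 81. */
    static uint64_t
    chunk_index(const struct chunk *chunk, const struct chunk *first, uint64_t chunk_blocks)
    {
        return (chunk->offset - first->offset) / chunk_blocks;
    }

    static void
    layout_chunks(struct chunk *chunks, uint64_t chunk_count,
                  uint64_t data_offset, uint64_t chunk_blocks)
    {
        uint64_t i, offset = data_offset;
        uint64_t free_cnt = 0, inactive_cnt = 0;

        for (i = 0; i < chunk_count; i++) {
            chunks[i].offset = offset;
            offset += chunk_blocks;

            if (is_active(chunks[i].offset)) {
                free_cnt++;      /* would go on chunk_free_list */
            } else {
                inactive_cnt++;  /* would go on chunk_inactive_list */
            }
        }

        /* Same invariants as the asserts on lines 288-289. */
        assert(free_cnt + inactive_cnt == chunk_count);
        assert(offset <= data_offset + chunk_count * chunk_blocks);
    }

    int main(void)
    {
        struct chunk chunks[4];

        layout_chunks(chunks, 4, 100, 1024);
        assert(chunk_index(&chunks[3], &chunks[0], 1024) == 3);
        return 0;
    }
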
354 struct ftl_nv_cache *nv_cache = &dev->nv_cache;
357 if (nv_cache->nvc_type->ops.deinit) {
358 nv_cache->nvc_type->ops.deinit(dev);
361 while (!TAILQ_EMPTY(&nv_cache->compactor_list)) {
362 compactor = TAILQ_FIRST(&nv_cache->compactor_list);
363 TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
368 ftl_mempool_destroy(nv_cache->md_pool);
369 ftl_mempool_destroy(nv_cache->p2l_pool);
370 ftl_mempool_destroy(nv_cache->chunk_md_pool);
371 ftl_mempool_destroy(nv_cache->free_chunk_md_pool);
372 nv_cache->md_pool = NULL;
373 nv_cache->p2l_pool = NULL;
374 nv_cache->chunk_md_pool = NULL;
375 nv_cache->free_chunk_md_pool = NULL;
377 free(nv_cache->chunks);
378 nv_cache->chunks = NULL;
382 chunk_get_free_space(struct ftl_nv_cache *nv_cache,
385 assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <=
386 nv_cache->chunk_blocks);
387 return nv_cache->chunk_blocks - chunk->md->write_pointer -
388 nv_cache->tail_md_chunk_blocks;
394 return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
400 ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io)
408 chunk = nv_cache->chunk_current;
415 chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
417 TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
418 nv_cache->chunk_current = chunk;
424 free_space = chunk_get_free_space(nv_cache, chunk);
439 nv_cache->chunk_current = NULL;
445 nv_cache->chunk_current = NULL;
455 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
472 metadata->nv_cache.lba = lba;
473 metadata->nv_cache.seq_id = chunk->md->seq_id;
478 chunk_tail_md_offset(struct ftl_nv_cache *nv_cache)
480 return nv_cache->chunk_blocks - nv_cache->tail_md_chunk_blocks;
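
Note: lines 382-388 and 478-480 together give the usable geometry of a chunk: the per-chunk tail P2L metadata occupies the last tail_md_chunk_blocks blocks, so user data may only advance to chunk_blocks - tail_md_chunk_blocks, and the free space is whatever remains between the write pointer and that boundary. A small sketch of both calculations, with a simplified metadata struct:

    #include <assert.h>
    #include <stdint.h>

    struct chunk_md { uint64_t write_pointer; };  /* stand-in for struct ftl_nv_cache_chunk_md */

    /* Mirrors chunk_tail_md_offset(): data ends where the tail metadata begins. */
    static uint64_t
    tail_md_offset(uint64_t chunk_blocks, uint64_t tail_md_blocks)
    {
        return chunk_blocks - tail_md_blocks;
    }

    /* Mirrors chunk_get_free_space(): blocks still writable before the tail region. */
    static uint64_t
    free_space(const struct chunk_md *md, uint64_t chunk_blocks, uint64_t tail_md_blocks)
    {
        assert(md->write_pointer + tail_md_blocks <= chunk_blocks);
        return chunk_blocks - md->write_pointer - tail_md_blocks;
    }

    int main(void)
    {
        struct chunk_md md = { .write_pointer = 10 };

        /* e.g. a 64-block chunk with a 2-block tail: data region is blocks 0..61 */
        assert(tail_md_offset(64, 2) == 62);
        assert(free_space(&md, 64, 2) == 52);
        return 0;
    }
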
484 chunk_advance_blocks(struct ftl_nv_cache *nv_cache, struct ftl_nv_cache_chunk *chunk,
489 assert(chunk->md->blocks_written <= nv_cache->chunk_blocks);
491 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
500 chunk->nv_cache->tail_md_chunk_blocks;
518 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
521 p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->chunk_md_pool);
536 ftl_mempool_put(chunk->nv_cache->chunk_md_pool, p2l_map->chunk_dma_md);
545 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
550 TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry);
551 nv_cache->chunk_free_persist_count++;
557 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
560 p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->free_chunk_md_pool);
574 ftl_mempool_put(chunk->nv_cache->free_chunk_md_pool, p2l_map->chunk_dma_md);
584 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
586 nv_cache->chunk_free_persist_count--;
587 TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
588 nv_cache->chunk_free_count++;
589 nv_cache->chunk_full_count--;
603 ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
605 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
612 TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) {
619 TAILQ_REMOVE(&nv_cache->needs_free_persist_list, chunk, entry);
634 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
635 struct compaction_bw_stats *compaction_bw = &nv_cache->compaction_recent_bw;
658 nv_cache->compaction_sma = compaction_bw->sum / compaction_bw->count;
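
Note: line 658 keeps compaction_sma as a windowed average (sum / count) of recent compaction bandwidth samples. Only the sum and count fields are visible in the listing; the ring buffer below is one assumed way such a window can be maintained:

    #include <stdint.h>

    #define BW_WINDOW 32  /* hypothetical window size */

    struct compaction_bw_stats {
        double buf[BW_WINDOW];  /* assumed ring of recent samples */
        uint32_t head;
        uint32_t count;
        double sum;
    };

    /* Push a bandwidth sample and return the simple moving average. */
    static double
    bw_sample(struct compaction_bw_stats *bw, double sample)
    {
        if (bw->count == BW_WINDOW) {
            bw->sum -= bw->buf[bw->head];  /* drop the oldest sample */
        } else {
            bw->count++;
        }
        bw->buf[bw->head] = sample;
        bw->head = (bw->head + 1) % BW_WINDOW;
        bw->sum += sample;

        return bw->sum / bw->count;  /* as on line 658 */
    }

    int main(void)
    {
        struct compaction_bw_stats bw = { { 0 } };
        double sma = 0;

        for (int i = 1; i <= 4; i++) {
            sma = bw_sample(&bw, (double)i);
        }
        return sma == 2.5 ? 0 : 1;  /* average of 1..4 */
    }
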
664 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
677 TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry);
678 nv_cache->chunk_comp_count--;
688 is_compaction_required_for_upgrade(struct ftl_nv_cache *nv_cache)
690 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
693 if (nv_cache->chunk_full_count || nv_cache->chunk_open_count) {
702 is_compaction_required(struct ftl_nv_cache *nv_cache)
704 if (spdk_unlikely(nv_cache->halt)) {
705 return is_compaction_required_for_upgrade(nv_cache);
708 if (nv_cache->chunk_full_count >= nv_cache->chunk_compaction_threshold) {
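
Note: lines 702-708 start compaction once chunk_full_count crosses chunk_compaction_threshold, which lines 87-92 derive from the dev->conf.nv_cache.chunk_compaction_threshold setting. The divisor is cut off in the listing; the sketch below assumes the setting is a percentage of chunk_count:

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumption: the threshold config is a percentage of the total chunk count. */
    static uint64_t
    compaction_threshold_chunks(uint64_t chunk_count, uint64_t threshold_pct)
    {
        return chunk_count * threshold_pct / 100;
    }

    /* Mirrors the non-halted path of is_compaction_required() (line 708). */
    static bool
    compaction_required(uint64_t chunk_full_count, uint64_t threshold_chunks)
    {
        return chunk_full_count >= threshold_chunks;
    }

    int main(void)
    {
        uint64_t threshold = compaction_threshold_chunks(128, 80);  /* 102 chunks */

        return compaction_required(103, threshold) ? 0 : 1;
    }
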
805 rc = spdk_bdev_read_blocks(dev->nv_cache.bdev_desc, dev->nv_cache.cache_ioch,
811 struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
815 spdk_bdev_queue_io_wait(bdev, dev->nv_cache.cache_ioch, &entry->bdev_io.wait_entry);
840 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
850 TAILQ_INSERT_TAIL(&nv_cache->chunk_comp_list, chunk, entry);
870 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
871 struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
877 spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, wait_entry);
885 prepare_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
889 if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
893 chunk = TAILQ_FIRST(&nv_cache->chunk_full_list);
894 TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry);
897 nv_cache->chunk_comp_count++;
903 get_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
907 if (TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
911 chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
937 struct ftl_nv_cache *nv_cache = compactor->nv_cache;
940 assert(nv_cache->compaction_active_count);
941 nv_cache->compaction_active_count--;
942 TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
970 struct ftl_nv_cache *nv_cache = compactor->nv_cache;
971 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1023 compaction_entry_read_pos(struct ftl_nv_cache *nv_cache, struct ftl_rq_entry *entry)
1025 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1031 chunk = get_chunk_for_compaction(nv_cache);
1061 struct ftl_nv_cache *nv_cache = compactor->nv_cache;
1066 if (!compaction_entry_read_pos(nv_cache, entry)) {
1082 compaction_process(struct ftl_nv_cache *nv_cache)
1084 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1087 if (!is_compaction_required(nv_cache)) {
1091 if (nv_cache->chunk_comp_count < FTL_MAX_COMPACTED_CHUNKS) {
1092 prepare_chunk_for_compaction(nv_cache);
1095 if (TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
1099 compactor = TAILQ_FIRST(&nv_cache->compactor_list);
1104 TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
1105 compactor->nv_cache->compaction_active_count++;
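
Note: lines 937-942 and 1095-1105 show the compactor pool discipline: compaction_process() pops an idle compactor from compactor_list and bumps compaction_active_count, and the completion path does the reverse. A condensed sketch of that accounting, with a simplified compactor struct on a plain sys/queue.h TAILQ:

    #include <assert.h>
    #include <sys/queue.h>

    struct compactor {
        TAILQ_ENTRY(compactor) entry;
    };

    TAILQ_HEAD(compactor_list, compactor);

    struct cache_state {
        struct compactor_list idle;  /* stands in for nv_cache->compactor_list */
        unsigned active;             /* stands in for compaction_active_count */
    };

    /* Take an idle compactor, as in compaction_process() (lines 1099-1105). */
    static struct compactor *
    compactor_get(struct cache_state *c)
    {
        struct compactor *comp = TAILQ_FIRST(&c->idle);

        if (comp) {
            TAILQ_REMOVE(&c->idle, comp, entry);
            c->active++;
        }
        return comp;
    }

    /* Return it when the batch completes, as in lines 940-942. */
    static void
    compactor_put(struct cache_state *c, struct compactor *comp)
    {
        assert(c->active);
        c->active--;
        TAILQ_INSERT_TAIL(&c->idle, comp, entry);
    }

    int main(void)
    {
        struct cache_state c = { .active = 0 };
        struct compactor comp;

        TAILQ_INIT(&c.idle);
        TAILQ_INSERT_TAIL(&c.idle, &comp, entry);

        struct compactor *got = compactor_get(&c);
        assert(got == &comp && c.active == 1);
        compactor_put(&c, got);
        assert(c.active == 0);
        return 0;
    }
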
1216 compactor->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
1221 compactor->nv_cache = &dev->nv_cache;
1240 struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1242 chunk_advance_blocks(nv_cache, io->nv_cache_chunk, io->num_blocks);
1287 dev->nv_cache.nvc_type->ops.write(io);
1312 cache_offset = ftl_nv_cache_get_wr_buffer(&dev->nv_cache, io);
1323 dev->nv_cache.throttle.blocks_submitted += io->num_blocks;
1333 struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1337 rc = ftl_nv_cache_bdev_read_blocks_with_md(io->dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1345 ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache)
1347 if (nv_cache->compaction_active_count) {
1351 if (nv_cache->chunk_open_count > 0) {
1355 if (is_compaction_required_for_upgrade(nv_cache)) {
1366 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1375 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1384 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1388 offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
1395 struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks;
1400 chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks;
1420 ftl_nv_cache_throttle_update(struct ftl_nv_cache *nv_cache)
1425 err = ((double)nv_cache->chunk_free_count - nv_cache->chunk_free_target) / nv_cache->chunk_count;
1434 if (spdk_unlikely(nv_cache->compaction_sma == 0 || nv_cache->compaction_active_count == 0)) {
1435 nv_cache->throttle.blocks_submitted_limit = UINT64_MAX;
1437 double blocks_per_interval = nv_cache->compaction_sma * nv_cache->throttle.interval_tsc /
1439 nv_cache->throttle.blocks_submitted_limit = blocks_per_interval * (1.0 + modifier);
1444 ftl_nv_cache_process_throttle(struct ftl_nv_cache *nv_cache)
1448 if (spdk_unlikely(!nv_cache->throttle.start_tsc)) {
1449 nv_cache->throttle.start_tsc = tsc;
1450 } else if (tsc - nv_cache->throttle.start_tsc >= nv_cache->throttle.interval_tsc) {
1451 ftl_nv_cache_throttle_update(nv_cache);
1452 nv_cache->throttle.start_tsc = tsc;
1453 nv_cache->throttle.blocks_submitted = 0;
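
Note: lines 1420-1453 form the write throttle: every interval_tsc ticks, the deviation of chunk_free_count from chunk_free_target (itself a fraction of chunk_count, lines 340-341) scales a modifier applied to the number of blocks the compaction SMA permits per interval, and lines 1499-1502 then stall user writes once blocks_submitted reaches that limit. A sketch of the proportional update under stated assumptions: the gain constant is hypothetical, and the divisor on line 1438 is cut off in the listing, so it is passed in as an opaque constant:

    #include <stdbool.h>
    #include <stdint.h>

    #define THROTTLE_KP 20.0  /* hypothetical proportional gain */

    struct throttle {
        uint64_t interval_tsc;
        uint64_t blocks_submitted;
        uint64_t blocks_submitted_limit;
    };

    /* Periodic update, mirroring ftl_nv_cache_throttle_update() (lines 1420-1439). */
    static void
    throttle_update(struct throttle *t, double compaction_sma, uint64_t active_compactors,
                    double sma_divisor, uint64_t chunk_free_count,
                    uint64_t chunk_free_target, uint64_t chunk_count)
    {
        /* Proportional error: how far the free-chunk count is from its target. */
        double err = ((double)chunk_free_count - chunk_free_target) / chunk_count;
        double modifier = THROTTLE_KP * err;

        if (compaction_sma == 0 || active_compactors == 0) {
            /* No compaction bandwidth measured yet: leave writes unthrottled. */
            t->blocks_submitted_limit = UINT64_MAX;
            return;
        }

        /* The divisor on line 1438 is cut off in the listing; treated here as an
         * opaque unit-conversion constant. */
        double blocks_per_interval = compaction_sma * t->interval_tsc / sma_divisor;
        t->blocks_submitted_limit = (uint64_t)(blocks_per_interval * (1.0 + modifier));
    }

    /* Admission gate, mirroring lines 1499-1502. */
    static bool
    writes_should_stall(const struct throttle *t, bool cache_full)
    {
        return t->blocks_submitted >= t->blocks_submitted_limit || cache_full;
    }

    int main(void)
    {
        struct throttle t = { .interval_tsc = 1000, .blocks_submitted = 0 };

        throttle_update(&t, 8.0, 1, 4.0, 90, 80, 100);
        /* err = 0.1, modifier = 2.0, blocks/interval = 2000 -> limit = 6000 */
        return writes_should_stall(&t, false) ? 1 : 0;
    }
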
1462 struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1464 assert(dev->nv_cache.bdev_desc);
1466 if (nv_cache->chunk_open_count < FTL_MAX_OPEN_CHUNKS && spdk_likely(!nv_cache->halt) &&
1467 !TAILQ_EMPTY(&nv_cache->chunk_free_list)) {
1468 struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);
1469 TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
1470 TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
1471 nv_cache->chunk_free_count--;
1477 compaction_process(nv_cache);
1478 ftl_chunk_persist_free_state(nv_cache);
1479 ftl_nv_cache_process_throttle(nv_cache);
1481 if (nv_cache->nvc_type->ops.process) {
1482 nv_cache->nvc_type->ops.process(dev);
1487 ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
1489 if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
1499 struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1501 if (dev->nv_cache.throttle.blocks_submitted >= nv_cache->throttle.blocks_submitted_limit ||
1502 ftl_nv_cache_full(nv_cache)) {
1512 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1515 ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1522 ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache)
1524 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1529 assert(nv_cache->chunk_open_count == 0);
1531 if (nv_cache->compaction_active_count) {
1536 chunk = nv_cache->chunks;
1542 for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1543 nvc_validate_md(nv_cache, chunk->md);
1547 if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1558 } else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
1587 sort_chunks(struct ftl_nv_cache *nv_cache)
1593 if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
1597 chunks_list = calloc(nv_cache->chunk_full_count,
1604 TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) {
1608 assert(i == nv_cache->chunk_full_count);
1610 qsort(chunks_list, nv_cache->chunk_full_count, sizeof(chunks_list[0]),
1613 TAILQ_INIT(&nv_cache->chunk_full_list);
1614 for (i = 0; i < nv_cache->chunk_full_count; i++) {
1616 TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
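
Note: lines 1587-1616 rebuild chunk_full_list in sorted order by copying the list into a calloc'd array of chunk pointers, qsort()-ing it, and re-inserting the chunks in the new order. The comparison key is not visible in the listing; the sketch below assumes full chunks are ordered by sequence number:

    #include <stdint.h>
    #include <stdlib.h>

    struct chunk { uint64_t seq_id; };  /* assumed sort key */

    static int
    chunk_cmp(const void *a, const void *b)
    {
        const struct chunk *ca = *(const struct chunk * const *)a;
        const struct chunk *cb = *(const struct chunk * const *)b;

        if (ca->seq_id < cb->seq_id) {
            return -1;
        }
        return ca->seq_id > cb->seq_id;
    }

    /* Sorts an array of chunk pointers in place; the original walks the TAILQ to
     * fill such an array, qsorts it, then re-links the list in the new order. */
    static void
    sort_full_chunks(struct chunk **chunks, size_t count)
    {
        qsort(chunks, count, sizeof(chunks[0]), chunk_cmp);
    }

    int main(void)
    {
        struct chunk c1 = { .seq_id = 7 }, c2 = { .seq_id = 3 };
        struct chunk *list[] = { &c1, &c2 };

        sort_full_chunks(list, 2);
        return list[0]->seq_id == 3 ? 0 : 1;
    }
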
1626 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1632 p2l_map->chunk_map = ftl_mempool_get(nv_cache->p2l_pool);
1639 ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1645 memset(p2l_map->chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
1651 ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)
1653 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1659 nv_cache->chunk_current = NULL;
1660 TAILQ_INIT(&nv_cache->chunk_free_list);
1661 TAILQ_INIT(&nv_cache->chunk_full_list);
1662 TAILQ_INIT(&nv_cache->chunk_inactive_list);
1663 nv_cache->chunk_full_count = 0;
1664 nv_cache->chunk_free_count = 0;
1665 nv_cache->chunk_inactive_count = 0;
1667 assert(nv_cache->chunk_open_count == 0);
1668 offset = nvc_data_offset(nv_cache);
1669 if (!nv_cache->chunks) {
1681 chunk = nv_cache->chunks;
1682 for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1683 active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1697 chunk = nv_cache->chunks;
1698 for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1699 chunk->nv_cache = nv_cache;
1700 nvc_validate_md(nv_cache, chunk->md);
1712 active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1727 TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
1728 nv_cache->chunk_free_count++;
1736 if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1741 TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1742 nv_cache->chunk_full_count++;
1745 TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
1746 nv_cache->chunk_inactive_count++;
1754 offset += nv_cache->chunk_blocks;
1757 chunks_number = nv_cache->chunk_free_count + nv_cache->chunk_full_count +
1758 nv_cache->chunk_inactive_count;
1759 assert(nv_cache->chunk_current == NULL);
1761 if (chunks_number != nv_cache->chunk_count) {
1767 status = sort_chunks(nv_cache);
1773 nv_cache->chunk_full_count, nv_cache->chunk_free_count);
1790 ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
1796 chunk = nv_cache->chunks;
1800 for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1821 chunk_advance_blocks(chunk->nv_cache, chunk, brq->num_blocks);
1832 struct ftl_nv_cache *nv_cache = brq->io.chunk->nv_cache;
1833 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1836 rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1841 struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
1845 spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &brq->io.bdev_io_wait);
1855 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1856 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1883 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1884 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1890 rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1920 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1935 chunk->nv_cache->chunk_open_count++;
1940 if (dev->nv_cache.nvc_type->ops.on_chunk_open) {
1941 dev->nv_cache.nvc_type->ops.on_chunk_open(dev, chunk);
1957 struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1958 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1960 assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);
1966 assert(chunk->nv_cache->chunk_open_count > 0);
1967 chunk->nv_cache->chunk_open_count--;
1970 TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
1971 chunk->nv_cache->chunk_full_count++;
1973 chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
1976 if (nv_cache->nvc_type->ops.on_chunk_closed) {
1977 nv_cache->nvc_type->ops.on_chunk_closed(dev, chunk);
1993 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2000 chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
2021 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2026 ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2029 assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
2039 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2044 ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2047 brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2062 is_chunk_count_valid(struct ftl_nv_cache *nv_cache)
2066 chunk_count += nv_cache->chunk_open_count;
2067 chunk_count += nv_cache->chunk_free_count;
2068 chunk_count += nv_cache->chunk_full_count;
2069 chunk_count += nv_cache->chunk_comp_count;
2070 chunk_count += nv_cache->chunk_inactive_count;
2072 return chunk_count == nv_cache->chunk_count;
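
Note: lines 2062-2072 check the bookkeeping invariant that every chunk is counted in exactly one state (open, free, full, under compaction, or inactive), so the per-state counters must sum back to chunk_count. A tiny sketch of the same check with simplified counter names:

    #include <stdbool.h>
    #include <stdint.h>

    struct chunk_counters {
        uint64_t open_cnt, free_cnt, full_cnt, comp_cnt, inactive_cnt;
        uint64_t total;
    };

    /* Mirrors is_chunk_count_valid(): the five state counters partition all chunks. */
    static bool
    chunk_count_valid(const struct chunk_counters *c)
    {
        return c->open_cnt + c->free_cnt + c->full_cnt + c->comp_cnt + c->inactive_cnt == c->total;
    }

    int main(void)
    {
        struct chunk_counters c = { .open_cnt = 1, .free_cnt = 5, .full_cnt = 2,
                                    .comp_cnt = 1, .inactive_cnt = 0, .total = 9 };

        return chunk_count_valid(&c) ? 0 : 1;
    }
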
2101 struct ftl_nv_cache *nvc = &dev->nv_cache;
2190 struct ftl_nv_cache *nvc = &dev->nv_cache;
2268 struct ftl_nv_cache *nvc = &dev->nv_cache;
2304 ftl_basic_rq_init(dev, rq, p2l_map, chunk->nv_cache->tail_md_chunk_blocks);
2307 rq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2317 struct ftl_nv_cache *nvc = chunk->nv_cache;
2318 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nvc, struct spdk_ftl_dev, nv_cache);
2349 chunk_md->write_pointer = chunk->nv_cache->chunk_blocks;
2350 chunk_md->blocks_written = chunk->nv_cache->chunk_blocks;
2352 chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
2363 struct ftl_nv_cache *nvc = ctx->chunk->nv_cache;
2425 struct ftl_nv_cache *nvc = &dev->nv_cache;
2452 ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache)
2456 return nv_cache->chunk_open_count == 0 && nv_cache->chunk_free_persist_count == 0;
2460 ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
2465 nv_cache->halt = true;
2468 while (!TAILQ_EMPTY(&nv_cache->chunk_open_list)) {
2469 chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2479 TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
2482 assert(nv_cache->chunk_open_count > 0);
2483 nv_cache->chunk_open_count--;
2487 chunk = nv_cache->chunk_current;
2489 nv_cache->chunk_current = NULL;
2494 free_space = chunk_get_free_space(nv_cache, chunk);
2503 ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache)
2505 struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
2509 chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2521 seq_id = nv_cache->chunk_current->md->seq_id;
2522 free_space = chunk_get_free_space(nv_cache, chunk);
2527 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
2530 nv_cache->chunk_current = NULL;
2537 ftl_nv_cache_get_chunk_utilization(struct ftl_nv_cache *nv_cache,
2540 double capacity = nv_cache->chunk_blocks;
2569 spdk_json_write_named_string(w, "type", dev->nv_cache.nvc_type->name);
2571 for (i = 0, chunk = dev->nv_cache.chunks; i < dev->nv_cache.chunk_count; i++, chunk++) {
2576 ftl_nv_cache_get_chunk_utilization(&dev->nv_cache, chunk));