120c3c166SMateusz Kozlowski /* SPDX-License-Identifier: BSD-3-Clause 220c3c166SMateusz Kozlowski * Copyright 2023 Solidigm All Rights Reserved 320c3c166SMateusz Kozlowski */ 420c3c166SMateusz Kozlowski 520c3c166SMateusz Kozlowski #include "ftl_nvc_dev.h" 620c3c166SMateusz Kozlowski #include "ftl_core.h" 7b0556d4aSLukasz Lasek #include "ftl_layout.h" 893036282SLukasz Lasek #include "utils/ftl_layout_tracker_bdev.h" 9d80bf110SMateusz Kozlowski #include "mngt/ftl_mngt.h" 10*6d6179ffSMateusz Kozlowski #include "ftl_nvc_bdev_common.h" 1120c3c166SMateusz Kozlowski 1220c3c166SMateusz Kozlowski static bool 1320c3c166SMateusz Kozlowski is_bdev_compatible(struct spdk_ftl_dev *dev, struct spdk_bdev *bdev) 1420c3c166SMateusz Kozlowski { 1520c3c166SMateusz Kozlowski if (!spdk_bdev_is_md_separate(bdev)) { 1620c3c166SMateusz Kozlowski /* It doesn't support separate metadata buffer IO */ 1720c3c166SMateusz Kozlowski return false; 1820c3c166SMateusz Kozlowski } 1920c3c166SMateusz Kozlowski 2020c3c166SMateusz Kozlowski if (spdk_bdev_get_md_size(bdev) != sizeof(union ftl_md_vss)) { 2120c3c166SMateusz Kozlowski /* Bdev's metadata is invalid size */ 2220c3c166SMateusz Kozlowski return false; 2320c3c166SMateusz Kozlowski } 2420c3c166SMateusz Kozlowski 2520c3c166SMateusz Kozlowski if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) { 2620c3c166SMateusz Kozlowski /* Unsupported DIF type used by bdev */ 2720c3c166SMateusz Kozlowski return false; 2820c3c166SMateusz Kozlowski } 2920c3c166SMateusz Kozlowski 3020c3c166SMateusz Kozlowski if (ftl_md_xfer_blocks(dev) * spdk_bdev_get_md_size(bdev) > FTL_ZERO_BUFFER_SIZE) { 3120c3c166SMateusz Kozlowski FTL_ERRLOG(dev, "Zero buffer too small for bdev %s metadata transfer\n", 3220c3c166SMateusz Kozlowski spdk_bdev_get_name(bdev)); 3320c3c166SMateusz Kozlowski return false; 3420c3c166SMateusz Kozlowski } 3520c3c166SMateusz Kozlowski 3620c3c166SMateusz Kozlowski return true; 3720c3c166SMateusz Kozlowski } 3820c3c166SMateusz Kozlowski 
/* Completion callback for a user-data write submitted by write_io().
 * Returns the VSS metadata buffer to the cache's md pool and completes the
 * FTL-level write, forwarding the bdev's success flag.
 */
static void
write_io_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_io *io = cb_arg;
	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;

	ftl_stats_bdev_io_completed(io->dev, FTL_STATS_TYPE_USER, bdev_io);

	spdk_bdev_free_io(bdev_io);

	ftl_mempool_put(nv_cache->md_pool, io->md);

	ftl_nv_cache_write_complete(io, success);
}

static void write_io(struct ftl_io *io);

/* Trampoline matching spdk_bdev_io_wait_cb so a queued write can be
 * resubmitted once the bdev has a free IO slot.
 */
static void
_nvc_vss_write(void *io)
{
	write_io(io);
}

/* Submit a user write to the NV cache bdev together with its VSS metadata.
 * On -ENOMEM the metadata buffer is returned to the pool and the IO is
 * re-queued via spdk_bdev_queue_io_wait(); any other submission error, or
 * md pool exhaustion, aborts (presumably the pool is sized for the maximum
 * number of in-flight IOs — TODO confirm against pool setup).
 */
static void
write_io(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
	int rc;

	io->md = ftl_mempool_get(dev->nv_cache.md_pool);
	if (spdk_unlikely(!io->md)) {
		ftl_abort();
	}

	ftl_nv_cache_fill_md(io);

	rc = spdk_bdev_writev_blocks_with_md(nv_cache->bdev_desc, nv_cache->cache_ioch,
					     io->iov, io->iov_cnt, io->md,
					     ftl_addr_to_nvc_offset(dev, io->addr), io->num_blocks,
					     write_io_cb, io);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev;

			/* Give the metadata buffer back; a fresh one is taken
			 * when the IO is resubmitted.
			 */
			ftl_mempool_put(nv_cache->md_pool, io->md);
			io->md = NULL;

			bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
			io->bdev_io_wait.bdev = bdev;
			io->bdev_io_wait.cb_fn = _nvc_vss_write;
			io->bdev_io_wait.cb_arg = io;
			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &io->bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

/* Per-process state for the "recover open chunk" management process */
struct nvc_recover_open_chunk_ctx {
	struct ftl_nv_cache_chunk *chunk;	/* chunk being recovered */
	struct ftl_rq *rq;			/* request used for batched reads */
	uint64_t addr;				/* next cache offset to read */
	uint64_t to_read;			/* blocks remaining before tail MD */
};

/* Completion callback for one batched VSS read of the chunk being recovered.
 * Walks the metadata of each block just read and feeds it into the chunk's
 * P2L map, then advances the read window and re-runs the step.
 */
static void
nvc_recover_open_chunk_read_vss_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_mngt_process *mngt = cb_arg;
	struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_nv_cache_chunk *chunk = ctx->chunk;
	struct ftl_rq *rq = ctx->rq;
	union ftl_md_vss *md;
	uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
	uint64_t blocks = bdev_io->u.bdev.num_blocks;
	ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);

	spdk_bdev_free_io(bdev_io);
	if (!success) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	/* Rebuild P2L map */
	for (rq->iter.idx = 0; rq->iter.idx < blocks; rq->iter.idx++) {
		md = rq->entries[rq->iter.idx].io_md;
		/* A stale sequence id means the block was not written as part
		 * of this chunk's current open pass — invalidate its mapping.
		 */
		if (md->nv_cache.seq_id != chunk->md->seq_id) {
			md->nv_cache.lba = FTL_LBA_INVALID;
			md->nv_cache.seq_id = 0;
		}

		ftl_nv_cache_chunk_set_addr(chunk, md->nv_cache.lba, addr + rq->iter.idx);
	}

	assert(ctx->to_read >= blocks);
	ctx->addr += blocks;
	ctx->to_read -= blocks;
	/* Re-run the read step for the next batch (or to finish when to_read hits 0) */
	ftl_mngt_continue_step(mngt);

}

/* Step action: read the next batch of user blocks + VSS metadata from the
 * open chunk. Advances to the next step once all blocks up to the tail
 * metadata have been processed; fails the step on submission error.
 */
static void
nvc_recover_open_chunk_read_vss(struct spdk_ftl_dev *dev,
				struct ftl_mngt_process *mngt)
{
	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	uint64_t blocks = spdk_min(ctx->rq->num_blocks, ctx->to_read);
	int rc;

	if (blocks) {
		rc = spdk_bdev_read_blocks_with_md(dev->nv_cache.bdev_desc, dev->nv_cache.cache_ioch,
						   ctx->rq->io_payload, ctx->rq->io_md, ctx->addr, blocks,
						   nvc_recover_open_chunk_read_vss_cb, mngt);
		if (rc) {
			ftl_mngt_fail_step(mngt);
			return;
		}
	} else {
		/* Whole chunk (up to its tail metadata) consumed */
		ftl_mngt_next_step(mngt);
	}
}

/* Process init: stash the chunk to recover (passed as init_ctx), allocate the
 * request used for batched reads and set the read window to span the chunk's
 * data area (everything before the tail metadata).
 * Returns 0 on success, -ENOMEM if the request cannot be allocated.
 */
static int
nvc_recover_open_chunk_init_handler(struct spdk_ftl_dev *dev,
				    struct ftl_mngt_process *mngt, void *init_ctx)
{
	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);

	ctx->chunk = init_ctx;
	ctx->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
	if (NULL == ctx->rq) {
		return -ENOMEM;
	}

	ctx->addr = ctx->chunk->offset;
	ctx->to_read = chunk_tail_md_offset(&dev->nv_cache);

	return 0;
}

/* Process deinit: release the read request allocated in the init handler */
static void
nvc_recover_open_chunk_deinit_handler(struct spdk_ftl_dev *dev,
				      struct ftl_mngt_process *mngt)
{
	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);

	ftl_rq_del(ctx->rq);
}

/* Management process descriptor: a single step that loops (via
 * ftl_mngt_continue_step) until the open chunk's VSS has been read back.
 */
static const struct ftl_mngt_process_desc desc_recover_open_chunk = {
	.name = "Recover open chunk",
	.ctx_size = sizeof(struct nvc_recover_open_chunk_ctx),
	.init_handler = nvc_recover_open_chunk_init_handler,
	.deinit_handler = nvc_recover_open_chunk_deinit_handler,
	.steps = {
		{
			.name = "Chunk recovery, read vss",
			.action = nvc_recover_open_chunk_read_vss
		},
		{}
	}
};

/* Entry point for the recover_open_chunk device op: runs the recovery process
 * as a sub-process of the caller's management flow.
 */
static void
nvc_recover_open_chunk(struct spdk_ftl_dev *dev,
		       struct ftl_mngt_process *mngt,
		       struct ftl_nv_cache_chunk *chunk)
{
	ftl_mngt_call_process(mngt, &desc_recover_open_chunk, chunk);
}

/* NV cache device type backed by a bdev with native VSS (separate metadata)
 * support; common bdev ops are shared via ftl_nvc_bdev_common.
 */
struct ftl_nv_cache_device_type nvc_bdev_vss = {
	.name = "bdev",
	.features = {
	},
	.ops = {
		.is_bdev_compatible = is_bdev_compatible,
		.is_chunk_active = ftl_nvc_bdev_common_is_chunk_active,
		.md_layout_ops = {
			.region_create = ftl_nvc_bdev_common_region_create,
			.region_open = ftl_nvc_bdev_common_region_open,
		},
		.write = write_io,
		.recover_open_chunk = nvc_recover_open_chunk,
	}
};
FTL_NV_CACHE_DEVICE_TYPE_REGISTER(nvc_bdev_vss)