xref: /spdk/lib/ftl/nvc/ftl_nvc_bdev_vss.c (revision 698b2423d5f98e56c36dcf8484205bb034d0f6f5)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright 2023 Solidigm All Rights Reserved
3  */
4 
5 #include "ftl_nvc_dev.h"
6 #include "ftl_core.h"
7 #include "ftl_layout.h"
8 #include "utils/ftl_layout_tracker_bdev.h"
9 #include "mngt/ftl_mngt.h"
10 #include "ftl_nvc_bdev_common.h"
11 
12 static bool
13 is_bdev_compatible(struct spdk_ftl_dev *dev, struct spdk_bdev *bdev)
14 {
15 	if (!spdk_bdev_is_md_separate(bdev)) {
16 		/* It doesn't support separate metadata buffer IO */
17 		return false;
18 	}
19 
20 	if (spdk_bdev_get_md_size(bdev) != sizeof(union ftl_md_vss)) {
21 		/* Bdev's metadata is invalid size */
22 		return false;
23 	}
24 
25 	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
26 		/* Unsupported DIF type used by bdev */
27 		return false;
28 	}
29 
30 	if (ftl_md_xfer_blocks(dev) * spdk_bdev_get_md_size(bdev) > FTL_ZERO_BUFFER_SIZE) {
31 		FTL_ERRLOG(dev, "Zero buffer too small for bdev %s metadata transfer\n",
32 			   spdk_bdev_get_name(bdev));
33 		return false;
34 	}
35 
36 	return true;
37 }
38 
39 static void
40 write_io_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
41 {
42 	struct ftl_io *io = cb_arg;
43 	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
44 
45 	ftl_stats_bdev_io_completed(io->dev, FTL_STATS_TYPE_USER, bdev_io);
46 
47 	spdk_bdev_free_io(bdev_io);
48 
49 	ftl_mempool_put(nv_cache->md_pool, io->md);
50 
51 	ftl_nv_cache_write_complete(io, success);
52 }
53 
static void write_io(struct ftl_io *io);

/* spdk_bdev_io_wait_cb-shaped trampoline: retries write_io() once the bdev
 * has resources again (queued via spdk_bdev_queue_io_wait on -ENOMEM).
 */
static void
_nvc_vss_write(void *io)
{
	write_io(io);
}
61 
/* Submit a user-data write to the NV cache bdev, attaching freshly filled
 * VSS metadata. On transient lack of bdev resources (-ENOMEM) the IO is
 * parked on the bdev's io_wait queue and resubmitted later; any other
 * submission error is fatal.
 */
static void
write_io(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
	int rc;

	io->md = ftl_mempool_get(dev->nv_cache.md_pool);
	if (spdk_unlikely(!io->md)) {
		/* md_pool exhaustion is treated as unrecoverable */
		ftl_abort();
	}

	ftl_nv_cache_fill_md(io);

	rc = spdk_bdev_writev_blocks_with_md(nv_cache->bdev_desc, nv_cache->cache_ioch,
					     io->iov, io->iov_cnt, io->md,
					     ftl_addr_to_nvc_offset(dev, io->addr), io->num_blocks,
					     write_io_cb, io);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev;

			/* Release the md buffer now; the retry path above
			 * re-acquires and re-fills it from scratch.
			 */
			ftl_mempool_put(nv_cache->md_pool, io->md);
			io->md = NULL;

			bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
			io->bdev_io_wait.bdev = bdev;
			io->bdev_io_wait.cb_fn = _nvc_vss_write;
			io->bdev_io_wait.cb_arg = io;
			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &io->bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}
97 
/* Per-process state for the open-chunk recovery management process. */
struct nvc_recover_open_chunk_ctx {
	struct ftl_nv_cache_chunk *chunk;	/* chunk being recovered */
	struct ftl_rq *rq;			/* reusable request for batched reads */
	uint64_t addr;				/* next cache offset to read, in blocks */
	uint64_t to_read;			/* blocks of chunk data still to process */
};
104 
/* Completion callback for one batched read of chunk data + VSS metadata.
 * Rebuilds the chunk's P2L map from the per-block VSS entries, advances the
 * read cursor in the process context and re-runs the read step.
 */
static void
nvc_recover_open_chunk_read_vss_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_mngt_process *mngt = cb_arg;
	struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_nv_cache_chunk *chunk = ctx->chunk;
	struct ftl_rq *rq = ctx->rq;
	union ftl_md_vss *md;
	uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
	uint64_t blocks = bdev_io->u.bdev.num_blocks;
	ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);

	spdk_bdev_free_io(bdev_io);
	if (!success) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	/* Rebuild P2L map */
	for (rq->iter.idx = 0; rq->iter.idx < blocks; rq->iter.idx++) {
		md = rq->entries[rq->iter.idx].io_md;
		/* Blocks written under a different sequence ID do not belong
		 * to this chunk's open session — treat them as unmapped.
		 */
		if (md->nv_cache.seq_id != chunk->md->seq_id) {
			md->nv_cache.lba = FTL_LBA_INVALID;
			md->nv_cache.seq_id = 0;
		}

		ftl_nv_cache_chunk_set_addr(chunk, md->nv_cache.lba, addr + rq->iter.idx);
	}

	assert(ctx->to_read >= blocks);
	ctx->addr += blocks;
	ctx->to_read -= blocks;
	/* Loop the step: reads continue until ctx->to_read reaches zero */
	ftl_mngt_continue_step(mngt);

}
141 
142 static void
143 nvc_recover_open_chunk_read_vss(struct spdk_ftl_dev *dev,
144 				struct ftl_mngt_process *mngt)
145 {
146 	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
147 	uint64_t blocks = spdk_min(ctx->rq->num_blocks, ctx->to_read);
148 	int rc;
149 
150 	if (blocks) {
151 		rc = spdk_bdev_read_blocks_with_md(dev->nv_cache.bdev_desc, dev->nv_cache.cache_ioch,
152 						   ctx->rq->io_payload, ctx->rq->io_md, ctx->addr, blocks,
153 						   nvc_recover_open_chunk_read_vss_cb, mngt);
154 		if (rc) {
155 			ftl_mngt_fail_step(mngt);
156 			return;
157 		}
158 	} else {
159 		ftl_mngt_next_step(mngt);
160 	}
161 }
162 
163 static int
164 nvc_recover_open_chunk_init_handler(struct spdk_ftl_dev *dev,
165 				    struct ftl_mngt_process *mngt, void *init_ctx)
166 {
167 	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
168 
169 	ctx->chunk = init_ctx;
170 	ctx->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
171 	if (NULL == ctx->rq) {
172 		return -ENOMEM;
173 	}
174 
175 	ctx->addr = ctx->chunk->offset;
176 	ctx->to_read = chunk_tail_md_offset(&dev->nv_cache);
177 
178 	return 0;
179 }
180 
/* Process deinit: release the request allocated in the init handler. */
static void
nvc_recover_open_chunk_deinit_handler(struct spdk_ftl_dev *dev,
				      struct ftl_mngt_process *mngt)
{
	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);

	ftl_rq_del(ctx->rq);
}
189 
/* Management-process descriptor: a single read step that loops (via
 * ftl_mngt_continue_step) until the open chunk's data region is consumed.
 */
static const struct ftl_mngt_process_desc desc_recover_open_chunk = {
	.name = "Recover open chunk",
	.ctx_size = sizeof(struct nvc_recover_open_chunk_ctx),
	.init_handler = nvc_recover_open_chunk_init_handler,
	.deinit_handler = nvc_recover_open_chunk_deinit_handler,
	.steps = {
		{
			.name = "Chunk recovery, read vss",
			.action = nvc_recover_open_chunk_read_vss
		},
		{}	/* terminator */
	}
};
203 
/* Device-type op: recover a single open chunk by running the recovery
 * process as a sub-process of the caller's management flow.
 */
static void
nvc_recover_open_chunk(struct spdk_ftl_dev *dev,
		       struct ftl_mngt_process *mngt,
		       struct ftl_nv_cache_chunk *chunk)
{
	ftl_mngt_call_process(mngt, &desc_recover_open_chunk, chunk);
}
211 
/* NV cache device type for generic bdevs with native VSS (separate metadata)
 * support; registered with the FTL core below.
 */
struct ftl_nv_cache_device_type nvc_bdev_vss = {
	.name = "bdev",
	.features = {
	},
	.ops = {
		.is_bdev_compatible = is_bdev_compatible,
		.is_chunk_active = ftl_nvc_bdev_common_is_chunk_active,
		.md_layout_ops = {
			.region_create = ftl_nvc_bdev_common_region_create,
			.region_open = ftl_nvc_bdev_common_region_open,
		},
		.write = write_io,
		.recover_open_chunk = nvc_recover_open_chunk,
	}
};
FTL_NV_CACHE_DEVICE_TYPE_REGISTER(nvc_bdev_vss)
228