/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright 2023 Solidigm All Rights Reserved
 */

#include "ftl_nvc_dev.h"
#include "ftl_core.h"
#include "ftl_layout.h"
#include "ftl_nv_cache.h"
#include "mngt/ftl_mngt.h"
#include "ftl_nvc_bdev_common.h"

static void write_io(struct ftl_io *io);
static void p2l_log_cb(struct ftl_io *io);

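/*
 * The non-VSS cache bdev has no per-block metadata to carry P2L
 * (physical-to-logical) information, so it is persisted in a dedicated
 * P2L log instead; init()/deinit() set up and tear down that log.
 */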
static int
init(struct spdk_ftl_dev *dev)
{
	int rc;

	rc = ftl_p2l_log_init(dev);
	if (rc) {
		return rc;
	}

	return 0;
}

static void
deinit(struct spdk_ftl_dev *dev)
{
	ftl_p2l_log_deinit(dev);
}

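/* Only bdevs without per-block metadata (no VSS) are compatible with this cache type */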
static bool
is_bdev_compatible(struct spdk_ftl_dev *dev, struct spdk_bdev *bdev)
{
	if (spdk_bdev_get_md_size(bdev) != 0) {
		/* Bdev with per-block metadata is not supported by this cache type */
		return false;
	}

	return true;
}

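/*
 * Each open chunk owns a P2L log that records the LBA of every block written
 * to it: the log is acquired when the chunk is opened and released once the
 * chunk is closed.
 */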
static void
on_chunk_open(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk)
{
	assert(NULL == chunk->p2l_log);
	chunk->p2l_log = ftl_p2l_log_acquire(dev, chunk->md->seq_id, p2l_log_cb);
	chunk->md->p2l_log_type = ftl_p2l_log_type(chunk->p2l_log);
}

static void
on_chunk_closed(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk)
{
	assert(chunk->p2l_log);
	ftl_p2l_log_release(dev, chunk->p2l_log);
	chunk->p2l_log = NULL;
}

static void
p2l_log_cb(struct ftl_io *io)
{
	ftl_nv_cache_write_complete(io, true);
}

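/*
 * User write completion path: when the data write to the cache bdev succeeds,
 * the I/O is handed to the chunk's P2L log and completed later from
 * p2l_log_cb(); on failure the I/O is completed immediately with an error.
 */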
static void
write_io_cb(struct spdk_bdev_io *bdev_io, bool success, void *ctx)
{
	struct ftl_io *io = ctx;

	ftl_stats_bdev_io_completed(io->dev, FTL_STATS_TYPE_USER, bdev_io);
	spdk_bdev_free_io(bdev_io);

	if (spdk_likely(success)) {
		struct ftl_p2l_log *log = io->nv_cache_chunk->p2l_log;

		ftl_p2l_log_io(log, io);
	} else {
		ftl_nv_cache_write_complete(io, false);
	}
}

static void
write_io_retry(void *ctx)
{
	struct ftl_io *io = ctx;

	write_io(io);
}

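/*
 * Submit the user data to the cache bdev. If the bdev layer runs out of
 * spdk_bdev_io (-ENOMEM), the request is parked on the bdev io_wait queue and
 * retried from write_io_retry(); any other submission error is fatal.
 */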
static void
write_io(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
	int rc;

	rc = spdk_bdev_writev_blocks(nv_cache->bdev_desc, nv_cache->cache_ioch,
				     io->iov, io->iov_cnt,
				     ftl_addr_to_nvc_offset(dev, io->addr), io->num_blocks,
				     write_io_cb, io);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);

			io->bdev_io_wait.bdev = bdev;
			io->bdev_io_wait.cb_fn = write_io_retry;
			io->bdev_io_wait.cb_arg = io;
			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &io->bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

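/* Periodic work: flush buffered P2L log entries out to the cache device */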
static void
process(struct spdk_ftl_dev *dev)
{
	ftl_p2l_log_flush(dev);
}

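/*
 * Open chunk recovery: the P2L map of a chunk that was open at shutdown is
 * rebuilt by replaying the persisted P2L log identified by the chunk's
 * log type and sequence id.
 */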
struct recovery_chunk_ctx {
	struct ftl_nv_cache_chunk *chunk;
};

static void
recovery_chunk_recover_p2l_map_cb(void *cb_arg, int status)
{
	struct ftl_mngt_process *mngt = cb_arg;

	if (status) {
		ftl_mngt_fail_step(mngt);
	} else {
		ftl_mngt_next_step(mngt);
	}
}

static int
recovery_chunk_recover_p2l_map_read_cb(struct spdk_ftl_dev *dev, void *cb_arg,
				       uint64_t lba, ftl_addr addr, uint64_t seq_id)
{
	struct ftl_mngt_process *mngt = cb_arg;
	struct recovery_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_nv_cache_chunk *chunk = ctx->chunk;

	ftl_nv_cache_chunk_set_addr(chunk, lba, addr);

	/* TODO: We could stop scanning once all LBAs within the chunk have been found */
	return 0;
}

static void
recovery_chunk_recover_p2l_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct recovery_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_nv_cache_chunk *chunk = ctx->chunk;
	int rc;

	rc = ftl_p2l_log_read(dev, chunk->md->p2l_log_type, chunk->md->seq_id,
			      recovery_chunk_recover_p2l_map_cb, mngt,
			      recovery_chunk_recover_p2l_map_read_cb);

	if (rc) {
		ftl_mngt_fail_step(mngt);
	}
}

static int
recovery_chunk_init(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
		    void *init_ctx)
{
	struct recovery_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);

	ctx->chunk = init_ctx;
	return 0;
}

static const struct ftl_mngt_process_desc desc_chunk_recovery = {
	.name = "Recover open chunk",
	.ctx_size = sizeof(struct recovery_chunk_ctx),
	.init_handler = recovery_chunk_init,
	.steps = {
		{
			.name = "Recover chunk P2L map",
			.action = recovery_chunk_recover_p2l_map,
		},
		{}
	}
};

static void
recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
		   struct ftl_nv_cache_chunk *chunk)
{
	ftl_mngt_call_process(mngt, &desc_chunk_recovery, chunk);
}

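/*
 * Create and open a metadata layout region for every P2L I/O log
 * (FTL_LAYOUT_REGION_TYPE_P2L_LOG_IO_MIN..MAX), each sized to cover a band's
 * worth of blocks.
 */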
static int
setup_layout(struct spdk_ftl_dev *dev)
{
	const struct ftl_md_layout_ops *md_ops = &dev->nv_cache.nvc_type->ops.md_layout_ops;
	const uint64_t blocks = ftl_p2l_log_get_md_blocks_required(dev, 1, ftl_get_num_blocks_in_band(dev));
	enum ftl_layout_region_type region_type;

	for (region_type = FTL_LAYOUT_REGION_TYPE_P2L_LOG_IO_MIN;
	     region_type <= FTL_LAYOUT_REGION_TYPE_P2L_LOG_IO_MAX;
	     region_type++) {
		if (md_ops->region_create(dev, region_type, FTL_P2L_LOG_VERSION_CURRENT, blocks)) {
			return -1;
		}

		if (md_ops->region_open(dev, region_type, FTL_P2L_LOG_VERSION_CURRENT,
					FTL_BLOCK_SIZE, blocks,
					&dev->layout.region[region_type])) {
			return -1;
		}
	}

	return 0;
}

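/*
 * Device type descriptor for an FTL NV cache placed on a bdev without VSS
 * (per-block metadata); registered with the FTL NV cache layer below.
 */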
struct ftl_nv_cache_device_type nvc_bdev_non_vss = {
	.name = "bdev-non-vss",
	.features = {
	},
	.ops = {
		.init = init,
		.deinit = deinit,
		.on_chunk_open = on_chunk_open,
		.on_chunk_closed = on_chunk_closed,
		.is_bdev_compatible = is_bdev_compatible,
		.is_chunk_active = ftl_nvc_bdev_common_is_chunk_active,
		.setup_layout = setup_layout,
		.md_layout_ops = {
			.region_create = ftl_nvc_bdev_common_region_create,
			.region_open = ftl_nvc_bdev_common_region_open,
		},
		.process = process,
		.write = write_io,
		.recover_open_chunk = recover_open_chunk
	}
};
FTL_NV_CACHE_DEVICE_TYPE_REGISTER(nvc_bdev_non_vss)