/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 */

#ifndef FTL_CORE_H
#define FTL_CORE_H

#include "spdk/stdinc.h"
#include "spdk/uuid.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk/likely.h"
#include "spdk/queue.h"
#include "spdk/ftl.h"
#include "spdk/bdev.h"

#include "ftl_internal.h"
#include "ftl_io.h"
#include "ftl_trace.h"
#include "ftl_nv_cache.h"
#include "ftl_writer.h"
#include "ftl_layout.h"
#include "ftl_sb.h"
#include "ftl_l2p.h"
#include "base/ftl_base_dev.h"
#include "utils/ftl_bitmap.h"
#include "utils/ftl_log.h"
#include "utils/ftl_property.h"

/*
 * We need to reserve at least 2 buffers for the band close / open sequence
 * alone, plus an additional 8 buffers for handling relocations.
 */
#define P2L_MEMPOOL_SIZE (2 + 8)

/* When using VSS on the nvcache, FTL sometimes doesn't require the contents of the metadata.
 * Some devices have bugs when a NULL pointer is sent as the metadata for a namespace
 * formatted with VSS. These buffers are passed to such calls to avoid the bug. */
#define FTL_ZERO_BUFFER_SIZE 0x100000
extern void *g_ftl_write_buf;
extern void *g_ftl_read_buf;

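/* Illustrative sketch (not part of this header): when the metadata contents are
 * irrelevant, a write to a VSS-formatted namespace could pass g_ftl_write_buf
 * instead of NULL as the metadata pointer. The call below assumes the request's
 * metadata fits within FTL_ZERO_BUFFER_SIZE.
 *
 *	rc = spdk_bdev_write_blocks_with_md(desc, ioch, payload, g_ftl_write_buf,
 *					    offset_blocks, num_blocks, cb_fn, cb_arg);
 */
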
struct ftl_layout_tracker_bdev;

struct spdk_ftl_dev {
	/* Configuration */
	struct spdk_ftl_conf		conf;

	/* FTL device layout */
	struct ftl_layout		layout;

	/* FTL superblock */
	struct ftl_superblock		*sb;

	/* FTL shm superblock */
	struct ftl_superblock_shm	*sb_shm;
	struct ftl_md			*sb_shm_md;

	/* Queue of registered IO channels */
	TAILQ_HEAD(, ftl_io_channel)	ioch_queue;

	/* Underlying device */
	struct spdk_bdev_desc		*base_bdev_desc;

	/* Base device type */
	const struct ftl_base_device_type *base_type;

	/* Cached properties of the underlying device */
	uint64_t			num_blocks_in_band;
	bool				is_zoned;

	/* Indicates the device is fully initialized */
	bool				initialized;

	/* Indicates the device is about to be stopped */
	bool				halt;

	/* Indicates if the device is registered as an IO device */
	bool				io_device_registered;

	/* Management process to be continued after IO device unregistration completes */
	struct ftl_mngt_process		*unregister_process;

	/* Non-volatile write buffer cache */
	struct ftl_nv_cache		nv_cache;

	/* P2L map memory pool */
	struct ftl_mempool		*p2l_pool;

	/* Underlying SHM buf for P2L map mempool */
	struct ftl_md			*p2l_pool_md;

	/* Band md memory pool */
	struct ftl_mempool		*band_md_pool;

	/* Traces */
	struct ftl_trace		trace;

	/* Statistics */
	struct ftl_stats		stats;

	/* Array of bands */
	struct ftl_band			*bands;

	/* Number of operational bands */
	uint64_t			num_bands;

	/* Next write band */
	struct ftl_band			*next_band;

	/* Free band list */
	TAILQ_HEAD(, ftl_band)		free_bands;

	/* Closed bands list */
	TAILQ_HEAD(, ftl_band)		shut_bands;

	/* Number of free bands */
	uint64_t			num_free;

	/* Logical -> physical table */
	void				*l2p;

	/* l2p deferred pins list */
	TAILQ_HEAD(, ftl_l2p_pin_ctx)	l2p_deferred_pins;

	/* Size of the l2p table */
	uint64_t			num_lbas;

	/* P2L valid map */
	struct ftl_bitmap		*valid_map;

	/* Metadata size */
	uint64_t			md_size;

	/* Transfer unit size */
	uint64_t			xfer_size;

	/* Current user write limit */
	int				limit;

	/* Inflight IO operations */
	uint32_t			num_inflight;

	/* Manages data relocation */
	struct ftl_reloc		*reloc;

	/* Thread on which the poller is running */
	struct spdk_thread		*core_thread;

	/* IO channel to the FTL device, used for internal management operations
	 * consuming FTL's external API
	 */
	struct spdk_io_channel		*ioch;

	/* Underlying device IO channel */
	struct spdk_io_channel		*base_ioch;

	/* Poller */
	struct spdk_poller		*core_poller;

	/* Read submission queue */
	TAILQ_HEAD(, ftl_io)		rd_sq;

	/* Write submission queue */
	TAILQ_HEAD(, ftl_io)		wr_sq;

	/* Trim submission queue */
	TAILQ_HEAD(, ftl_io)		trim_sq;

	/* Trim valid map */
	struct ftl_bitmap		*trim_map;
	struct ftl_md			*trim_map_md;
	size_t				trim_qd;
	bool				trim_in_progress;
	struct ftl_md_io_entry_ctx	trim_md_io_entry_ctx;

	/* Writer for user IOs */
	struct ftl_writer		writer_user;

	/* Writer for GC IOs */
	struct ftl_writer		writer_gc;

	uint32_t			num_logical_bands_in_physical;

	/* Retry init sequence */
	bool				init_retry;

	/* P2L checkpointing */
	struct {
		/* Free regions */
		TAILQ_HEAD(, ftl_p2l_ckpt)	free;
		/* In use regions */
		TAILQ_HEAD(, ftl_p2l_ckpt)	inuse;

		struct {
			/* Free logs */
			TAILQ_HEAD(, ftl_p2l_log)	free;
			/* In use logs */
			TAILQ_HEAD(, ftl_p2l_log)	inuse;
		} log;
	} p2l_ckpt;

	/* MD layout region tracker for the nvc device */
	struct ftl_layout_tracker_bdev *nvc_layout_tracker;

	/* MD layout region tracker for the base device */
	struct ftl_layout_tracker_bdev *base_layout_tracker;

	/* FTL properties which can be configured by the user */
	struct ftl_properties			*properties;
};

void ftl_apply_limits(struct spdk_ftl_dev *dev);

void ftl_invalidate_addr(struct spdk_ftl_dev *dev, ftl_addr addr);

int ftl_core_poller(void *ctx);

int ftl_io_channel_poll(void *arg);

struct ftl_io_channel *ftl_io_channel_get_ctx(struct spdk_io_channel *ioch);

bool ftl_needs_reloc(struct spdk_ftl_dev *dev);

struct ftl_band *ftl_band_get_next_free(struct spdk_ftl_dev *dev);

void ftl_set_trim_map(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t num_blocks,
		      uint64_t seq_id);

void ftl_recover_max_seq(struct spdk_ftl_dev *dev);

void ftl_stats_bdev_io_completed(struct spdk_ftl_dev *dev, enum ftl_stats_type type,
				 struct spdk_bdev_io *bdev_io);

void ftl_stats_crc_error(struct spdk_ftl_dev *dev, enum ftl_stats_type type);

int ftl_trim(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
	     uint64_t lba, size_t lba_cnt, spdk_ftl_fn cb_fn, void *cb_arg);

static inline uint64_t
ftl_get_num_blocks_in_band(const struct spdk_ftl_dev *dev)
{
	return dev->num_blocks_in_band;
}

static inline uint32_t
ftl_get_write_unit_size(struct spdk_bdev *bdev)
{
	/* A full block of the P2L map is needed per xfer_size for P2L checkpointing */
	return FTL_NUM_LBA_IN_BLOCK;
}

static inline struct spdk_thread *
ftl_get_core_thread(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread;
}

static inline void
ftl_add_io_activity(struct spdk_ftl_dev *dev)
{
	dev->stats.io_activity_total++;
}

static inline uint64_t
ftl_get_num_bands(const struct spdk_ftl_dev *dev)
{
	return dev->num_bands;
}

static inline bool
ftl_check_core_thread(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread == spdk_get_thread();
}
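
/* Illustrative sketch (not part of this header): internal paths that must run on
 * the core thread could guard themselves with an assertion built on the predicate
 * above.
 *
 *	assert(ftl_check_core_thread(dev));
 */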

static inline int
ftl_addr_packed(const struct spdk_ftl_dev *dev)
{
	return dev->layout.l2p.addr_size < sizeof(ftl_addr);
}

static inline int
ftl_addr_in_nvc(const struct spdk_ftl_dev *dev, ftl_addr addr)
{
	assert(addr != FTL_ADDR_INVALID);
	return addr >= dev->layout.base.total_blocks;
}

static inline uint64_t
ftl_addr_to_nvc_offset(const struct spdk_ftl_dev *dev, ftl_addr addr)
{
	assert(ftl_addr_in_nvc(dev, addr));
	return addr - dev->layout.base.total_blocks;
}

static inline ftl_addr
ftl_addr_from_nvc_offset(const struct spdk_ftl_dev *dev, uint64_t cache_offset)
{
	return cache_offset + dev->layout.base.total_blocks;
}
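
/* Illustrative sketch (not part of this header): ftl_addr is a flat address space
 * in which blocks at or past the base device's total_blocks live on the NV cache,
 * so the two conversions above are inverses for any valid cache offset.
 *
 *	if (ftl_addr_in_nvc(dev, addr)) {
 *		uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
 *
 *		assert(ftl_addr_from_nvc_offset(dev, cache_offset) == addr);
 *	}
 */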

static inline uint64_t
ftl_get_next_seq_id(struct spdk_ftl_dev *dev)
{
	return ++dev->sb->seq_id;
}

static inline size_t
ftl_p2l_map_num_blocks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) *
				    sizeof(struct ftl_p2l_map_entry), FTL_BLOCK_SIZE);
}

static inline size_t
ftl_tail_md_num_blocks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(
		       ftl_p2l_map_num_blocks(dev),
		       dev->xfer_size) * dev->xfer_size;
}
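
/* Worked example (illustrative values only): assuming a 4096-byte FTL_BLOCK_SIZE,
 * a band of 49152 blocks, and a 16-byte struct ftl_p2l_map_entry, the P2L map
 * occupies 49152 * 16 / 4096 = 192 blocks. With xfer_size = 256, the tail
 * metadata is then rounded up to the next transfer unit: 256 blocks.
 */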

/*
 * shm_ready being set is a necessary part of the validity of the shm superblock.
 * If it's not set, then recovery or startup must proceed from disk.
 *
 * - If both sb and shm_sb are clean, then shm memory can be relied on for startup.
 * - If shm_sb wasn't set to clean, then disk startup/recovery needs to be done (which depends on the sb->clean flag).
 * - sb->clean clear with sb_shm->clean set is technically not possible (due to the order of these operations), but in
 *   that case a full recovery from disk should probably be done to be on the safe side (which ftl_fast_recovery will
 *   guarantee).
 */

static inline bool
ftl_fast_startup(const struct spdk_ftl_dev *dev)
{
	return dev->sb->clean && dev->sb_shm->shm_clean && dev->sb_shm->shm_ready;
}

static inline bool
ftl_fast_recovery(const struct spdk_ftl_dev *dev)
{
	return !dev->sb->clean && !dev->sb_shm->shm_clean && dev->sb_shm->shm_ready;
}
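
/* Illustrative sketch (not part of this header): a startup path could use the two
 * predicates above to pick the cheapest viable path, falling back to a full
 * startup/recovery from disk when neither holds (e.g. when shm_ready is clear).
 *
 *	if (ftl_fast_startup(dev)) {
 *		... restore state from shared memory ...
 *	} else if (ftl_fast_recovery(dev)) {
 *		... fast recovery using shm metadata ...
 *	} else {
 *		... full startup/recovery from disk ...
 *	}
 */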

#endif /* FTL_CORE_H */