xref: /spdk/lib/ftl/ftl_core.h (revision 877573897ad52be4fa8989f7617bd655b87e05c4)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #ifndef FTL_CORE_H
7 #define FTL_CORE_H
8 
9 #include "spdk/stdinc.h"
10 #include "spdk/uuid.h"
11 #include "spdk/thread.h"
12 #include "spdk/util.h"
13 #include "spdk/likely.h"
14 #include "spdk/queue.h"
15 #include "spdk/ftl.h"
16 #include "spdk/bdev.h"
17 
18 #include "ftl_internal.h"
19 #include "ftl_io.h"
20 #include "ftl_trace.h"
21 #include "ftl_nv_cache.h"
22 #include "ftl_writer.h"
23 #include "ftl_layout.h"
24 #include "ftl_sb.h"
25 #include "ftl_l2p.h"
26 #include "utils/ftl_bitmap.h"
27 #include "utils/ftl_log.h"
28 
29 /*
30  * We need to reserve at least 2 buffers for band close / open sequence
31  * alone, plus additional (8) buffers for handling relocations.
32  */
33 #define P2L_MEMPOOL_SIZE (2 + 8)
34 
35 /* When using VSS on nvcache, FTL sometimes doesn't require the contents of metadata.
36  * Some devices have bugs when sending a NULL pointer as part of metadata when namespace
37  * is formatted with VSS. This buffer is passed to such calls to avoid the bug. */
38 #define FTL_ZERO_BUFFER_SIZE 0x100000
39 extern void *g_ftl_write_buf;
40 extern void *g_ftl_read_buf;
41 
struct spdk_ftl_dev {
	/* Configuration */
	struct spdk_ftl_conf		conf;

	/* FTL device layout */
	struct ftl_layout		layout;

	/* FTL superblock */
	struct ftl_superblock		*sb;

	/* FTL shm superblock */
	struct ftl_superblock_shm	*sb_shm;
	/* Metadata descriptor backing the shm superblock */
	struct ftl_md			*sb_shm_md;

	/* Queue of registered IO channels */
	TAILQ_HEAD(, ftl_io_channel)	ioch_queue;

	/* Underlying device */
	struct spdk_bdev_desc		*base_bdev_desc;

	/* Cached properties of the underlying device */
	uint64_t			num_blocks_in_band;
	bool				is_zoned;

	/* Indicates the device is fully initialized */
	bool				initialized;

	/* Indicates the device is about to be stopped */
	bool				halt;

	/* Indicates if the device is registered as an IO device */
	bool				io_device_registered;

	/* Management process to be continued after IO device unregistration completes */
	struct ftl_mngt_process		*unregister_process;

	/* Non-volatile write buffer cache */
	struct ftl_nv_cache		nv_cache;

	/* P2L map memory pool */
	struct ftl_mempool		*p2l_pool;

	/* Underlying SHM buf for P2L map mempool */
	struct ftl_md			*p2l_pool_md;

	/* Band md memory pool */
	struct ftl_mempool		*band_md_pool;

	/* Traces */
	struct ftl_trace		trace;

	/* Statistics */
	struct ftl_stats		stats;

	/* Array of bands */
	struct ftl_band			*bands;

	/* Number of operational bands */
	uint64_t			num_bands;

	/* Next write band */
	struct ftl_band			*next_band;

	/* Free band list */
	TAILQ_HEAD(, ftl_band)		free_bands;

	/* Closed bands list */
	TAILQ_HEAD(, ftl_band)		shut_bands;

	/* Number of free bands */
	uint64_t			num_free;

	/* Logical -> physical table */
	void				*l2p;

	/* l2p deferred pins list */
	TAILQ_HEAD(, ftl_l2p_pin_ctx)	l2p_deferred_pins;

	/* Size of the l2p table (in LBAs) */
	uint64_t			num_lbas;

	/* P2L valid map */
	struct ftl_bitmap		*valid_map;

	/* Metadata size */
	uint64_t			md_size;

	/* Transfer unit size */
	uint64_t			xfer_size;

	/* Current user write limit */
	int				limit;

	/* Inflight IO operations */
	uint32_t			num_inflight;

	/* Manages data relocation */
	struct ftl_reloc		*reloc;

	/* Thread on which the poller is running */
	struct spdk_thread		*core_thread;

	/* IO channel to the FTL device, used for internal management operations
	 * consuming FTL's external API
	 */
	struct spdk_io_channel		*ioch;

	/* Underlying device IO channel */
	struct spdk_io_channel		*base_ioch;

	/* Poller */
	struct spdk_poller		*core_poller;

	/* Read submission queue */
	TAILQ_HEAD(, ftl_io)		rd_sq;

	/* Write submission queue */
	TAILQ_HEAD(, ftl_io)		wr_sq;

	/* Trim submission queue */
	TAILQ_HEAD(, ftl_io)		unmap_sq;

	/* Trim valid map */
	struct ftl_bitmap		*unmap_map;
	/* Metadata descriptor backing the trim valid map */
	struct ftl_md			*unmap_map_md;
	/* Queue depth of outstanding trim requests */
	size_t				unmap_qd;
	bool				unmap_in_progress;

	/* Writer for user IOs */
	struct ftl_writer		writer_user;

	/* Writer for GC IOs */
	struct ftl_writer		writer_gc;

	/* Number of logical bands per physical band (per the field name — confirm in layout code) */
	uint32_t			num_logical_bands_in_physical;

	/* Retry init sequence */
	bool				init_retry;

	/* P2L checkpointing */
	struct {
		/* Free regions */
		TAILQ_HEAD(, ftl_p2l_ckpt)	free;
		/* In use regions */
		TAILQ_HEAD(, ftl_p2l_ckpt)	inuse;
	} p2l_ckpt;
};
189 
/* Recompute and apply the current user-write limits (updates dev->limit). */
void ftl_apply_limits(struct spdk_ftl_dev *dev);

/* Invalidate the data stored at the given physical address. */
void ftl_invalidate_addr(struct spdk_ftl_dev *dev, ftl_addr addr);

/* Core-thread poller entry point; returns an spdk_poller busy/idle status. */
int ftl_core_poller(void *ctx);

/* Per-IO-channel poller entry point. */
int ftl_io_channel_poll(void *arg);

/* Retrieve the FTL per-channel context from a generic SPDK IO channel. */
struct ftl_io_channel *ftl_io_channel_get_ctx(struct spdk_io_channel *ioch);

/* True when data relocation work is pending — TODO confirm exact trigger in implementation. */
bool ftl_needs_reloc(struct spdk_ftl_dev *dev);

/* Get the next band from the free band list (presumably NULL when none is available). */
struct ftl_band *ftl_band_get_next_free(struct spdk_ftl_dev *dev);

/* Record an LBA range in the trim (unmap) valid map along with its sequence id. */
void ftl_set_unmap_map(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t num_blocks,
		       uint64_t seq_id);

/* Recover the maximum sequence id observed (used during startup/recovery — confirm). */
void ftl_recover_max_seq(struct spdk_ftl_dev *dev);

/* Account a completed bdev IO of the given type in the device statistics. */
void ftl_stats_bdev_io_completed(struct spdk_ftl_dev *dev, enum ftl_stats_type type,
				 struct spdk_bdev_io *bdev_io);

/* Count a CRC error against the statistics bucket of the given type. */
void ftl_stats_crc_error(struct spdk_ftl_dev *dev, enum ftl_stats_type type);

/* Submit a trim (unmap) of lba_cnt blocks starting at lba; cb_fn is invoked on
 * completion. Return value semantics defined in the implementation — confirm. */
int ftl_unmap(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
	      uint64_t lba, size_t lba_cnt, spdk_ftl_fn cb_fn, void *cb_arg);
216 
217 static inline uint64_t
218 ftl_get_num_blocks_in_band(const struct spdk_ftl_dev *dev)
219 {
220 	return dev->num_blocks_in_band;
221 }
222 
223 static inline uint32_t
224 ftl_get_write_unit_size(struct spdk_bdev *bdev)
225 {
226 	/* Full block of P2L map worth of xfer_sz is needed for P2L checkpointing */
227 	return FTL_NUM_LBA_IN_BLOCK;
228 }
229 
230 static inline struct spdk_thread *
231 ftl_get_core_thread(const struct spdk_ftl_dev *dev)
232 {
233 	return dev->core_thread;
234 }
235 
236 static inline void
237 ftl_add_io_activity(struct spdk_ftl_dev *dev)
238 {
239 	dev->stats.io_activity_total++;
240 }
241 
242 static inline uint64_t
243 ftl_get_num_bands(const struct spdk_ftl_dev *dev)
244 {
245 	return dev->num_bands;
246 }
247 
248 static inline bool
249 ftl_check_core_thread(const struct spdk_ftl_dev *dev)
250 {
251 	return dev->core_thread == spdk_get_thread();
252 }
253 
254 static inline int
255 ftl_addr_packed(const struct spdk_ftl_dev *dev)
256 {
257 	return dev->layout.l2p.addr_size < sizeof(ftl_addr);
258 }
259 
260 static inline int
261 ftl_addr_in_nvc(const struct spdk_ftl_dev *dev, ftl_addr addr)
262 {
263 	assert(addr != FTL_ADDR_INVALID);
264 	return addr >= dev->layout.base.total_blocks;
265 }
266 
267 static inline uint64_t
268 ftl_addr_to_nvc_offset(const struct spdk_ftl_dev *dev, ftl_addr addr)
269 {
270 	assert(ftl_addr_in_nvc(dev, addr));
271 	return addr - dev->layout.base.total_blocks;
272 }
273 
274 static inline ftl_addr
275 ftl_addr_from_nvc_offset(const struct spdk_ftl_dev *dev, uint64_t cache_offset)
276 {
277 	return cache_offset + dev->layout.base.total_blocks;
278 }
279 
280 static inline uint64_t
281 ftl_get_next_seq_id(struct spdk_ftl_dev *dev)
282 {
283 	return ++dev->sb->seq_id;
284 }
285 
286 static inline size_t
287 ftl_p2l_map_num_blocks(const struct spdk_ftl_dev *dev)
288 {
289 	return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) *
290 				    sizeof(struct ftl_p2l_map_entry), FTL_BLOCK_SIZE);
291 }
292 
293 static inline size_t
294 ftl_tail_md_num_blocks(const struct spdk_ftl_dev *dev)
295 {
296 	return spdk_divide_round_up(
297 		       ftl_p2l_map_num_blocks(dev),
298 		       dev->xfer_size) * dev->xfer_size;
299 }
300 
301 /*
302  * shm_ready being set is a necessary part of the validity of the shm superblock
303  * If it's not set, then the recovery or startup must proceed from disk
304  *
305  * - If both sb and shm_sb are clean, then shm memory can be relied on for startup
306  * - If shm_sb wasn't set to clean, then disk startup/recovery needs to be done (which depends on the sb->clean flag)
 * - sb->clean being clear while sb_shm->clean is set is technically not possible (due to the order of
 * these operations), but in that case a full recovery from disk should probably be done to be on the
 * safe side (which ftl_fast_recovery will guarantee)
309  */
310 
311 static inline bool
312 ftl_fast_startup(const struct spdk_ftl_dev *dev)
313 {
314 	return dev->sb->clean && dev->sb_shm->shm_clean && dev->sb_shm->shm_ready;
315 }
316 
317 static inline bool
318 ftl_fast_recovery(const struct spdk_ftl_dev *dev)
319 {
320 	return !dev->sb->clean && !dev->sb_shm->shm_clean && dev->sb_shm->shm_ready;
321 }
322 
323 #endif /* FTL_CORE_H */
324