xref: /spdk/lib/ftl/ftl_core.h (revision db75f4b6780ac678f18dc38dc3900e6f5afb69ba)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #ifndef FTL_CORE_H
7 #define FTL_CORE_H
8 
9 #include "spdk/stdinc.h"
10 #include "spdk/uuid.h"
11 #include "spdk/thread.h"
12 #include "spdk/util.h"
13 #include "spdk/likely.h"
14 #include "spdk/queue.h"
15 #include "spdk/ftl.h"
16 #include "spdk/bdev.h"
17 
18 #include "ftl_internal.h"
19 #include "ftl_io.h"
20 #include "ftl_nv_cache.h"
21 #include "ftl_writer.h"
22 #include "ftl_layout.h"
23 #include "ftl_sb.h"
24 #include "ftl_l2p.h"
25 #include "utils/ftl_bitmap.h"
26 #include "utils/ftl_log.h"
27 
28 /*
29  * We need to reserve at least 2 buffers for band close / open sequence
30  * alone, plus additional (8) buffers for handling relocations.
31  */
32 #define P2L_MEMPOOL_SIZE (2 + 8)
33 
34 /* When using VSS on nvcache, FTL sometimes doesn't require the contents of metadata.
35  * Some devices have bugs when sending a NULL pointer as part of metadata when namespace
36  * is formatted with VSS. This buffer is passed to such calls to avoid the bug. */
37 #define FTL_ZERO_BUFFER_SIZE 0x100000
38 extern void *g_ftl_write_buf;
39 extern void *g_ftl_read_buf;
40 
/* Main FTL device state: configuration, on-disk/SHM metadata, band bookkeeping,
 * L2P table, writers, and the core-thread pollers/queues. One instance per FTL bdev. */
struct spdk_ftl_dev {
	/* Configuration */
	struct spdk_ftl_conf		conf;

	/* FTL device layout */
	struct ftl_layout		layout;

	/* FTL superblock */
	struct ftl_superblock		*sb;

	/* FTL shm superblock */
	struct ftl_superblock_shm	*sb_shm;
	struct ftl_md			*sb_shm_md;

	/* Queue of registered IO channels */
	TAILQ_HEAD(, ftl_io_channel)	ioch_queue;

	/* Underlying device */
	struct spdk_bdev_desc		*base_bdev_desc;

	/* Cached properties of the underlying device */
	uint64_t			num_blocks_in_band;
	bool				is_zoned;

	/* Indicates the device is fully initialized */
	bool				initialized;

	/* Indicates the device is about to be stopped */
	bool				halt;

	/* Indicates if the device is registered as an IO device */
	bool				io_device_registered;

	/* Management process to be continued after IO device unregistration completes */
	struct ftl_mngt_process		*unregister_process;

	/* Non-volatile write buffer cache */
	struct ftl_nv_cache		nv_cache;

	/* P2L map memory pool */
	struct ftl_mempool		*p2l_pool;

	/* Underlying SHM buf for P2L map mempool */
	struct ftl_md			*p2l_pool_md;

	/* Band md memory pool */
	struct ftl_mempool		*band_md_pool;

	/* counters for poller busy, include
	   1. nv cache read/write
	   2. metadata read/write
	   3. base bdev read/write */
	uint64_t			io_activity_total;

	/* Array of bands */
	struct ftl_band			*bands;

	/* Number of operational bands */
	uint64_t			num_bands;

	/* Next write band */
	struct ftl_band			*next_band;

	/* Free band list */
	TAILQ_HEAD(, ftl_band)		free_bands;

	/* Closed bands list */
	TAILQ_HEAD(, ftl_band)		shut_bands;

	/* Number of free bands */
	uint64_t			num_free;

	/* Logical -> physical table */
	void				*l2p;

	/* Size of the l2p table (in number of LBAs) */
	uint64_t			num_lbas;

	/* P2L valid map */
	struct ftl_bitmap		*valid_map;

	/* Metadata size */
	uint64_t			md_size;

	/* Transfer unit size */
	uint64_t			xfer_size;

	/* Current user write limit */
	int				limit;

	/* Inflight IO operations */
	uint32_t			num_inflight;

	/* Manages data relocation */
	struct ftl_reloc		*reloc;

	/* Thread on which the poller is running */
	struct spdk_thread		*core_thread;

	/* IO channel to the FTL device, used for internal management operations
	 * consuming FTL's external API
	 */
	struct spdk_io_channel		*ioch;

	/* Underlying device IO channel */
	struct spdk_io_channel		*base_ioch;

	/* Poller */
	struct spdk_poller		*core_poller;

	/* Read submission queue */
	TAILQ_HEAD(, ftl_io)		rd_sq;

	/* Write submission queue */
	TAILQ_HEAD(, ftl_io)		wr_sq;

	/* Writer for user IOs */
	struct ftl_writer		writer_user;

	/* Writer for GC IOs */
	struct ftl_writer		writer_gc;

	/* NOTE(review): presumably the number of logical bands mapped onto one
	 * physical band — semantics inferred from the name, confirm at usage sites */
	uint32_t			num_logical_bands_in_physical;

	/* Retry init sequence */
	bool				init_retry;
};
168 
/* Apply the currently configured user write limit to the device */
void ftl_apply_limits(struct spdk_ftl_dev *dev);

/* Invalidate the mapping for the given physical address */
void ftl_invalidate_addr(struct spdk_ftl_dev *dev, ftl_addr addr);

/* Main FTL poller; ctx is presumably the struct spdk_ftl_dev — confirm at registration site */
int ftl_core_poller(void *ctx);

/* Per-IO-channel poller; arg is presumably the ftl_io_channel — confirm at registration site */
int ftl_io_channel_poll(void *arg);

/* Get the FTL-specific context of an SPDK IO channel */
struct ftl_io_channel *ftl_io_channel_get_ctx(struct spdk_io_channel *ioch);

/* Check whether data relocation work is needed */
bool ftl_needs_reloc(struct spdk_ftl_dev *dev);

/* Retrieve the next band to write to from the free band list */
struct ftl_band *ftl_band_get_next_free(struct spdk_ftl_dev *dev);
182 
183 static inline uint64_t
184 ftl_get_num_blocks_in_band(const struct spdk_ftl_dev *dev)
185 {
186 	return dev->num_blocks_in_band;
187 }
188 
189 static inline uint64_t
190 ftl_addr_get_band(const struct spdk_ftl_dev *dev, ftl_addr addr)
191 {
192 	return addr / ftl_get_num_blocks_in_band(dev);
193 }
194 
/* Default write unit size (in blocks) assumed for non-zoned base devices.
 * TODO: this should be passed via input parameter */
#define FTL_DEFAULT_WRITE_UNIT_SIZE 32

/* Return the write unit size (in blocks) to use for the given base bdev.
 * Zoned devices report their required write unit size directly; for all
 * other devices a fixed default is used. */
static inline uint32_t
ftl_get_write_unit_size(struct spdk_bdev *bdev)
{
	if (spdk_bdev_is_zoned(bdev)) {
		return spdk_bdev_get_write_unit_size(bdev);
	}

	return FTL_DEFAULT_WRITE_UNIT_SIZE;
}
205 
206 static inline struct spdk_thread *
207 ftl_get_core_thread(const struct spdk_ftl_dev *dev)
208 {
209 	return dev->core_thread;
210 }
211 
212 static inline uint64_t
213 ftl_get_num_bands(const struct spdk_ftl_dev *dev)
214 {
215 	return dev->num_bands;
216 }
217 
218 static inline bool
219 ftl_check_core_thread(const struct spdk_ftl_dev *dev)
220 {
221 	return dev->core_thread == spdk_get_thread();
222 }
223 
224 static inline int
225 ftl_addr_packed(const struct spdk_ftl_dev *dev)
226 {
227 	return dev->layout.l2p.addr_size < sizeof(ftl_addr);
228 }
229 
230 static inline int
231 ftl_addr_in_nvc(const struct spdk_ftl_dev *dev, ftl_addr addr)
232 {
233 	assert(addr != FTL_ADDR_INVALID);
234 	return addr >= dev->layout.base.total_blocks;
235 }
236 
237 static inline uint64_t
238 ftl_addr_to_nvc_offset(const struct spdk_ftl_dev *dev, ftl_addr addr)
239 {
240 	assert(ftl_addr_in_nvc(dev, addr));
241 	return addr - dev->layout.base.total_blocks;
242 }
243 
244 static inline ftl_addr
245 ftl_addr_from_nvc_offset(const struct spdk_ftl_dev *dev, uint64_t cache_offset)
246 {
247 	return cache_offset + dev->layout.base.total_blocks;
248 }
249 
250 static inline size_t
251 ftl_p2l_map_num_blocks(const struct spdk_ftl_dev *dev)
252 {
253 	return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) * sizeof(uint64_t),
254 				    FTL_BLOCK_SIZE);
255 }
256 
257 static inline size_t
258 ftl_tail_md_num_blocks(const struct spdk_ftl_dev *dev)
259 {
260 	return spdk_divide_round_up(
261 		       ftl_p2l_map_num_blocks(dev),
262 		       dev->xfer_size) * dev->xfer_size;
263 }
264 
265 /*
266  * shm_ready being set is a necessary part of the validity of the shm superblock
267  * If it's not set, then the recovery or startup must proceed from disk
268  *
269  * - If both sb and shm_sb are clean, then shm memory can be relied on for startup
270  * - If shm_sb wasn't set to clean, then disk startup/recovery needs to be done (which depends on the sb->clean flag)
 * - sb->clean being clear while sb_shm->clean is set is technically not possible (due to the order of these operations),
 * but if encountered, a full recovery from disk should be done to be on the safe side (which ftl_fast_recovery will guarantee)
273  */
274 
275 static inline bool
276 ftl_fast_startup(const struct spdk_ftl_dev *dev)
277 {
278 	return dev->sb->clean && dev->sb_shm->shm_clean && dev->sb_shm->shm_ready;
279 }
280 
281 static inline bool
282 ftl_fast_recovery(const struct spdk_ftl_dev *dev)
283 {
284 	return !dev->sb->clean && !dev->sb_shm->shm_clean && dev->sb_shm->shm_ready;
285 }
286 
287 #endif /* FTL_CORE_H */
288