xref: /spdk/lib/ftl/ftl_io.h (revision 0098e636761237b77c12c30c2408263a5d2260cc)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #ifndef FTL_IO_H
7 #define FTL_IO_H
8 
9 #include "spdk/stdinc.h"
10 #include "spdk/nvme.h"
11 #include "spdk/ftl.h"
12 
13 #include "ftl_addr.h"
14 #include "ftl_trace.h"
15 
16 struct spdk_ftl_dev;
17 struct ftl_band;
18 struct ftl_batch;
19 struct ftl_io;
20 
21 typedef int (*ftl_md_pack_fn)(struct ftl_band *);
22 typedef void (*ftl_io_fn)(struct ftl_io *, void *, int);
23 
24 /* IO flags */
/* IO flags — bit flags combined into the `flags` field of struct ftl_io */
enum ftl_io_flags {
	/* Indicates whether IO is already initialized */
	FTL_IO_INITIALIZED	= (1 << 0),
	/* Internal based IO (defrag, metadata etc.) */
	FTL_IO_INTERNAL		= (1 << 1),
	/* Indicates that the IO should not go through if there's */
	/* already another one scheduled to the same LBA */
	FTL_IO_WEAK		= (1 << 2),
	/* Indicates that the IO is used for padding */
	FTL_IO_PAD		= (1 << 3),
	/* The IO operates on metadata */
	FTL_IO_MD		= (1 << 4),
	/* Using physical instead of logical address */
	FTL_IO_PHYSICAL_MODE	= (1 << 5),
	/* Indicates that IO contains noncontiguous LBAs (lba.vector is used
	 * instead of lba.single) */
	FTL_IO_VECTOR_LBA	= (1 << 6),
	/* The IO is directed to non-volatile cache */
	FTL_IO_CACHE		= (1 << 7),
	/* Indicates that physical address should be taken from IO struct, */
	/* not assigned by wptr, only works if wptr is also in direct mode */
	FTL_IO_DIRECT_ACCESS	= (1 << 8),
	/* Bypass the non-volatile cache */
	FTL_IO_BYPASS_CACHE	= (1 << 9),
};
49 
/* Type of operation carried by an ftl_io request */
enum ftl_io_type {
	/* Read of logical (or, with FTL_IO_PHYSICAL_MODE, physical) blocks */
	FTL_IO_READ,
	/* Write of logical blocks */
	FTL_IO_WRITE,
	/* Band erase */
	FTL_IO_ERASE,
};
55 
56 #define FTL_IO_MAX_IOVEC 64
57 
58 struct ftl_io_init_opts {
59 	struct spdk_ftl_dev			*dev;
60 
61 	/* IO descriptor */
62 	struct ftl_io				*io;
63 
64 	/* Parent request */
65 	struct ftl_io				*parent;
66 
67 	/* Size of IO descriptor */
68 	size_t                                  size;
69 
70 	/* IO flags */
71 	int                                     flags;
72 
73 	/* IO type */
74 	enum ftl_io_type			type;
75 
76 	/* Transfer batch, set for IO going through the write buffer */
77 	struct ftl_batch			*batch;
78 
79 	/* Band to which the IO is directed */
80 	struct ftl_band				*band;
81 
82 	/* Number of logical blocks */
83 	size_t                                  num_blocks;
84 
85 	/* Data */
86 	struct iovec				iovs[FTL_IO_MAX_IOVEC];
87 	int					iovcnt;
88 
89 	/* Metadata */
90 	void                                    *md;
91 
92 	/* Callback's function */
93 	ftl_io_fn				cb_fn;
94 
95 	/* Callback's context */
96 	void					*cb_ctx;
97 };
98 
99 struct ftl_io_channel;
100 
/* Single-block entry of an IO channel's write buffer */
struct ftl_wbuf_entry {
	/* IO channel that owns the write buffer entry */
	struct ftl_io_channel			*ioch;
	/* Data payload (single block) */
	void					*payload;
	/* Index within the IO channel's wbuf_entries array */
	uint32_t				index;
	/* IO flags (enum ftl_io_flags bits) of the request this entry belongs to */
	uint32_t				io_flags;
	/* Points at the band the data is copied from.  Only valid for internal
	 * requests coming from reloc.
	 */
	struct ftl_band				*band;
	/* Physical address of that particular block.  Valid once the data has
	 * been written out.
	 */
	struct ftl_addr				addr;
	/* Logical block address */
	uint64_t				lba;

	/* Trace ID of the requests the entry is part of */
	uint64_t				trace;

	/* Indicates that the entry was written out and is still present in the
	 * L2P table.
	 */
	bool					valid;
	/* Lock that protects the entry from being evicted from the L2P */
	pthread_spinlock_t			lock;
	/* Link used by the free/submit queues of the owning IO channel */
	TAILQ_ENTRY(ftl_wbuf_entry)		tailq;
};
131 
132 #define FTL_IO_CHANNEL_INDEX_INVALID ((uint64_t)-1)
133 
/* Per-channel FTL context: IO pool, underlying device channels and
 * the write buffer with its queues.
 */
struct ftl_io_channel {
	/* Device */
	struct spdk_ftl_dev			*dev;
	/* IO pool element size */
	size_t					elem_size;
	/* Index within the IO channel array */
	uint64_t				index;
	/* IO pool */
	struct spdk_mempool			*io_pool;
	/* Underlying device IO channel */
	struct spdk_io_channel			*base_ioch;
	/* Persistent cache IO channel */
	struct spdk_io_channel			*cache_ioch;
	/* Poller used for completing write requests and retrying IO */
	struct spdk_poller			*poller;
	/* Write completion queue */
	TAILQ_HEAD(, ftl_io)			write_cmpl_queue;
	TAILQ_HEAD(, ftl_io)			retry_queue;
	TAILQ_ENTRY(ftl_io_channel)		tailq;

	/* Array of write buffer entries */
	struct ftl_wbuf_entry			*wbuf_entries;
	/* Write buffer data payload */
	void					*wbuf_payload;
	/* Number of write buffer entries */
	uint32_t				num_entries;
	/* Write buffer queues */
	struct spdk_ring			*free_queue;
	struct spdk_ring			*submit_queue;
	/* Maximum number of concurrent user writes */
	uint32_t				qdepth_limit;
	/* Current number of concurrent user writes */
	uint32_t				qdepth_current;
	/* Means that the IO channel is being flushed */
	bool					flush;
};
170 
171 /* General IO descriptor */
/* General IO descriptor */
struct ftl_io {
	/* Device */
	struct spdk_ftl_dev			*dev;

	/* IO channel */
	struct spdk_io_channel			*ioch;

	/* Which member is valid depends on FTL_IO_VECTOR_LBA in `flags` */
	union {
		/* LBA table (noncontiguous LBAs, FTL_IO_VECTOR_LBA set) */
		uint64_t			*vector;

		/* First LBA (contiguous request) */
		uint64_t			single;
	} lba;

	/* First block address */
	struct ftl_addr				addr;

	/* Number of processed blocks */
	size_t					pos;

	/* Number of blocks */
	size_t					num_blocks;

	/* IO vector pointer */
	struct iovec				*iov;

	/* IO vector buffer for internal requests */
	struct iovec				iov_buf[FTL_IO_MAX_IOVEC];

	/* Metadata */
	void					*md;

	/* Number of IO vectors */
	size_t					iov_cnt;

	/* Position within the iovec */
	size_t					iov_pos;

	/* Offset within the iovec (in blocks) */
	size_t					iov_off;

	/* Transfer batch (valid only for writes going through the write buffer) */
	struct ftl_batch			*batch;

	/* Band this IO is being written to */
	struct ftl_band				*band;

	/* Request status */
	int					status;

	/* Number of split requests */
	size_t					req_cnt;

	/* Callback's function */
	ftl_io_fn				cb_fn;

	/* Callback's context */
	void					*cb_ctx;

	/* User callback function */
	spdk_ftl_fn				user_fn;

	/* Flags (combination of enum ftl_io_flags bits) */
	int					flags;

	/* IO type */
	enum ftl_io_type			type;

	/* Done flag */
	bool					done;

	/* Parent request */
	struct ftl_io				*parent;
	/* Child requests list */
	LIST_HEAD(, ftl_io)			children;
	/* Child list link */
	LIST_ENTRY(ftl_io)			child_entry;
	/* Children lock */
	pthread_spinlock_t			lock;

	/* Trace group id */
	uint64_t				trace;

	/* Used by retry and write completion queues */
	TAILQ_ENTRY(ftl_io)			ioch_entry;
};
259 
260 /* Metadata IO */
/* Metadata IO — extends ftl_io with (de)serialization state */
struct ftl_md_io {
	/* Parent IO structure (must stay first so the struct can be cast
	 * to/from struct ftl_io *)
	 */
	struct ftl_io				io;

	/* Serialization/deserialization callback */
	ftl_md_pack_fn				pack_fn;

	/* Callback's function */
	ftl_io_fn				cb_fn;

	/* Callback's context */
	void					*cb_ctx;
};
274 
275 static inline bool
276 ftl_io_mode_physical(const struct ftl_io *io)
277 {
278 	return io->flags & FTL_IO_PHYSICAL_MODE;
279 }
280 
281 static inline bool
282 ftl_io_mode_logical(const struct ftl_io *io)
283 {
284 	return !ftl_io_mode_physical(io);
285 }
286 
287 static inline bool
288 ftl_io_done(const struct ftl_io *io)
289 {
290 	return io->req_cnt == 0 && io->pos == io->num_blocks;
291 }
292 
/* Allocate an IO descriptor from the given IO channel */
struct ftl_io *ftl_io_alloc(struct spdk_io_channel *ch);
/* Allocate an IO descriptor as a child of an existing request */
struct ftl_io *ftl_io_alloc_child(struct ftl_io *parent);
/* Mark the IO as failed with the given status */
void ftl_io_fail(struct ftl_io *io, int status);
/* Release an IO descriptor */
void ftl_io_free(struct ftl_io *io);
/* Initialize an internal IO request from the supplied options */
struct ftl_io *ftl_io_init_internal(const struct ftl_io_init_opts *opts);
/* Reinitialize an IO with a new callback, context, flags and type */
void ftl_io_reinit(struct ftl_io *io, ftl_io_fn cb,
		   void *ctx, int flags, int type);
/* Reset the IO's bookkeeping state */
void ftl_io_clear(struct ftl_io *io);
/* Increment / decrement the number of outstanding split requests */
void ftl_io_inc_req(struct ftl_io *io);
void ftl_io_dec_req(struct ftl_io *io);
/* Return the iovec array of the IO */
struct iovec *ftl_io_iovec(struct ftl_io *io);
/* LBA at the current position / at an arbitrary block offset */
uint64_t ftl_io_current_lba(const struct ftl_io *io);
uint64_t ftl_io_get_lba(const struct ftl_io *io, size_t offset);
/* Advance the IO's position by num_blocks */
void ftl_io_advance(struct ftl_io *io, size_t num_blocks);
/* Total number of blocks covered by an iovec array */
size_t ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt);
/* Data pointer / remaining length at the current iovec position */
void *ftl_io_iovec_addr(struct ftl_io *io);
size_t ftl_io_iovec_len_left(struct ftl_io *io);
/* Initialize an IO writing out write-buffer contents for the given batch */
struct ftl_io *ftl_io_wbuf_init(struct spdk_ftl_dev *dev, struct ftl_addr addr,
				struct ftl_band *band, struct ftl_batch *batch, ftl_io_fn cb);
/* Initialize a band erase IO */
struct ftl_io *ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb);
/* Initialize a user read/write request */
struct ftl_io *ftl_io_user_init(struct spdk_io_channel *ioch, uint64_t lba, size_t num_blocks,
				struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
				void *cb_arg, int type);
/* Return the IO's metadata buffer */
void *ftl_io_get_md(const struct ftl_io *io);
/* Complete the IO, invoking its callback */
void ftl_io_complete(struct ftl_io *io);
/* Shrink the IO's iovec to cover num_blocks blocks */
void ftl_io_shrink_iovec(struct ftl_io *io, size_t num_blocks);
/* Translate an NVMe completion into the IO's status */
void ftl_io_process_error(struct ftl_io *io, const struct spdk_nvme_cpl *status);
/* Reset the IO so it can be resubmitted */
void ftl_io_reset(struct ftl_io *io);
/* Invoke the callback on each child request of the IO */
void ftl_io_call_foreach_child(struct ftl_io *io, int (*callback)(struct ftl_io *));
322 
323 #endif /* FTL_IO_H */
324