xref: /spdk/lib/ftl/ftl_io.h (revision cdb0726b95631d46eaf4f2e39ddb6533f150fd27)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */

#ifndef FTL_IO_H
#define FTL_IO_H

#include "spdk/stdinc.h"
#include "spdk/nvme.h"
#include "spdk/ftl.h"
#include "spdk/bdev.h"
#include "spdk/util.h"

#include "ftl_internal.h"
#include "ftl_l2p.h"
#include "utils/ftl_md.h"

struct spdk_ftl_dev;
struct ftl_io;

typedef void (*ftl_io_fn)(struct ftl_io *, void *, int);

/* IO flags */
enum ftl_io_flags {
	/* Indicates whether IO is already initialized */
	FTL_IO_INITIALIZED	= (1 << 0),
	/* Indicates whether the user IO pinned the L2P pages containing LBAs */
	FTL_IO_PINNED		= (1 << 1),
};

enum ftl_io_type {
	FTL_IO_READ,
	FTL_IO_WRITE,
	FTL_IO_UNMAP,
};

#define FTL_IO_MAX_IOVEC 4

struct ftl_io_channel {
	/* Device */
	struct spdk_ftl_dev		*dev;
	/* Entry of IO channels queue/list */
	TAILQ_ENTRY(ftl_io_channel)	entry;
	/* IO map pool */
	struct ftl_mempool		*map_pool;
	/* Poller used for completing user requests and retrying IO */
	struct spdk_poller		*poller;
	/* Submission queue */
	struct spdk_ring		*sq;
	/* Completion queue */
	struct spdk_ring		*cq;
};

/* General IO descriptor for user requests */
struct ftl_io {
	/* Device */
	struct spdk_ftl_dev		*dev;

	/* IO channel */
	struct spdk_io_channel		*ioch;

	/* LBA address */
	uint64_t			lba;

	/* First address of write when sent to cache device */
	ftl_addr			addr;

	/* Number of processed blocks */
	size_t				pos;

	/* Number of blocks */
	size_t				num_blocks;

	/* IO vector pointer */
	struct iovec			*iov;

	/* Metadata */
	void				*md;

	/* Number of IO vectors */
	size_t				iov_cnt;

	/* Position within the IO vector array */
	size_t				iov_pos;

	/* Offset within the iovec (in blocks) */
	size_t				iov_off;

	/* Request status */
	int				status;

	/* Number of split requests */
	size_t				req_cnt;

	/* Callback's context */
	void				*cb_ctx;

	/* User callback function */
	spdk_ftl_fn			user_fn;

	/* Flags */
	int				flags;

	/* IO type */
	enum ftl_io_type		type;

	/* Done flag */
	bool				done;

	/* Used by retry and write completion queues */
	TAILQ_ENTRY(ftl_io)		queue_entry;

	/* Reference to the chunk within NV cache */
	struct ftl_nv_cache_chunk	*nv_cache_chunk;

	/* For L2P pinning */
	struct ftl_l2p_pin_ctx		l2p_pin_ctx;

	/* Logical to physical mapping for this IO; the number of entries is equal to
	 * the number of transfer blocks */
	ftl_addr			*map;

	struct spdk_bdev_io_wait_entry	bdev_io_wait;
};

/* Single entry (block) of an FTL request (ftl_rq) */
struct ftl_rq_entry {
	/* Data payload of a single entry (block) */
	void *io_payload;

	void *io_md;

	/*
	 * Physical address of the block described by ftl_rq_entry.
	 * Valid after the write command is completed (due to potential append reordering)
	 */
	ftl_addr addr;

	/* Logical block address */
	uint64_t lba;

	/* Index of this entry within the FTL request */
	const uint64_t index;

	struct {
		void *priv;
	} owner;

	/* For L2P pinning */
	struct ftl_l2p_pin_ctx l2p_pin_ctx;

	struct {
		uint64_t offset_blocks;
		uint64_t num_blocks;
		struct spdk_bdev_io_wait_entry wait_entry;
	} bdev_io;
};

/*
 * Descriptor used for internal requests (compaction and reloc). May be split into multiple
 * IO requests (as valid blocks that need to be relocated may not be contiguous) - utilizing
 * the ftl_rq_entry array
 */
struct ftl_rq {
	struct spdk_ftl_dev *dev;

	/* Request queue entry */
	TAILQ_ENTRY(ftl_rq) qentry;

	/* Number of blocks within the request */
	uint64_t num_blocks;

	/* Extended metadata for IO. Its size is io_md_size * num_blocks */
	void *io_md;

	/* Size of extended metadata for a single entry */
	uint64_t io_md_size;

	/* Size of the IO vector array */
	uint64_t io_vec_size;

	/* Array of IO vectors; its size equals num_blocks */
	struct iovec *io_vec;

	/* Payload for IO */
	void *io_payload;

	/* Request result status */
	bool success;

	/* Fields for the owner of this request */
	struct {
		/* End request callback */
		void (*cb)(struct ftl_rq *rq);

		/* Owner context */
		void *priv;

		/* This is a compaction IO */
		bool compaction;
	} owner;

	/* Iterator fields tracking the processing state of the request */
	struct {
		uint32_t idx;

		uint32_t count;

		/* Queue depth on this request */
		uint32_t qd;

		uint32_t remaining;
		int status;
	} iter;

	/* Private fields for issuing IO */
	struct {
		/* Request physical address, set on IO completion for append devices */
		ftl_addr addr;

		/* Zone to which the IO is issued */
		struct ftl_zone *zone;

		struct spdk_bdev_io_wait_entry bdev_io_wait;
	} io;

	/* For writing P2L metadata */
	struct ftl_md_io_entry_ctx md_persist_entry_ctx;

	struct ftl_rq_entry entries[];
};

/* Used for reading/writing the LBA map during runtime and recovery */
struct ftl_basic_rq {
	struct spdk_ftl_dev *dev;

	/* Request queue entry */
	TAILQ_ENTRY(ftl_basic_rq) qentry;

	/* Number of blocks within the request */
	uint64_t num_blocks;

	/* Payload for IO */
	void *io_payload;

	/* Request result status */
	bool success;

	/* Fields for the owner of this request */
	struct {
		/* End request callback */
		void (*cb)(struct ftl_basic_rq *brq);

		/* Owner context */
		void *priv;
	} owner;

	/* Private fields for issuing IO */
	struct {
		/* Request physical address, set on IO completion for append devices */
		ftl_addr addr;

		/* Zone to which the IO is issued */
		struct ftl_zone *zone;

		/* Chunk to which the IO is issued */
		struct ftl_nv_cache_chunk *chunk;

		struct spdk_bdev_io_wait_entry bdev_io_wait;
	} io;
};

void ftl_io_fail(struct ftl_io *io, int status);
void ftl_io_clear(struct ftl_io *io);
void ftl_io_inc_req(struct ftl_io *io);
void ftl_io_dec_req(struct ftl_io *io);
struct iovec *ftl_io_iovec(struct ftl_io *io);
uint64_t ftl_io_current_lba(const struct ftl_io *io);
uint64_t ftl_io_get_lba(const struct ftl_io *io, size_t offset);
void ftl_io_advance(struct ftl_io *io, size_t num_blocks);
size_t ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt);
void *ftl_io_iovec_addr(struct ftl_io *io);
size_t ftl_io_iovec_len_left(struct ftl_io *io);
int ftl_io_init(struct spdk_io_channel *ioch, struct ftl_io *io, uint64_t lba,
		size_t num_blocks, struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
		void *cb_arg, int type);
void ftl_io_complete(struct ftl_io *io);
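
/*
 * Illustrative sketch (not part of this header): how a user read request might be
 * prepared with ftl_io_init() and finished with ftl_io_complete(). The submission
 * path in between (queueing on the channel's sq ring, L2P translation, issuing bdev
 * IO) lives outside this file and is only hinted at; read_done_cb and submit_read
 * are hypothetical caller-provided names.
 *
 *	static void
 *	read_done_cb(void *cb_arg, int status)
 *	{
 *		// User completion callback; status != 0 indicates failure.
 *	}
 *
 *	static int
 *	submit_read(struct spdk_io_channel *ioch, struct ftl_io *io,
 *		    struct iovec *iov, size_t iov_cnt, uint64_t lba, size_t num_blocks)
 *	{
 *		int rc = ftl_io_init(ioch, io, lba, num_blocks, iov, iov_cnt,
 *				     read_done_cb, NULL, FTL_IO_READ);
 *		if (rc) {
 *			return rc;
 *		}
 *		// ... device-side processing happens here ...
 *		ftl_io_complete(io);
 *		return 0;
 *	}
 */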

static inline void
ftl_basic_rq_init(struct spdk_ftl_dev *dev, struct ftl_basic_rq *brq,
		  void *io_payload, uint64_t num_blocks)
{
	brq->dev = dev;
	brq->io_payload = io_payload;
	brq->num_blocks = num_blocks;
	brq->success = false;
}

static inline void
ftl_basic_rq_set_owner(struct ftl_basic_rq *brq, void (*cb)(struct ftl_basic_rq *brq), void *priv)
{
	brq->owner.cb = cb;
	brq->owner.priv = priv;
}
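
/*
 * Illustrative sketch (not part of this header): the typical pairing of
 * ftl_basic_rq_init() and ftl_basic_rq_set_owner() before a basic request is handed
 * to the read/write path. md_read_cb, md_ctx and prepare_md_read are hypothetical
 * names; the function that actually issues the request is outside the scope of this
 * file.
 *
 *	static void
 *	md_read_cb(struct ftl_basic_rq *brq)
 *	{
 *		// brq->success tells whether the transfer completed without error.
 *	}
 *
 *	static void
 *	prepare_md_read(struct spdk_ftl_dev *dev, struct ftl_basic_rq *brq,
 *			void *buf, uint64_t num_blocks, void *md_ctx)
 *	{
 *		ftl_basic_rq_init(dev, brq, buf, num_blocks);
 *		ftl_basic_rq_set_owner(brq, md_read_cb, md_ctx);
 *	}
 */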

static inline void
ftl_rq_swap_payload(struct ftl_rq *a, uint32_t aidx,
		    struct ftl_rq *b, uint32_t bidx)
{
	assert(aidx < a->num_blocks);
	assert(bidx < b->num_blocks);

	void *a_payload = a->io_vec[aidx].iov_base;
	void *b_payload = b->io_vec[bidx].iov_base;

	a->io_vec[aidx].iov_base = b_payload;
	a->entries[aidx].io_payload = b_payload;

	b->io_vec[bidx].iov_base = a_payload;
	b->entries[bidx].io_payload = a_payload;
}
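
/*
 * Note on ftl_rq_swap_payload(): each entry's buffer is referenced both from
 * io_vec[].iov_base and from entries[].io_payload, so the helper updates the two
 * mirrored pointers on each side. A hedged usage sketch, assuming two in-flight
 * requests rq_src and rq_dst with valid indices:
 *
 *	ftl_rq_swap_payload(rq_dst, dst_idx, rq_src, src_idx);
 *	// rq_dst->entries[dst_idx].io_payload now references the buffer that
 *	// previously belonged to rq_src->entries[src_idx], and vice versa.
 */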

static inline struct ftl_rq *
ftl_rq_from_entry(struct ftl_rq_entry *entry)
{
	uint64_t idx = entry->index;
	struct ftl_rq *rq = SPDK_CONTAINEROF(entry, struct ftl_rq, entries[idx]);
	return rq;
}
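
/*
 * Illustrative sketch (not part of this header): recovering the parent request from
 * an entry, e.g. in a per-entry completion context. It relies only on the const
 * index stored in each entry and SPDK_CONTAINEROF(); entry_done_cb is a hypothetical
 * helper, and the use of iter.remaining/iter.status below is an assumed bookkeeping
 * pattern, not the library's actual completion logic.
 *
 *	static void
 *	entry_done_cb(struct ftl_rq_entry *entry, bool success)
 *	{
 *		struct ftl_rq *rq = ftl_rq_from_entry(entry);
 *
 *		if (!success) {
 *			rq->iter.status = -EIO;
 *		}
 *		if (--rq->iter.remaining == 0 && rq->owner.cb) {
 *			rq->owner.cb(rq);
 *		}
 *	}
 */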


static inline bool
ftl_io_done(const struct ftl_io *io)
{
	return io->req_cnt == 0 && io->pos == io->num_blocks;
}

#endif /* FTL_IO_H */