xref: /spdk/lib/ftl/ftl_io.h (revision db75f4b6780ac678f18dc38dc3900e6f5afb69ba)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #ifndef FTL_IO_H
7 #define FTL_IO_H
8 
9 #include "spdk/stdinc.h"
10 #include "spdk/nvme.h"
11 #include "spdk/ftl.h"
12 #include "spdk/bdev.h"
13 #include "spdk/util.h"
14 
15 #include "ftl_internal.h"
16 #include "ftl_l2p.h"
17 #include "utils/ftl_md.h"
18 
19 struct spdk_ftl_dev;
20 struct ftl_band;
21 struct ftl_io;
22 
23 typedef void (*ftl_io_fn)(struct ftl_io *, void *, int);
24 
25 /* IO flags */
enum ftl_io_flags {
	/* Indicates whether IO is already initialized */
	FTL_IO_INITIALIZED	= (1 << 0),
	/* Indicates whether the user IO pinned the L2P pages containing LBAs */
	FTL_IO_PINNED		= (1 << 1),
};
32 
/* Type of a user IO request */
enum ftl_io_type {
	/* Read request */
	FTL_IO_READ,
	/* Write request */
	FTL_IO_WRITE,
	/* Unmap (deallocate) request */
	FTL_IO_UNMAP,
};
38 
39 #define FTL_IO_MAX_IOVEC 4
40 
/* Per-channel FTL context: pools, poller and submission/completion rings */
struct ftl_io_channel {
	/*  Device */
	struct spdk_ftl_dev		*dev;
	/*  Entry of IO channels queue/list */
	TAILQ_ENTRY(ftl_io_channel)	entry;
	/*  IO map pool */
	struct ftl_mempool		*map_pool;
	/*  Poller used for completing user requests and retrying IO */
	struct spdk_poller		*poller;
	/*  Submission queue */
	struct spdk_ring		*sq;
	/*  Completion queue */
	struct spdk_ring		*cq;
};
55 
56 /* General IO descriptor for user requests */
struct ftl_io {
	/* Device */
	struct spdk_ftl_dev		*dev;

	/* IO channel */
	struct spdk_io_channel		*ioch;

	/* Logical block address of the request */
	uint64_t			lba;

	/* First address of write when sent to cache device */
	ftl_addr			addr;

	/* Number of processed blocks */
	size_t				pos;

	/* Number of blocks */
	size_t				num_blocks;

	/* IO vector pointer */
	struct iovec			*iov;

	/* Metadata */
	void				*md;

	/* Number of IO vectors */
	size_t				iov_cnt;

	/* Position within the io vector array */
	size_t				iov_pos;

	/* Offset within the iovec (in blocks) */
	size_t				iov_off;

	/* Band this IO is being written to */
	struct ftl_band			*band;

	/* Request status */
	int				status;

	/* Number of split requests */
	size_t				req_cnt;

	/* Callback's context */
	void				*cb_ctx;

	/* User callback function */
	spdk_ftl_fn			user_fn;

	/* Flags (combination of enum ftl_io_flags values) */
	int				flags;

	/* IO type */
	enum ftl_io_type		type;

	/* Done flag */
	bool				done;

	/* Used by retry and write completion queues */
	TAILQ_ENTRY(ftl_io)		queue_entry;

	/* Reference to the chunk within NV cache */
	struct ftl_nv_cache_chunk	*nv_cache_chunk;

	/* For l2p pinning */
	struct ftl_l2p_pin_ctx		l2p_pin_ctx;

	/* Logical to physical mapping for this IO, number of entries equals to
	 * number of transfer blocks */
	ftl_addr			*map;

	/* Wait entry used when the underlying bdev runs out of IO resources */
	struct spdk_bdev_io_wait_entry	bdev_io_wait;
};
130 
/* Single block (entry) of an internal FTL request (struct ftl_rq) */
struct ftl_rq_entry {
	/* Data payload of single entry (block) */
	void *io_payload;

	/* Extended metadata buffer for this block */
	void *io_md;

	/*
	 * Physical address of block described by ftl_rq_entry.
	 * Valid after write command is completed (due to potential append reordering)
	 */
	ftl_addr addr;

	/* Logical block address */
	uint64_t lba;

	/* Index of this entry within FTL request */
	const uint64_t index;

	/* Fields for the owner of this entry */
	struct {
		void *priv;
	} owner;

	/* If request issued in iterative way, it contains IO information */
	struct {
		struct ftl_band *band;
	} io;

	/* For l2p pinning */
	struct ftl_l2p_pin_ctx l2p_pin_ctx;

	/* Per-entry bdev IO description and resource-wait entry */
	struct {
		uint64_t offset_blocks;
		uint64_t num_blocks;
		struct spdk_bdev_io_wait_entry wait_entry;
	} bdev_io;
};
168 
169 /*
170  * Descriptor used for internal requests (compaction and reloc). May be split into multiple
171  * IO requests (as valid blocks that need to be relocated may not be contiguous) - utilizing
172  * the ftl_rq_entry array
173  */
struct ftl_rq {
	/* Device */
	struct spdk_ftl_dev *dev;

	/* Request queue entry */
	TAILQ_ENTRY(ftl_rq) qentry;

	/* Number of blocks within the request */
	uint64_t num_blocks;

	/* Extended metadata for IO. Its size is io_md_size * num_blocks */
	void *io_md;

	/* Size of extended metadata for one entry */
	uint64_t io_md_size;

	/* Size of IO vector array */
	uint64_t io_vec_size;

	/* Array of IO vectors, its size equals to num_blocks */
	struct iovec *io_vec;

	/* Payload for IO */
	void *io_payload;

	/* Request result status */
	bool success;

	/* Fields for owner of this request */
	struct {
		/* End request callback */
		void (*cb)(struct ftl_rq *rq);

		/* IO error request callback */
		void (*error)(struct ftl_rq *rq, struct ftl_band *band,
			      uint64_t idx, uint64_t count);

		/* Owner context */
		void *priv;

		/* This is compaction IO */
		bool compaction;
	} owner;

	/* Iterator fields for processing state of the request */
	struct {
		/* Index of the entry currently being processed */
		uint32_t idx;

		/* Number of entries to process */
		uint32_t count;

		/* Queue depth on this request */
		uint32_t qd;

		/* Number of outstanding sub-IOs remaining */
		uint32_t remaining;
		/* Aggregated status of the iteration */
		int status;
	} iter;

	/* Private fields for issuing IO */
	struct {
		/* Request physical address, on IO completion set for append device */
		ftl_addr addr;

		/* Band to which IO is issued */
		struct ftl_band *band;

		/* Wait entry used when the underlying bdev runs out of IO resources */
		struct spdk_bdev_io_wait_entry bdev_io_wait;
	} io;

	/* For writing P2L metadata */
	struct ftl_md_io_entry_ctx md_persist_entry_ctx;

	/* Flexible array of per-block entries; length equals num_blocks */
	struct ftl_rq_entry entries[];
};
246 
247 /* Used for reading/writing P2L map during runtime and recovery */
struct ftl_basic_rq {
	/* Device */
	struct spdk_ftl_dev *dev;

	/* Request queue entry */
	TAILQ_ENTRY(ftl_basic_rq) qentry;

	/* Number of blocks within the request */
	uint64_t num_blocks;

	/* Payload for IO */
	void *io_payload;

	/* Request result status */
	bool success;

	/* Fields for owner of this request */
	struct {
		/* End request callback */
		void (*cb)(struct ftl_basic_rq *brq);

		/* Owner context */
		void *priv;
	} owner;

	/* Private fields for issuing IO */
	struct {
		/* Request physical address, on IO completion set for append device */
		ftl_addr addr;

		/* Band to which IO is issued */
		struct ftl_band *band;

		/* Chunk to which IO is issued */
		struct ftl_nv_cache_chunk *chunk;

		/* Wait entry used when the underlying bdev runs out of IO resources */
		struct spdk_bdev_io_wait_entry bdev_io_wait;
	} io;
};
286 
/* Mark the IO as failed with the given status */
void ftl_io_fail(struct ftl_io *io, int status);
/* Reset the IO's processing state so it can be (re)submitted */
void ftl_io_clear(struct ftl_io *io);
/* Increment/decrement the count of outstanding split requests (req_cnt) */
void ftl_io_inc_req(struct ftl_io *io);
void ftl_io_dec_req(struct ftl_io *io);
/* Get the iovec at the IO's current position within the iovec array */
struct iovec *ftl_io_iovec(struct ftl_io *io);
/* LBA corresponding to the IO's current position */
uint64_t ftl_io_current_lba(const struct ftl_io *io);
/* LBA at the given block offset within the IO */
uint64_t ftl_io_get_lba(const struct ftl_io *io, size_t offset);
/* Advance the IO's position by num_blocks processed blocks */
void ftl_io_advance(struct ftl_io *io, size_t num_blocks);
/* Total number of blocks described by the iovec array */
size_t ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt);
/* Buffer address at the IO's current iovec position/offset */
void *ftl_io_iovec_addr(struct ftl_io *io);
/* Number of blocks left in the IO's current iovec */
size_t ftl_io_iovec_len_left(struct ftl_io *io);
/* Initialize a user IO descriptor; type is one of enum ftl_io_type */
int ftl_io_init(struct spdk_io_channel *ioch, struct ftl_io *io, uint64_t lba,
		size_t num_blocks, struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
		void *cb_arg, int type);
/* Complete the IO (user callback invocation handled in ftl_io.c) */
void ftl_io_complete(struct ftl_io *io);
/* Destroy/create an internal FTL request descriptor (struct ftl_rq) */
void ftl_rq_del(struct ftl_rq *rq);
struct ftl_rq *ftl_rq_new(struct spdk_ftl_dev *dev, uint32_t io_md_size);
/* Release L2P pins held by the request's entries */
void ftl_rq_unpin(struct ftl_rq *rq);
305 
306 static inline void
307 ftl_basic_rq_init(struct spdk_ftl_dev *dev, struct ftl_basic_rq *brq,
308 		  void *io_payload, uint64_t num_blocks)
309 {
310 	brq->dev = dev;
311 	brq->io_payload = io_payload;
312 	brq->num_blocks = num_blocks;
313 	brq->success = false;
314 }
315 
316 static inline void
317 ftl_basic_rq_set_owner(struct ftl_basic_rq *brq, void (*cb)(struct ftl_basic_rq *brq), void *priv)
318 {
319 	brq->owner.cb = cb;
320 	brq->owner.priv = priv;
321 }
322 
323 static inline void
324 ftl_rq_swap_payload(struct ftl_rq *a, uint32_t aidx,
325 		    struct ftl_rq *b, uint32_t bidx)
326 {
327 	assert(aidx < a->num_blocks);
328 	assert(bidx < b->num_blocks);
329 
330 	void *a_payload = a->io_vec[aidx].iov_base;
331 	void *b_payload = b->io_vec[bidx].iov_base;
332 
333 	a->io_vec[aidx].iov_base = b_payload;
334 	a->entries[aidx].io_payload = b_payload;
335 
336 	b->io_vec[bidx].iov_base = a_payload;
337 	b->entries[bidx].io_payload = a_payload;
338 }
339 
340 static inline struct ftl_rq *
341 ftl_rq_from_entry(struct ftl_rq_entry *entry)
342 {
343 	uint64_t idx = entry->index;
344 	struct ftl_rq *rq = SPDK_CONTAINEROF(entry, struct ftl_rq, entries[idx]);
345 	return rq;
346 }
347 
348 
349 static inline bool
350 ftl_io_done(const struct ftl_io *io)
351 {
352 	return io->req_cnt == 0 && io->pos == io->num_blocks;
353 }
354 
355 #endif /* FTL_IO_H */
356