xref: /spdk/lib/ftl/ftl_io.h (revision f869197b76ff6981e901b6d9a05789e1b993494a)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #ifndef FTL_IO_H
7 #define FTL_IO_H
8 
9 #include "spdk/stdinc.h"
10 #include "spdk/nvme.h"
11 #include "spdk/ftl.h"
12 #include "spdk/bdev.h"
13 #include "spdk/util.h"
14 
15 #include "ftl_internal.h"
16 #include "ftl_l2p.h"
17 #include "utils/ftl_md.h"
18 
19 struct spdk_ftl_dev;
20 struct ftl_band;
21 struct ftl_io;
22 
23 typedef void (*ftl_io_fn)(struct ftl_io *, void *, int);
24 
/* IO flags */
enum ftl_io_flags {
	/* Indicates whether IO is already initialized */
	FTL_IO_INITIALIZED	= (1 << 0),
	/* Indicates whether the user IO pinned the L2P pages containing LBAs */
	FTL_IO_PINNED		= (1 << 1),
};
32 
/* Type of operation carried by an ftl_io descriptor */
enum ftl_io_type {
	/* Read blocks */
	FTL_IO_READ,
	/* Write blocks */
	FTL_IO_WRITE,
	/* Unmap (deallocate) a range of blocks */
	FTL_IO_UNMAP,
};
38 
39 #define FTL_IO_MAX_IOVEC 4
40 
/* Per-channel FTL state: queues and pools used to submit and complete user IO */
struct ftl_io_channel {
	/*  Device */
	struct spdk_ftl_dev		*dev;
	/*  Entry of IO channels queue/list */
	TAILQ_ENTRY(ftl_io_channel)	entry;
	/*  IO map pool (backs ftl_io::map allocations — TODO confirm) */
	struct ftl_mempool		*map_pool;
	/*  Poller used for completing user requests and retrying IO */
	struct spdk_poller		*poller;
	/*  Submission queue */
	struct spdk_ring		*sq;
	/*  Completion queue */
	struct spdk_ring		*cq;
};
55 
/* General IO descriptor for user requests */
struct ftl_io {
	/* Device */
	struct spdk_ftl_dev		*dev;

	/* IO channel */
	struct spdk_io_channel		*ioch;

	/* LBA address (first logical block of the request) */
	uint64_t			lba;

	/* First address of write when sent to cache device */
	ftl_addr			addr;

	/* Number of processed blocks */
	size_t				pos;

	/* Number of blocks */
	size_t				num_blocks;

	/* IO vector pointer */
	struct iovec			*iov;

	/* Metadata */
	void				*md;

	/* Number of IO vectors */
	size_t				iov_cnt;

	/* Position within the io vector array */
	size_t				iov_pos;

	/* Offset within the iovec (in blocks) */
	size_t				iov_off;

	/* Band this IO is being written to */
	struct ftl_band			*band;

	/* Request status */
	int				status;

	/* Number of split requests (outstanding children; see ftl_io_done()) */
	size_t				req_cnt;

	/* Callback's context */
	void				*cb_ctx;

	/* User callback function */
	spdk_ftl_fn			user_fn;

	/* Flags (bitmask of enum ftl_io_flags) */
	int				flags;

	/* IO type */
	enum ftl_io_type		type;

	/* Done flag */
	bool				done;

	/* Used by retry and write completion queues */
	TAILQ_ENTRY(ftl_io)		queue_entry;

	/* Reference to the chunk within NV cache */
	struct ftl_nv_cache_chunk	*nv_cache_chunk;

	/* For l2p pinning */
	struct ftl_l2p_pin_ctx		l2p_pin_ctx;

	/* Logical to physical mapping for this IO, number of entries equals to
	 * number of transfer blocks */
	ftl_addr			*map;

	/* Used to wait for bdev IO resources when the underlying bdev queue is full */
	struct spdk_bdev_io_wait_entry	bdev_io_wait;
};
130 
/* Single entry (block) within an FTL request (struct ftl_rq) */
struct ftl_rq_entry {
	/* Data payload of single entry (block) */
	void *io_payload;

	/* Extended metadata buffer for this block — presumably io_md_size bytes
	 * within ftl_rq::io_md; TODO confirm */
	void *io_md;

	/*
	 * Physical address of block described by ftl_rq_entry.
	 * Valid after write command is completed (due to potential append reordering)
	 */
	ftl_addr addr;

	/* Logical block address */
	uint64_t lba;

	/* Sequence id of original chunk where this user data was written to */
	uint64_t seq_id;

	/* Index of this entry within FTL request (immutable; used by ftl_rq_from_entry()) */
	const uint64_t index;

	/* Fields for owner of this entry */
	struct {
		/* Owner context */
		void *priv;
	} owner;

	/* If request issued in iterative way, it contains IO information */
	struct {
		struct ftl_band *band;
	} io;

	/* For l2p pinning */
	struct ftl_l2p_pin_ctx l2p_pin_ctx;

	/* Per-entry bdev IO state (range and wait entry for queue-full retry) */
	struct {
		uint64_t offset_blocks;
		uint64_t num_blocks;
		struct spdk_bdev_io_wait_entry wait_entry;
	} bdev_io;
};
171 
/*
 * Descriptor used for internal requests (compaction and reloc). May be split into multiple
 * IO requests (as valid blocks that need to be relocated may not be contiguous) - utilizing
 * the ftl_rq_entry array
 */
struct ftl_rq {
	/* Device */
	struct spdk_ftl_dev *dev;

	/* Request queue entry */
	TAILQ_ENTRY(ftl_rq) qentry;

	/* Number of blocks within the request */
	uint64_t num_blocks;

	/* Extended metadata for IO. Its size is io_md_size * num_blocks */
	void *io_md;

	/* Size of extended metadata for one entry */
	uint64_t io_md_size;

	/* Size of IO vector array */
	uint64_t io_vec_size;

	/* Array of IO vectors, its size equals to num_blocks */
	struct iovec *io_vec;

	/* Payload for IO */
	void *io_payload;

	/* Request result status */
	bool success;

	/* Fields for owner of this request */
	struct {
		/* End request callback */
		void (*cb)(struct ftl_rq *rq);

		/* IO error request callback */
		void (*error)(struct ftl_rq *rq, struct ftl_band *band,
			      uint64_t idx, uint64_t count);

		/* Owner context */
		void *priv;

		/* This is compaction IO */
		bool compaction;
	} owner;

	/* Iterator fields for processing state of the request */
	struct {
		/* Current entry index */
		uint32_t idx;

		/* Number of entries being processed */
		uint32_t count;

		/* Queue depth on this request */
		uint32_t qd;

		/* Remaining blocks to process */
		uint32_t remaining;

		/* Aggregated status of the iteration */
		int status;
	} iter;

	/* Private fields for issuing IO */
	struct {
		/* Request physical address, on IO completion set for append device */
		ftl_addr addr;

		/* Band to which IO is issued */
		struct ftl_band *band;

		/* Wait entry used when the underlying bdev queue is full */
		struct spdk_bdev_io_wait_entry bdev_io_wait;
	} io;

	/* For writing P2L metadata */
	struct ftl_md_io_entry_ctx md_persist_entry_ctx;

	/* Per-block entries; flexible array sized to num_blocks — TODO confirm
	 * against ftl_rq_new() */
	struct ftl_rq_entry entries[];
};
249 
/* Used for reading/writing P2L map during runtime and recovery */
struct ftl_basic_rq {
	/* Device */
	struct spdk_ftl_dev *dev;

	/* Request queue entry */
	TAILQ_ENTRY(ftl_basic_rq) qentry;

	/* Number of blocks within the request */
	uint64_t num_blocks;

	/* Payload for IO */
	void *io_payload;

	/* Request result status */
	bool success;

	/* Fields for owner of this request */
	struct {
		/* End request callback */
		void (*cb)(struct ftl_basic_rq *brq);

		/* Owner context */
		void *priv;
	} owner;

	/* Private fields for issuing IO */
	struct {
		/* Request physical address, on IO completion set for append device */
		ftl_addr addr;

		/* Band to which IO is issued */
		struct ftl_band *band;

		/* Chunk to which IO is issued */
		struct ftl_nv_cache_chunk *chunk;

		/* Wait entry used when the underlying bdev queue is full */
		struct spdk_bdev_io_wait_entry bdev_io_wait;
	} io;
};
289 
/* Mark the IO as failed with the given status code */
void ftl_io_fail(struct ftl_io *io, int status);
/* Reset the IO's processing state — TODO confirm exact fields against ftl_io.c */
void ftl_io_clear(struct ftl_io *io);
/* Increment/decrement the count of outstanding split requests (req_cnt) */
void ftl_io_inc_req(struct ftl_io *io);
void ftl_io_dec_req(struct ftl_io *io);
/* Current IO vector, based on the IO's iov_pos */
struct iovec *ftl_io_iovec(struct ftl_io *io);
/* LBA at the IO's current position (pos) */
uint64_t ftl_io_current_lba(const struct ftl_io *io);
/* LBA at the given block offset within the IO */
uint64_t ftl_io_get_lba(const struct ftl_io *io, size_t offset);
/* Advance the IO's position by num_blocks */
void ftl_io_advance(struct ftl_io *io, size_t num_blocks);
/* Total number of blocks covered by the iovec array */
size_t ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt);
/* Buffer address at the IO's current iovec position/offset */
void *ftl_io_iovec_addr(struct ftl_io *io);
/* Remaining length (in blocks — TODO confirm) of the current iovec */
size_t ftl_io_iovec_len_left(struct ftl_io *io);
/* Initialize a user IO descriptor; type is one of enum ftl_io_type */
int ftl_io_init(struct spdk_io_channel *ioch, struct ftl_io *io, uint64_t lba,
		size_t num_blocks, struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
		void *cb_arg, int type);
/* Complete the IO — invokes the user callback; TODO confirm queueing semantics */
void ftl_io_complete(struct ftl_io *io);
/* Allocate/free an internal FTL request (compaction/reloc) */
void ftl_rq_del(struct ftl_rq *rq);
struct ftl_rq *ftl_rq_new(struct spdk_ftl_dev *dev, uint32_t io_md_size);
/* Release L2P pins held by the request's entries */
void ftl_rq_unpin(struct ftl_rq *rq);
308 
309 static inline void
310 ftl_basic_rq_init(struct spdk_ftl_dev *dev, struct ftl_basic_rq *brq,
311 		  void *io_payload, uint64_t num_blocks)
312 {
313 	brq->dev = dev;
314 	brq->io_payload = io_payload;
315 	brq->num_blocks = num_blocks;
316 	brq->success = false;
317 }
318 
319 static inline void
320 ftl_basic_rq_set_owner(struct ftl_basic_rq *brq, void (*cb)(struct ftl_basic_rq *brq), void *priv)
321 {
322 	brq->owner.cb = cb;
323 	brq->owner.priv = priv;
324 }
325 
326 static inline void
327 ftl_rq_swap_payload(struct ftl_rq *a, uint32_t aidx,
328 		    struct ftl_rq *b, uint32_t bidx)
329 {
330 	assert(aidx < a->num_blocks);
331 	assert(bidx < b->num_blocks);
332 
333 	void *a_payload = a->io_vec[aidx].iov_base;
334 	void *b_payload = b->io_vec[bidx].iov_base;
335 
336 	a->io_vec[aidx].iov_base = b_payload;
337 	a->entries[aidx].io_payload = b_payload;
338 
339 	b->io_vec[bidx].iov_base = a_payload;
340 	b->entries[bidx].io_payload = a_payload;
341 }
342 
343 static inline struct ftl_rq *
344 ftl_rq_from_entry(struct ftl_rq_entry *entry)
345 {
346 	uint64_t idx = entry->index;
347 	struct ftl_rq *rq = SPDK_CONTAINEROF(entry, struct ftl_rq, entries[idx]);
348 	return rq;
349 }
350 
351 
352 static inline bool
353 ftl_io_done(const struct ftl_io *io)
354 {
355 	return io->req_cnt == 0 && io->pos == io->num_blocks;
356 }
357 
358 #endif /* FTL_IO_H */
359