xref: /spdk/lib/ftl/ftl_io.h (revision 588dfe314bb83d86effdf67ec42837b11c2620bf)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #ifndef FTL_IO_H
7 #define FTL_IO_H
8 
9 #include "spdk/stdinc.h"
10 #include "spdk/nvme.h"
11 #include "spdk/ftl.h"
12 #include "spdk/bdev.h"
13 #include "spdk/util.h"
14 
15 #include "ftl_internal.h"
16 #include "ftl_trace.h"
17 #include "ftl_l2p.h"
18 #include "utils/ftl_md.h"
19 
20 struct spdk_ftl_dev;
21 struct ftl_band;
22 struct ftl_io;
23 
24 typedef void (*ftl_io_fn)(struct ftl_io *, void *, int);
25 
/* IO flags */
enum ftl_io_flags {
	/* Indicates whether IO is already initialized */
	FTL_IO_INITIALIZED	= (1 << 0),
	/* Indicates whether the user IO pinned the L2P pages containing LBAs */
	FTL_IO_PINNED		= (1 << 1),
};
33 
/* Type of a user IO request, stored in ftl_io::type */
enum ftl_io_type {
	/* Read request */
	FTL_IO_READ,
	/* Write request */
	FTL_IO_WRITE,
	/* Unmap (deallocate/trim) request */
	FTL_IO_UNMAP,
};
39 
40 #define FTL_IO_MAX_IOVEC 4
41 
/* Per-channel FTL context used for submitting and completing user IO */
struct ftl_io_channel {
	/* Device */
	struct spdk_ftl_dev		*dev;
	/* Entry of IO channels queue/list */
	TAILQ_ENTRY(ftl_io_channel)	entry;
	/* IO map pool (backs ftl_io::map allocations) */
	struct ftl_mempool		*map_pool;
	/* Poller used for completing user requests and retrying IO */
	struct spdk_poller		*poller;
	/* Submission queue */
	struct spdk_ring		*sq;
	/* Completion queue */
	struct spdk_ring		*cq;
};
56 
/* General IO descriptor for user requests */
struct ftl_io {
	/* Device */
	struct spdk_ftl_dev		*dev;

	/* IO channel */
	struct spdk_io_channel		*ioch;

	/* LBA address */
	uint64_t			lba;

	/* First address of write when sent to cache device */
	ftl_addr			addr;

	/* Number of processed blocks */
	size_t				pos;

	/* Total number of blocks in the request */
	size_t				num_blocks;

	/* IO vector pointer */
	struct iovec			*iov;

	/* Metadata */
	void				*md;

	/* Number of IO vectors */
	size_t				iov_cnt;

	/* Position within the io vector array */
	size_t				iov_pos;

	/* Offset within the iovec (in blocks) */
	size_t				iov_off;

	/* Band this IO is being written to */
	struct ftl_band			*band;

	/* Request status */
	int				status;

	/* Number of split requests */
	size_t				req_cnt;

	/* Callback's context */
	void				*cb_ctx;

	/* User callback function */
	spdk_ftl_fn			user_fn;

	/* Flags (bitwise OR of enum ftl_io_flags) */
	int				flags;

	/* IO type (enum ftl_io_type) */
	enum ftl_io_type		type;

	/* Done flag */
	bool				done;

	/* Trace group id */
	uint64_t			trace;

	/* Used by retry and write completion queues */
	TAILQ_ENTRY(ftl_io)		queue_entry;

	/* Reference to the chunk within NV cache */
	struct ftl_nv_cache_chunk	*nv_cache_chunk;

	/* For l2p pinning */
	struct ftl_l2p_pin_ctx		l2p_pin_ctx;

	/* Logical to physical mapping for this IO; the number of entries
	 * equals the number of transfer blocks */
	ftl_addr			*map;

	/* Wait entry for retrying submission when the underlying bdev has
	 * no free IO resources (see spdk_bdev_queue_io_wait) */
	struct spdk_bdev_io_wait_entry	bdev_io_wait;
};
134 
/* Single-block entry of an internal FTL request (see struct ftl_rq::entries) */
struct ftl_rq_entry {
	/* Data payload of single entry (block) */
	void *io_payload;

	/* Extended metadata buffer for this block (slice of ftl_rq::io_md) */
	void *io_md;

	/*
	 * Physical address of block described by ftl_rq_entry.
	 * Valid after write command is completed (due to potential append reordering)
	 */
	ftl_addr addr;

	/* Logical block address */
	uint64_t lba;

	/* Sequence id of original chunk where this user data was written to */
	uint64_t seq_id;

	/* Index of this entry within FTL request (const - fixed at allocation,
	 * used by ftl_rq_from_entry to recover the parent request) */
	const uint64_t index;

	/* Fields for the owner of this entry */
	struct {
		/* Owner context */
		void *priv;
	} owner;

	/* If request issued in iterative way, it contains IO information */
	struct {
		struct ftl_band *band;
	} io;

	/* For l2p pinning */
	struct ftl_l2p_pin_ctx l2p_pin_ctx;

	/* Per-entry bdev IO state for when this entry is submitted individually */
	struct {
		uint64_t offset_blocks;
		uint64_t num_blocks;
		/* Wait entry for retrying when bdev IO resources are exhausted */
		struct spdk_bdev_io_wait_entry wait_entry;
	} bdev_io;
};
175 
/*
 * Descriptor used for internal requests (compaction and reloc). May be split into multiple
 * IO requests (as valid blocks that need to be relocated may not be contiguous) - utilizing
 * the ftl_rq_entry array
 */
struct ftl_rq {
	/* Device */
	struct spdk_ftl_dev *dev;

	/* Request queue entry */
	TAILQ_ENTRY(ftl_rq) qentry;

	/* Number of blocks within the request */
	uint64_t num_blocks;

	/* Extended metadata for IO. Its size is io_md_size * num_blocks */
	void *io_md;

	/* Size of extended metadata for a single entry */
	uint64_t io_md_size;

	/* Size of IO vector array */
	uint64_t io_vec_size;

	/* Array of IO vectors; its size equals num_blocks */
	struct iovec *io_vec;

	/* Payload for IO */
	void *io_payload;

	/* Request result status */
	bool success;

	/* Fields for owner of this request */
	struct {
		/* End request callback */
		void (*cb)(struct ftl_rq *rq);

		/* IO error request callback */
		void (*error)(struct ftl_rq *rq, struct ftl_band *band,
			      uint64_t idx, uint64_t count);

		/* Owner context */
		void *priv;

		/* This is compaction IO */
		bool compaction;
	} owner;

	/* Iterator fields for processing state of the request */
	struct {
		/* Current entry index */
		uint32_t idx;

		/* Entry count for the current iteration */
		uint32_t count;

		/* Queue depth on this request */
		uint32_t qd;

		/* Remaining entries to be completed */
		uint32_t remaining;

		/* Iteration status */
		int status;
	} iter;

	/* Private fields for issuing IO */
	struct {
		/* Request physical address, on IO completion set for append device */
		ftl_addr addr;

		/* Band to which IO is issued */
		struct ftl_band *band;

		/* Wait entry for retrying when bdev IO resources are exhausted */
		struct spdk_bdev_io_wait_entry bdev_io_wait;
	} io;

	/* For writing P2L metadata */
	struct ftl_md_io_entry_ctx md_persist_entry_ctx;

	/* Per-block entries; flexible array member sized at allocation time */
	struct ftl_rq_entry entries[];
};
253 
/* Used for reading/writing P2L map during runtime and recovery */
struct ftl_basic_rq {
	/* Device */
	struct spdk_ftl_dev *dev;

	/* Request queue entry */
	TAILQ_ENTRY(ftl_basic_rq) qentry;

	/* Number of blocks within the request */
	uint64_t num_blocks;

	/* Payload for IO */
	void *io_payload;

	/* Request result status */
	bool success;

	/* Fields for owner of this request */
	struct {
		/* End request callback */
		void (*cb)(struct ftl_basic_rq *brq);

		/* Owner context */
		void *priv;
	} owner;

	/* Private fields for issuing IO */
	struct {
		/* Request physical address, on IO completion set for append device */
		ftl_addr addr;

		/* Band to which IO is issued */
		struct ftl_band *band;

		/* Chunk to which IO is issued */
		struct ftl_nv_cache_chunk *chunk;

		/* Wait entry for retrying when bdev IO resources are exhausted */
		struct spdk_bdev_io_wait_entry bdev_io_wait;
	} io;
};
293 
/* Mark the IO as failed with the given status */
void ftl_io_fail(struct ftl_io *io, int status);
/* Reset the IO's processing state (presumably pos/status/flags - see ftl_io.c) */
void ftl_io_clear(struct ftl_io *io);
/* Increment/decrement the outstanding split-request count (io->req_cnt) */
void ftl_io_inc_req(struct ftl_io *io);
void ftl_io_dec_req(struct ftl_io *io);
/* Return the iovec at the IO's current vector position */
struct iovec *ftl_io_iovec(struct ftl_io *io);
/* LBA at the IO's current position */
uint64_t ftl_io_current_lba(const struct ftl_io *io);
/* LBA at the given block offset within the IO */
uint64_t ftl_io_get_lba(const struct ftl_io *io, size_t offset);
/* Advance the IO's position by num_blocks */
void ftl_io_advance(struct ftl_io *io, size_t num_blocks);
/* Total number of blocks described by the iovec array */
size_t ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt);
/* Payload address at the IO's current iovec position */
void *ftl_io_iovec_addr(struct ftl_io *io);
/* Remaining length (in blocks) of the current iovec */
size_t ftl_io_iovec_len_left(struct ftl_io *io);
/* Initialize a user IO descriptor; type is one of enum ftl_io_type */
int ftl_io_init(struct spdk_io_channel *ioch, struct ftl_io *io, uint64_t lba,
		size_t num_blocks, struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
		void *cb_arg, int type);
/* Complete the IO towards the user */
void ftl_io_complete(struct ftl_io *io);
/* Free / allocate an internal FTL request (struct ftl_rq) */
void ftl_rq_del(struct ftl_rq *rq);
struct ftl_rq *ftl_rq_new(struct spdk_ftl_dev *dev, uint32_t io_md_size);
/* Release L2P pins held by the request's entries */
void ftl_rq_unpin(struct ftl_rq *rq);
312 
313 static inline void
314 ftl_basic_rq_init(struct spdk_ftl_dev *dev, struct ftl_basic_rq *brq,
315 		  void *io_payload, uint64_t num_blocks)
316 {
317 	brq->dev = dev;
318 	brq->io_payload = io_payload;
319 	brq->num_blocks = num_blocks;
320 	brq->success = false;
321 }
322 
323 static inline void
324 ftl_basic_rq_set_owner(struct ftl_basic_rq *brq, void (*cb)(struct ftl_basic_rq *brq), void *priv)
325 {
326 	brq->owner.cb = cb;
327 	brq->owner.priv = priv;
328 }
329 
330 static inline void
331 ftl_rq_swap_payload(struct ftl_rq *a, uint32_t aidx,
332 		    struct ftl_rq *b, uint32_t bidx)
333 {
334 	assert(aidx < a->num_blocks);
335 	assert(bidx < b->num_blocks);
336 
337 	void *a_payload = a->io_vec[aidx].iov_base;
338 	void *b_payload = b->io_vec[bidx].iov_base;
339 
340 	a->io_vec[aidx].iov_base = b_payload;
341 	a->entries[aidx].io_payload = b_payload;
342 
343 	b->io_vec[bidx].iov_base = a_payload;
344 	b->entries[bidx].io_payload = a_payload;
345 }
346 
347 static inline struct ftl_rq *
348 ftl_rq_from_entry(struct ftl_rq_entry *entry)
349 {
350 	uint64_t idx = entry->index;
351 	struct ftl_rq *rq = SPDK_CONTAINEROF(entry, struct ftl_rq, entries[idx]);
352 	return rq;
353 }
354 
355 
356 static inline bool
357 ftl_io_done(const struct ftl_io *io)
358 {
359 	return io->req_cnt == 0 && io->pos == io->num_blocks;
360 }
361 
362 #endif /* FTL_IO_H */
363