/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 */

#ifndef FTL_IO_H
#define FTL_IO_H

#include "spdk/stdinc.h"
#include "spdk/nvme.h"
#include "spdk/ftl.h"
#include "spdk/bdev.h"
#include "spdk/util.h"

#include "ftl_internal.h"
#include "ftl_trace.h"
#include "ftl_l2p.h"
#include "utils/ftl_md.h"

struct spdk_ftl_dev;
struct ftl_band;
struct ftl_io;

typedef void (*ftl_io_fn)(struct ftl_io *, void *, int);

/* IO flags */
enum ftl_io_flags {
	/* Indicates whether IO is already initialized */
	FTL_IO_INITIALIZED	= (1 << 0),
	/* Indicates whether the user IO pinned the L2P pages containing LBAs */
	FTL_IO_PINNED		= (1 << 1),
};

enum ftl_io_type {
	FTL_IO_READ,
	FTL_IO_WRITE,
	FTL_IO_TRIM,
};

#define FTL_IO_MAX_IOVEC 4

struct ftl_io_channel {
	/*  Device */
	struct spdk_ftl_dev		*dev;
	/*  Entry of IO channels queue/list */
	TAILQ_ENTRY(ftl_io_channel)	entry;
	/*  IO map pool */
	struct ftl_mempool		*map_pool;
	/*  Poller used for completing user requests and retrying IO */
	struct spdk_poller		*poller;
	/*  Submission queue */
	struct spdk_ring		*sq;
	/*  Completion queue */
	struct spdk_ring		*cq;
};

/* General IO descriptor for user requests */
struct ftl_io {
	/* Device */
	struct spdk_ftl_dev		*dev;

	/* IO channel */
	struct spdk_io_channel		*ioch;

	/* Logical block address */
	uint64_t			lba;

	/* First address of write when sent to cache device */
	ftl_addr			addr;

	/* Number of processed blocks */
	size_t				pos;

	/* Number of blocks */
	size_t				num_blocks;

	/* IO vector pointer */
	struct iovec			*iov;

	/* Metadata */
	void				*md;

	/* Number of IO vectors */
	size_t				iov_cnt;

	/* Position within the io vector array */
	size_t				iov_pos;

	/* Offset within the iovec (in blocks) */
	size_t				iov_off;

	/* Band this IO is being written to */
	struct ftl_band			*band;

	/* Request status */
	int				status;

	/* Number of split requests */
	size_t				req_cnt;

	/* Callback's context */
	void				*cb_ctx;

	/* User callback function */
	spdk_ftl_fn			user_fn;

	/* Flags */
	int				flags;

	/* IO type */
	enum ftl_io_type		type;

	/* Done flag */
	bool				done;

	/* Trace group id */
	uint64_t			trace;

	/* Used by retry and write completion queues */
	TAILQ_ENTRY(ftl_io)		queue_entry;

	/* Reference to the chunk within NV cache */
	struct ftl_nv_cache_chunk	*nv_cache_chunk;

	/* For l2p pinning */
	struct ftl_l2p_pin_ctx		l2p_pin_ctx;

	/* Logical to physical mapping for this IO; the number of entries equals
	 * the number of transfer blocks */
	ftl_addr			*map;

	struct spdk_bdev_io_wait_entry	bdev_io_wait;
};

/* Single entry (block) of an FTL request */
struct ftl_rq_entry {
	/* Data payload of single entry (block) */
	void *io_payload;

	/* Metadata payload of single entry (block) */
	void *io_md;

	/*
	 * Physical address of block described by ftl_rq_entry.
	 * Valid after write command is completed (due to potential append reordering)
	 */
	ftl_addr addr;

	/* Logical block address */
	uint64_t lba;

	/* Sequence id of original chunk where this user data was written to */
	uint64_t seq_id;

	/* Index of this entry within FTL request */
	const uint64_t index;

	struct {
		void *priv;
	} owner;

	/* If the request is issued iteratively, this holds the IO information */
	struct {
		struct ftl_band *band;
	} io;

	/* For l2p pinning */
	struct ftl_l2p_pin_ctx l2p_pin_ctx;

	struct {
		uint64_t offset_blocks;
		uint64_t num_blocks;
		struct spdk_bdev_io_wait_entry wait_entry;
	} bdev_io;
};

/*
 * Descriptor used for internal requests (compaction and reloc). May be split into multiple
 * IO requests (as valid blocks that need to be relocated may not be contiguous) - utilizing
 * the ftl_rq_entry array
 */
struct ftl_rq {
	struct spdk_ftl_dev *dev;

	/* Request queue entry */
	TAILQ_ENTRY(ftl_rq) qentry;

	/* Number of blocks within the request */
	uint64_t num_blocks;

	/* Extended metadata for IO. Its size is io_md_size * num_blocks */
	void *io_md;

	/* Size of extended metadata for one entry */
	uint64_t io_md_size;

	/* Payload for IO */
	void *io_payload;

	/* Request result status */
	bool success;

	/* Fields for owner of this request */
	struct {
		/* End request callback */
		void (*cb)(struct ftl_rq *rq);

		/* IO error request callback */
		void (*error)(struct ftl_rq *rq, struct ftl_band *band,
			      uint64_t idx, uint64_t count);

		/* Owner context */
		void *priv;

		/* This is compaction IO */
		bool compaction;
	} owner;

	/* Iterator fields for processing state of the request */
	struct {
		uint32_t idx;

		uint32_t count;

		/* Queue depth on this request */
		uint32_t qd;

		uint32_t remaining;
		int status;
	} iter;

	/* Private fields for issuing IO */
	struct {
		/* Physical address of the request; set on IO completion for append devices */
		ftl_addr addr;

		/* Band to which IO is issued */
		struct ftl_band *band;

		struct spdk_bdev_io_wait_entry bdev_io_wait;
	} io;

	/* For writing P2L metadata */
	struct ftl_md_io_entry_ctx md_persist_entry_ctx;

	struct ftl_rq_entry entries[];
};
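
/*
 * Note: entries[] above is a flexible array member, so a request and its
 * per-block entries occupy a single allocation. Conceptually (ftl_rq_new()
 * is the real allocator; this is only an illustrative sketch):
 *
 *	rq = calloc(1, sizeof(struct ftl_rq) +
 *		       num_blocks * sizeof(struct ftl_rq_entry));
 */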

/* Used for reading/writing P2L map during runtime and recovery */
struct ftl_basic_rq {
	struct spdk_ftl_dev *dev;

	/* Request queue entry */
	TAILQ_ENTRY(ftl_basic_rq) qentry;

	/* Number of blocks within the request */
	uint64_t num_blocks;

	/* Payload for IO */
	void *io_payload;

	/* Request result status */
	bool success;

	/* Fields for owner of this request */
	struct {
		/* End request callback */
		void (*cb)(struct ftl_basic_rq *brq);

		/* Owner context */
		void *priv;
	} owner;

	/* Private fields for issuing IO */
	struct {
		/* Physical address of the request; set on IO completion for append devices */
		ftl_addr addr;

		/* Band to which IO is issued */
		struct ftl_band *band;

		/* Chunk to which IO is issued */
		struct ftl_nv_cache_chunk *chunk;

		struct spdk_bdev_io_wait_entry bdev_io_wait;
	} io;
};

static inline bool
ftl_rq_entry_loop_assert(struct ftl_rq *rq, struct ftl_rq_entry *entry, uint32_t count)
{
	assert(entry >= rq->entries);
	assert(((uintptr_t)entry - (uintptr_t)rq->entries) % sizeof(*entry) == 0);
	assert(count <= rq->num_blocks);

	return true;
}

#define FTL_RQ_ENTRY_LOOP_FROM(rq, from, entry, count) \
	for ((entry) = (from); \
		(entry) < (&(rq)->entries[count]) && ftl_rq_entry_loop_assert(rq, entry, count); (entry)++)

#define FTL_RQ_ENTRY_LOOP(rq, entry, count) \
	FTL_RQ_ENTRY_LOOP_FROM(rq, (rq)->entries, entry, count)
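
/*
 * A minimal usage sketch of the loop macros above (process_block() is
 * hypothetical): iterate over the first rq->iter.count entries of a request:
 *
 *	struct ftl_rq_entry *entry;
 *
 *	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
 *		process_block(entry->io_payload, entry->lba);
 *	}
 */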

void ftl_io_fail(struct ftl_io *io, int status);
void ftl_io_clear(struct ftl_io *io);
void ftl_io_inc_req(struct ftl_io *io);
void ftl_io_dec_req(struct ftl_io *io);
struct iovec *ftl_io_iovec(struct ftl_io *io);
uint64_t ftl_io_current_lba(const struct ftl_io *io);
uint64_t ftl_io_get_lba(const struct ftl_io *io, size_t offset);
void ftl_io_advance(struct ftl_io *io, size_t num_blocks);
size_t ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt);
void *ftl_io_iovec_addr(struct ftl_io *io);
size_t ftl_io_iovec_len_left(struct ftl_io *io);
int ftl_io_init(struct spdk_io_channel *ioch, struct ftl_io *io, uint64_t lba,
		size_t num_blocks, struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
		void *cb_arg, int type);
void ftl_io_complete(struct ftl_io *io);
void ftl_rq_del(struct ftl_rq *rq);
struct ftl_rq *ftl_rq_new(struct spdk_ftl_dev *dev, uint32_t io_md_size);
void ftl_rq_unpin(struct ftl_rq *rq);
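
/*
 * A minimal sketch of initializing a user read with the helpers above
 * (buf, ctx and read_cb are hypothetical names; FTL_BLOCK_SIZE comes from
 * ftl_internal.h; error handling elided):
 *
 *	static void
 *	read_cb(void *cb_arg, int status)
 *	{
 *		// invoked once the IO fully completes
 *	}
 *
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len = num_blocks * FTL_BLOCK_SIZE,
 *	};
 *
 *	if (ftl_io_init(ioch, io, lba, num_blocks, &iov, 1, read_cb, ctx,
 *			FTL_IO_READ)) {
 *		// initialization failed
 *	}
 */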

static inline void
ftl_basic_rq_init(struct spdk_ftl_dev *dev, struct ftl_basic_rq *brq,
		  void *io_payload, uint64_t num_blocks)
{
	brq->dev = dev;
	brq->io_payload = io_payload;
	brq->num_blocks = num_blocks;
	brq->success = false;
}

static inline void
ftl_basic_rq_set_owner(struct ftl_basic_rq *brq, void (*cb)(struct ftl_basic_rq *brq), void *priv)
{
	brq->owner.cb = cb;
	brq->owner.priv = priv;
}
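
/*
 * A minimal sketch of preparing a basic request for the P2L map path
 * (the md_buf buffer and p2l_read_cb callback are hypothetical):
 *
 *	ftl_basic_rq_init(dev, &brq, md_buf, num_blocks);
 *	ftl_basic_rq_set_owner(&brq, p2l_read_cb, band);
 */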
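
/*
 * Recover the parent request from one of its entries: each entry carries an
 * immutable index, so SPDK_CONTAINEROF() can subtract the offset of
 * entries[idx] from the entry pointer to land back at the enclosing ftl_rq.
 */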
static inline struct ftl_rq *
ftl_rq_from_entry(struct ftl_rq_entry *entry)
{
	uint64_t idx = entry->index;
	struct ftl_rq *rq = SPDK_CONTAINEROF(entry, struct ftl_rq, entries[idx]);
	return rq;
}

static inline bool
ftl_io_done(const struct ftl_io *io)
{
	return io->req_cnt == 0 && io->pos == io->num_blocks;
}

#endif /* FTL_IO_H */