/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/ftl.h"
#include "spdk/likely.h"
#include "spdk/util.h"

#include "ftl_io.h"
#include "ftl_core.h"
#include "ftl_band.h"
#include "ftl_debug.h"

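/* Account for a new backing request issued on behalf of this IO: bump both
 * the device-wide in-flight counter and the IO's own request count.
 */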
void
ftl_io_inc_req(struct ftl_io *io)
{
	io->dev->num_inflight++;
	io->req_cnt++;
}

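/* Drop the accounting for a completed backing request; both counters must be
 * non-zero when this is called.
 */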
void
ftl_io_dec_req(struct ftl_io *io)
{
	assert(io->dev->num_inflight > 0);
	assert(io->req_cnt > 0);

	io->dev->num_inflight--;
	io->req_cnt--;
}

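/* Return the IO's iovec array */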
struct iovec *
ftl_io_iovec(struct ftl_io *io)
{
	return &io->iov[0];
}

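/* Return the LBA at the given block offset within the IO's LBA range */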
uint64_t
ftl_io_get_lba(const struct ftl_io *io, size_t offset)
{
	assert(offset < io->num_blocks);
	return io->lba + offset;
}

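/* Return the LBA corresponding to the IO's current block position */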
uint64_t
ftl_io_current_lba(const struct ftl_io *io)
{
	return ftl_io_get_lba(io, io->pos);
}

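/* Advance the IO's current position by num_blocks, moving the iovec cursor
 * (iov_pos/iov_off) across buffer boundaries as needed. IOs without a data
 * buffer (iov_cnt == 0) only advance the position.
 */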
void
ftl_io_advance(struct ftl_io *io, size_t num_blocks)
{
	struct iovec *iov = ftl_io_iovec(io);
	size_t iov_blocks, block_left = num_blocks;

	io->pos += num_blocks;

	if (io->iov_cnt == 0) {
		return;
	}

	while (block_left > 0) {
		assert(io->iov_pos < io->iov_cnt);
		iov_blocks = iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE;

		if (io->iov_off + block_left < iov_blocks) {
			io->iov_off += block_left;
			break;
		}

		assert(iov_blocks > io->iov_off);
		block_left -= (iov_blocks - io->iov_off);
		io->iov_off = 0;
		io->iov_pos++;
	}
}

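/* Return the total number of blocks covered by the iovec array, or 0 if any
 * buffer length is not a multiple of FTL_BLOCK_SIZE.
 */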
size_t
ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt)
{
	size_t num_blocks = 0, i;

	for (i = 0; i < iov_cnt; ++i) {
		if (iov[i].iov_len & (FTL_BLOCK_SIZE - 1)) {
			return 0;
		}

		num_blocks += iov[i].iov_len / FTL_BLOCK_SIZE;
	}

	return num_blocks;
}

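/* Return the buffer address corresponding to the IO's current position */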
void *
ftl_io_iovec_addr(struct ftl_io *io)
{
	assert(io->iov_pos < io->iov_cnt);
	assert(io->iov_off * FTL_BLOCK_SIZE < ftl_io_iovec(io)[io->iov_pos].iov_len);

	return (char *)ftl_io_iovec(io)[io->iov_pos].iov_base +
	       io->iov_off * FTL_BLOCK_SIZE;
}

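/* Return the number of blocks left in the current iovec entry */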
size_t
ftl_io_iovec_len_left(struct ftl_io *io)
{
	if (io->iov_pos < io->iov_cnt) {
		struct iovec *iov = ftl_io_iovec(io);

		return iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE - io->iov_off;
	} else {
		return 0;
	}
}

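/* Internal completion callback: on -EAGAIN the IO is cleared and requeued on
 * the matching submission queue; otherwise the map pool entry (if any) is
 * released and the IO is pushed onto the channel's completion ring.
 */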
static void
ftl_io_cb(struct ftl_io *io, void *arg, int status)
{
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(io->ioch);
	size_t result __attribute__((unused));

	if (spdk_unlikely(status)) {
		io->status = status;

		if (-EAGAIN == status) {
			/* The IO needs to be rescheduled, so put it back on the
			 * appropriate submission queue */
			switch (io->type) {
			case FTL_IO_READ:
				ftl_io_clear(io);
				TAILQ_INSERT_HEAD(&io->dev->rd_sq, io, queue_entry);
				break;
			case FTL_IO_WRITE:
				ftl_io_clear(io);
				TAILQ_INSERT_HEAD(&io->dev->wr_sq, io, queue_entry);
				break;
			case FTL_IO_UNMAP:
				ftl_io_clear(io);
				TAILQ_INSERT_HEAD(&io->dev->unmap_sq, io, queue_entry);
				break;
			default:
				/* Unknown IO type, complete to the user */
				assert(0);
				break;
			}
		}

		if (!io->status) {
			/* The IO was rescheduled (ftl_io_clear() reset its status),
			 * so don't complete it to the user yet */
			return;
		}
	}

	if (io->map) {
		ftl_mempool_put(ioch->map_pool, io->map);
	}

	result = spdk_ring_enqueue(ioch->cq, (void **)&io, 1, NULL);
	assert(result != 0);
}

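/* Initialize a user IO: zero the structure, fill in the request parameters
 * and allocate a trace ID. Always returns 0.
 *
 * A minimal caller sketch (hypothetical names; assumes the IO structure and
 * data buffer have already been allocated):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 8 * FTL_BLOCK_SIZE };
 *
 *	ftl_io_init(ioch, io, lba, 8, &iov, 1, cb_fn, cb_ctx, FTL_IO_WRITE);
 */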
int
ftl_io_init(struct spdk_io_channel *_ioch, struct ftl_io *io, uint64_t lba, size_t num_blocks,
	    struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_ctx, int type)
{
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(_ioch);
	struct spdk_ftl_dev *dev = ioch->dev;

	memset(io, 0, sizeof(struct ftl_io));
	io->ioch = _ioch;

	io->flags |= FTL_IO_INITIALIZED;
	io->type = type;
	io->dev = dev;
	io->addr = FTL_ADDR_INVALID;
	io->cb_ctx = cb_ctx;
	io->lba = lba;
	io->user_fn = cb_fn;
	io->iov = iov;
	io->iov_cnt = iov_cnt;
	io->num_blocks = num_blocks;
	io->trace = ftl_trace_alloc_id(dev);

	ftl_trace_lba_io_init(io->dev, io);
	return 0;
}

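/* For pinned reads, verify that every LBA still maps to the address the data
 * was read from; if any block was relocated in the meantime, fail the IO with
 * -EAGAIN so it gets resubmitted.
 */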
static void
ftl_io_complete_verify(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;
	uint64_t i;
	uint64_t lba = io->lba;

	assert(io->num_blocks <= dev->xfer_size);

	if (FTL_IO_WRITE == io->type) {
		return;
	}

	if (spdk_unlikely(io->status)) {
		return;
	}

	for (i = 0; i < io->num_blocks; i++, lba++) {
		ftl_addr current_addr = ftl_l2p_get(dev, lba);

		/* If a user read request gets stuck for whatever reason, it's possible the LBA
		 * has been relocated by GC or compaction and it may no longer be safe to return
		 * data from that address */
		if (spdk_unlikely(current_addr != io->map[i])) {
			io->status = -EAGAIN;
			break;
		}
	}
}

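/* Complete an IO: for pinned IOs, verify the L2P mapping and unpin the LBA
 * range, then run the internal completion callback.
 */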
void
ftl_io_complete(struct ftl_io *io)
{
	io->flags &= ~FTL_IO_INITIALIZED;
	io->done = true;

	if (io->flags & FTL_IO_PINNED) {
		ftl_io_complete_verify(io);
		ftl_l2p_unpin(io->dev, io->lba, io->num_blocks);
	}

	ftl_io_cb(io, io->cb_ctx, io->status);
}

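/* Fail the IO with the given status and advance its position past all
 * remaining blocks.
 */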
void
ftl_io_fail(struct ftl_io *io, int status)
{
	io->status = status;
	ftl_io_advance(io, io->num_blocks - io->pos);
}

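/* Reset the IO's progress and status so it can be resubmitted */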
void
ftl_io_clear(struct ftl_io *io)
{
	io->req_cnt = io->pos = io->iov_pos = io->iov_off = 0;
	io->done = false;
	io->status = 0;
	io->flags = 0;
	io->band = NULL;
}