xref: /spdk/lib/ftl/ftl_io.c (revision 1fc4165fe9bf8512483356ad8e6d27f793f2e3db)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 #include "spdk/ftl.h"
36 
37 #include "ftl_io.h"
38 #include "ftl_core.h"
39 #include "ftl_rwb.h"
40 #include "ftl_band.h"
41 
42 size_t
43 ftl_io_inc_req(struct ftl_io *io)
44 {
45 	struct ftl_band *band = io->band;
46 
47 	if (io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
48 		ftl_band_acquire_md(band);
49 	}
50 
51 	__atomic_fetch_add(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);
52 
53 	return ++io->req_cnt;
54 }
55 
56 size_t
57 ftl_io_dec_req(struct ftl_io *io)
58 {
59 	struct ftl_band *band = io->band;
60 	unsigned long num_inflight __attribute__((unused));
61 
62 	if (io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
63 		ftl_band_release_md(band);
64 	}
65 
66 	num_inflight = __atomic_fetch_sub(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);
67 
68 	assert(num_inflight > 0);
69 	assert(io->req_cnt > 0);
70 
71 	return --io->req_cnt;
72 }
73 
74 struct iovec *
75 ftl_io_iovec(struct ftl_io *io)
76 {
77 	if (io->iov_cnt > 1) {
78 		return io->iovs;
79 	} else {
80 		return &io->iov;
81 	}
82 }
83 
84 uint64_t
85 ftl_io_current_lba(struct ftl_io *io)
86 {
87 	if (io->flags & FTL_IO_VECTOR_LBA) {
88 		return io->lbas[io->pos];
89 	} else {
90 		return io->lba + io->pos;
91 	}
92 }
93 
94 void
95 ftl_io_update_iovec(struct ftl_io *io, size_t lbk_cnt)
96 {
97 	struct iovec *iov = ftl_io_iovec(io);
98 	size_t iov_lbks;
99 
100 	while (lbk_cnt > 0) {
101 		assert(io->iov_pos < io->iov_cnt);
102 		iov_lbks = iov[io->iov_pos].iov_len / PAGE_SIZE;
103 
104 		if (io->iov_off + lbk_cnt < iov_lbks) {
105 			io->iov_off += lbk_cnt;
106 			break;
107 		}
108 
109 		assert(iov_lbks > io->iov_off);
110 		lbk_cnt -= (iov_lbks - io->iov_off);
111 		io->iov_off = 0;
112 		io->iov_pos++;
113 	}
114 }
115 
116 size_t
117 ftl_iovec_num_lbks(struct iovec *iov, size_t iov_cnt)
118 {
119 	size_t lbks = 0, i = 0;
120 
121 	for (; i < iov_cnt; ++i) {
122 		lbks += iov[i].iov_len / PAGE_SIZE;
123 	}
124 
125 	return lbks;
126 }
127 
128 void *
129 ftl_io_iovec_addr(struct ftl_io *io)
130 {
131 	assert(io->iov_pos < io->iov_cnt);
132 	assert(io->iov_off * PAGE_SIZE < ftl_io_iovec(io)[io->iov_pos].iov_len);
133 
134 	return (char *)ftl_io_iovec(io)[io->iov_pos].iov_base +
135 	       io->iov_off * PAGE_SIZE;
136 }
137 
138 size_t
139 ftl_io_iovec_len_left(struct ftl_io *io)
140 {
141 	struct iovec *iov = ftl_io_iovec(io);
142 	return iov[io->iov_pos].iov_len / PAGE_SIZE - io->iov_off;
143 }
144 
145 int
146 ftl_io_init_iovec(struct ftl_io *io, void *buf,
147 		  size_t iov_cnt, size_t req_size)
148 {
149 	struct iovec *iov;
150 	size_t i;
151 
152 	if (iov_cnt > 1) {
153 		iov = io->iovs = calloc(iov_cnt, sizeof(*iov));
154 		if (!iov) {
155 			return -ENOMEM;
156 		}
157 	} else {
158 		iov = &io->iov;
159 	}
160 
161 	io->iov_pos = 0;
162 	io->iov_cnt = iov_cnt;
163 	for (i = 0; i < iov_cnt; ++i) {
164 		iov[i].iov_base = (char *)buf + i * req_size * PAGE_SIZE;
165 		iov[i].iov_len = req_size * PAGE_SIZE;
166 	}
167 
168 	return 0;
169 }
170 
171 static void
172 ftl_io_init(struct ftl_io *io, struct spdk_ftl_dev *dev,
173 	    spdk_ftl_fn fn, void *ctx, int flags, int type)
174 {
175 	io->flags |= flags | FTL_IO_INITIALIZED;
176 	io->type = type;
177 	io->dev = dev;
178 	io->lba = FTL_LBA_INVALID;
179 	io->cb.fn = fn;
180 	io->cb.ctx = ctx;
181 	io->trace = ftl_trace_alloc_id(dev);
182 }
183 
184 struct ftl_io *
185 ftl_io_init_internal(const struct ftl_io_init_opts *opts)
186 {
187 	struct ftl_io *io = opts->io;
188 	struct spdk_ftl_dev *dev = opts->dev;
189 
190 	if (!io) {
191 		io = ftl_io_alloc(dev->ioch);
192 		if (!io) {
193 			return NULL;
194 		}
195 	}
196 
197 	ftl_io_clear(io);
198 	ftl_io_init(io, dev, opts->fn, io, opts->flags | FTL_IO_INTERNAL, opts->type);
199 
200 	io->lbk_cnt = opts->iov_cnt * opts->req_size;
201 	io->rwb_batch = opts->rwb_batch;
202 	io->band = opts->band;
203 	io->md = opts->md;
204 
205 	if (ftl_io_init_iovec(io, opts->data, opts->iov_cnt, opts->req_size)) {
206 		if (!opts->io) {
207 			ftl_io_free(io);
208 		}
209 		return NULL;
210 	}
211 
212 	return io;
213 }
214 
215 struct ftl_io *
216 ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_band *band,
217 		struct ftl_rwb_batch *batch, spdk_ftl_fn cb)
218 {
219 	struct ftl_io_init_opts opts = {
220 		.dev		= dev,
221 		.io		= NULL,
222 		.rwb_batch	= batch,
223 		.band		= band,
224 		.size		= sizeof(struct ftl_io),
225 		.flags		= 0,
226 		.type		= FTL_IO_WRITE,
227 		.iov_cnt	= 1,
228 		.req_size	= dev->xfer_size,
229 		.fn		= cb,
230 		.data		= ftl_rwb_batch_get_data(batch),
231 		.md		= ftl_rwb_batch_get_md(batch),
232 	};
233 
234 	return ftl_io_init_internal(&opts);
235 }
236 
237 struct ftl_io *
238 ftl_io_erase_init(struct ftl_band *band, size_t lbk_cnt, spdk_ftl_fn cb)
239 {
240 	struct ftl_io *io;
241 	struct ftl_io_init_opts opts = {
242 		.dev		= band->dev,
243 		.io		= NULL,
244 		.rwb_batch	= NULL,
245 		.band		= band,
246 		.size		= sizeof(struct ftl_io),
247 		.flags		= FTL_IO_PPA_MODE,
248 		.type		= FTL_IO_ERASE,
249 		.iov_cnt	= 0,
250 		.req_size	= 1,
251 		.fn		= cb,
252 		.data		= NULL,
253 		.md		= NULL,
254 	};
255 
256 	io = ftl_io_init_internal(&opts);
257 	io->lbk_cnt = lbk_cnt;
258 
259 	return io;
260 }
261 
262 void
263 ftl_io_user_init(struct spdk_ftl_dev *dev, struct ftl_io *io, uint64_t lba, size_t lbk_cnt,
264 		 struct iovec *iov, size_t iov_cnt,
265 		 spdk_ftl_fn cb_fn, void *cb_arg, int type)
266 {
267 	if (io->flags & FTL_IO_INITIALIZED) {
268 		return;
269 	}
270 
271 	ftl_io_init(io, dev, cb_fn, cb_arg, 0, type);
272 
273 	io->lba = lba;
274 	io->lbk_cnt = lbk_cnt;
275 	io->iov_cnt = iov_cnt;
276 
277 	if (iov_cnt > 1) {
278 		io->iovs = iov;
279 	} else {
280 		io->iov = *iov;
281 	}
282 
283 	ftl_trace_lba_io_init(io->dev, io);
284 }
285 
286 void
287 ftl_io_complete(struct ftl_io *io)
288 {
289 	int keep_alive = io->flags & FTL_IO_KEEP_ALIVE;
290 
291 	io->flags &= ~FTL_IO_INITIALIZED;
292 	io->cb.fn(io->cb.ctx, io->status);
293 
294 	if (!keep_alive) {
295 		ftl_io_free(io);
296 	}
297 }
298 
299 void
300 ftl_io_process_error(struct ftl_io *io, const struct spdk_nvme_cpl *status)
301 {
302 	io->status = -EIO;
303 
304 	/* TODO: add error handling for specifc cases */
305 	if (status->status.sct == SPDK_NVME_SCT_MEDIA_ERROR &&
306 	    status->status.sc == SPDK_OCSSD_SC_READ_HIGH_ECC) {
307 		io->status = 0;
308 	}
309 }
310 
311 void *
312 ftl_io_get_md(const struct ftl_io *io)
313 {
314 	if (!io->md) {
315 		return NULL;
316 	}
317 
318 	return (char *)io->md + io->pos * FTL_BLOCK_SIZE;
319 }
320 
321 struct ftl_io *
322 ftl_io_alloc(struct spdk_io_channel *ch)
323 {
324 	struct ftl_io *io;
325 	struct ftl_io_channel *ioch = spdk_io_channel_get_ctx(ch);
326 
327 	io = spdk_mempool_get(ioch->io_pool);
328 	if (!io) {
329 		return NULL;
330 	}
331 
332 	memset(io, 0, ioch->elem_size);
333 	io->ch = ch;
334 	return io;
335 }
336 
/* Reset an existing io and re-initialize it with a new callback,
 * flags and type, keeping its device binding. Used to reuse an io
 * across retries/resubmissions without returning it to the pool.
 */
void
ftl_io_reinit(struct ftl_io *io, spdk_ftl_fn fn, void *ctx, int flags, int type)
{
	ftl_io_clear(io);
	ftl_io_init(io, io->dev, fn, ctx, flags, type);
}
343 
344 void
345 ftl_io_clear(struct ftl_io *io)
346 {
347 	io->pos = 0;
348 	io->req_cnt = 0;
349 	io->iov_pos = 0;
350 	io->iov_off = 0;
351 	io->flags = 0;
352 	io->rwb_batch = NULL;
353 	io->band = NULL;
354 }
355 
356 void
357 ftl_io_free(struct ftl_io *io)
358 {
359 	struct ftl_io_channel *ioch;
360 
361 	if (!io) {
362 		return;
363 	}
364 
365 	if ((io->flags & FTL_IO_INTERNAL) && io->iov_cnt > 1) {
366 		free(io->iovs);
367 	}
368 
369 	ioch = spdk_io_channel_get_ctx(io->ch);
370 	spdk_mempool_put(ioch->io_pool, io);
371 }
372