xref: /spdk/lib/ftl/ftl_io.c (revision c4d9daeb7bf491bc0eb6e8d417b75d44773cb009)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 #include "spdk/ftl.h"
36 #include "spdk/likely.h"
37 
38 #include "ftl_io.h"
39 #include "ftl_core.h"
40 #include "ftl_rwb.h"
41 #include "ftl_band.h"
42 
43 void
44 ftl_io_inc_req(struct ftl_io *io)
45 {
46 	struct ftl_band *band = io->band;
47 
48 	if (io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
49 		ftl_band_acquire_md(band);
50 	}
51 
52 	__atomic_fetch_add(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);
53 
54 	++io->req_cnt;
55 }
56 
57 void
58 ftl_io_dec_req(struct ftl_io *io)
59 {
60 	struct ftl_band *band = io->band;
61 	unsigned long num_inflight __attribute__((unused));
62 
63 	if (io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
64 		ftl_band_release_md(band);
65 	}
66 
67 	num_inflight = __atomic_fetch_sub(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);
68 
69 	assert(num_inflight > 0);
70 	assert(io->req_cnt > 0);
71 
72 	--io->req_cnt;
73 }
74 
75 struct iovec *
76 ftl_io_iovec(struct ftl_io *io)
77 {
78 	if (io->iov_cnt > 1) {
79 		return io->iov.vector;
80 	} else {
81 		return &io->iov.single;
82 	}
83 }
84 
85 uint64_t
86 ftl_io_get_lba(const struct ftl_io *io, size_t offset)
87 {
88 	assert(offset < io->lbk_cnt);
89 
90 	if (io->flags & FTL_IO_VECTOR_LBA) {
91 		return io->lba.vector[offset];
92 	} else {
93 		return io->lba.single + offset;
94 	}
95 }
96 
/* Return the LBA of the block the IO is currently positioned at */
uint64_t
ftl_io_current_lba(const struct ftl_io *io)
{
	return ftl_io_get_lba(io, io->pos);
}
102 
103 void
104 ftl_io_advance(struct ftl_io *io, size_t lbk_cnt)
105 {
106 	struct iovec *iov = ftl_io_iovec(io);
107 	size_t iov_lbks;
108 
109 	io->pos += lbk_cnt;
110 
111 	if (io->iov_cnt == 0) {
112 		return;
113 	}
114 
115 	while (lbk_cnt > 0) {
116 		assert(io->iov_pos < io->iov_cnt);
117 		iov_lbks = iov[io->iov_pos].iov_len / PAGE_SIZE;
118 
119 		if (io->iov_off + lbk_cnt < iov_lbks) {
120 			io->iov_off += lbk_cnt;
121 			break;
122 		}
123 
124 		assert(iov_lbks > io->iov_off);
125 		lbk_cnt -= (iov_lbks - io->iov_off);
126 		io->iov_off = 0;
127 		io->iov_pos++;
128 	}
129 }
130 
/* Total number of logical blocks covered by an iovec array */
size_t
ftl_iovec_num_lbks(struct iovec *iov, size_t iov_cnt)
{
	size_t total = 0, idx;

	for (idx = 0; idx < iov_cnt; ++idx) {
		total += iov[idx].iov_len / PAGE_SIZE;
	}

	return total;
}
142 
143 void *
144 ftl_io_iovec_addr(struct ftl_io *io)
145 {
146 	assert(io->iov_pos < io->iov_cnt);
147 	assert(io->iov_off * PAGE_SIZE < ftl_io_iovec(io)[io->iov_pos].iov_len);
148 
149 	return (char *)ftl_io_iovec(io)[io->iov_pos].iov_base +
150 	       io->iov_off * PAGE_SIZE;
151 }
152 
153 size_t
154 ftl_io_iovec_len_left(struct ftl_io *io)
155 {
156 	struct iovec *iov = ftl_io_iovec(io);
157 	return iov[io->iov_pos].iov_len / PAGE_SIZE - io->iov_off;
158 }
159 
160 int
161 ftl_io_init_iovec(struct ftl_io *io, void *buf,
162 		  size_t iov_cnt, size_t req_size)
163 {
164 	struct iovec *iov;
165 	size_t i;
166 
167 	if (iov_cnt > 1) {
168 		iov = io->iov.vector = calloc(iov_cnt, sizeof(*iov));
169 		if (!iov) {
170 			return -ENOMEM;
171 		}
172 	} else {
173 		iov = &io->iov.single;
174 	}
175 
176 	io->iov_pos = 0;
177 	io->iov_cnt = iov_cnt;
178 	for (i = 0; i < iov_cnt; ++i) {
179 		iov[i].iov_base = (char *)buf + i * req_size * PAGE_SIZE;
180 		iov[i].iov_len = req_size * PAGE_SIZE;
181 	}
182 
183 	return 0;
184 }
185 
186 static void
187 ftl_io_init(struct ftl_io *io, struct spdk_ftl_dev *dev,
188 	    spdk_ftl_fn fn, void *ctx, int flags, int type)
189 {
190 	io->flags |= flags | FTL_IO_INITIALIZED;
191 	io->type = type;
192 	io->dev = dev;
193 	io->lba.single = FTL_LBA_INVALID;
194 	io->ppa.ppa = FTL_PPA_INVALID;
195 	io->cb.fn = fn;
196 	io->cb.ctx = ctx;
197 	io->trace = ftl_trace_alloc_id(dev);
198 }
199 
/* Initialize an internal IO from opts. If opts->io is NULL a new IO is
 * allocated (as a child of opts->parent when set, otherwise from the
 * device's IO channel) and is freed again on failure; a caller-supplied
 * IO remains owned by the caller. Returns NULL on allocation failure.
 */
struct ftl_io *
ftl_io_init_internal(const struct ftl_io_init_opts *opts)
{
	struct ftl_io *io = opts->io;
	struct spdk_ftl_dev *dev = opts->dev;

	if (!io) {
		if (opts->parent) {
			io = ftl_io_alloc_child(opts->parent);
		} else {
			io = ftl_io_alloc(dev->ioch);
		}

		if (!io) {
			return NULL;
		}
	}

	ftl_io_clear(io);
	/* Internal IOs pass the io itself as the callback context */
	ftl_io_init(io, dev, opts->fn, io, opts->flags | FTL_IO_INTERNAL, opts->type);

	io->lbk_cnt = opts->iov_cnt * opts->req_size;
	io->rwb_batch = opts->rwb_batch;
	io->band = opts->band;
	io->md = opts->md;

	if (ftl_io_init_iovec(io, opts->data, opts->iov_cnt, opts->req_size)) {
		/* Only free the IO if it was allocated above */
		if (!opts->io) {
			ftl_io_free(io);
		}
		return NULL;
	}

	return io;
}
235 
236 struct ftl_io *
237 ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_band *band,
238 		struct ftl_rwb_batch *batch, spdk_ftl_fn cb)
239 {
240 	struct ftl_io_init_opts opts = {
241 		.dev		= dev,
242 		.io		= NULL,
243 		.rwb_batch	= batch,
244 		.band		= band,
245 		.size		= sizeof(struct ftl_io),
246 		.flags		= 0,
247 		.type		= FTL_IO_WRITE,
248 		.iov_cnt	= 1,
249 		.req_size	= dev->xfer_size,
250 		.fn		= cb,
251 		.data		= ftl_rwb_batch_get_data(batch),
252 		.md		= ftl_rwb_batch_get_md(batch),
253 	};
254 
255 	return ftl_io_init_internal(&opts);
256 }
257 
258 struct ftl_io *
259 ftl_io_erase_init(struct ftl_band *band, size_t lbk_cnt, spdk_ftl_fn cb)
260 {
261 	struct ftl_io *io;
262 	struct ftl_io_init_opts opts = {
263 		.dev		= band->dev,
264 		.io		= NULL,
265 		.rwb_batch	= NULL,
266 		.band		= band,
267 		.size		= sizeof(struct ftl_io),
268 		.flags		= FTL_IO_PPA_MODE,
269 		.type		= FTL_IO_ERASE,
270 		.iov_cnt	= 0,
271 		.req_size	= 1,
272 		.fn		= cb,
273 		.data		= NULL,
274 		.md		= NULL,
275 	};
276 
277 	io = ftl_io_init_internal(&opts);
278 	if (!io) {
279 		return NULL;
280 	}
281 
282 	io->lbk_cnt = lbk_cnt;
283 
284 	return io;
285 }
286 
/* Initialize a user-submitted read/write IO with its LBA range and data
 * buffers. A no-op if the IO is already initialized (e.g. resubmission).
 */
void
ftl_io_user_init(struct spdk_ftl_dev *dev, struct ftl_io *io, uint64_t lba, size_t lbk_cnt,
		 struct iovec *iov, size_t iov_cnt,
		 spdk_ftl_fn cb_fn, void *cb_arg, int type)
{
	if (io->flags & FTL_IO_INITIALIZED) {
		return;
	}

	ftl_io_init(io, dev, cb_fn, cb_arg, 0, type);

	io->lba.single = lba;
	io->lbk_cnt = lbk_cnt;
	io->iov_cnt = iov_cnt;

	/* NOTE(review): *iov is dereferenced below, so this assumes
	 * iov_cnt >= 1 - confirm callers never pass an empty iovec. */
	if (iov_cnt > 1) {
		io->iov.vector = iov;
	} else {
		io->iov.single = *iov;
	}

	ftl_trace_lba_io_init(io->dev, io);
}
310 
311 static void
312 _ftl_io_free(struct ftl_io *io)
313 {
314 	struct ftl_io_channel *ioch;
315 
316 	assert(LIST_EMPTY(&io->children));
317 
318 	if ((io->flags & FTL_IO_INTERNAL) && io->iov_cnt > 1) {
319 		free(io->iov.vector);
320 	}
321 
322 	if (pthread_spin_destroy(&io->lock)) {
323 		SPDK_ERRLOG("pthread_spin_destroy failed\n");
324 	}
325 
326 	ioch = spdk_io_channel_get_ctx(io->ioch);
327 	spdk_mempool_put(ioch->io_pool, io);
328 }
329 
330 static bool
331 ftl_io_remove_child(struct ftl_io *io)
332 {
333 	struct ftl_io *parent = io->parent;
334 	bool parent_done;
335 
336 	pthread_spin_lock(&parent->lock);
337 	LIST_REMOVE(io, child_entry);
338 	parent_done = parent->done && LIST_EMPTY(&parent->children);
339 	parent->status = parent->status ? : io->status;
340 	pthread_spin_unlock(&parent->lock);
341 
342 	return parent_done;
343 }
344 
/* Complete an IO: invoke its callback, propagate completion to the parent
 * (recursively, if this was the parent's last outstanding child) and free
 * the IO unless it's flagged FTL_IO_KEEP_ALIVE. If children are still
 * outstanding, only the done flag is set; the last child to complete
 * re-enters this function for the parent.
 */
void
ftl_io_complete(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool complete, keep_alive = io->flags & FTL_IO_KEEP_ALIVE;

	io->flags &= ~FTL_IO_INITIALIZED;

	/* Check the child list and set done under the lock so a child
	 * completing concurrently sees a consistent (done, children) pair.
	 */
	pthread_spin_lock(&io->lock);
	complete = LIST_EMPTY(&io->children);
	io->done = true;
	pthread_spin_unlock(&io->lock);

	if (complete) {
		if (io->cb.fn) {
			io->cb.fn(io->cb.ctx, io->status);
		}

		/* Recurse into the parent if this was its last child */
		if (parent && ftl_io_remove_child(io)) {
			ftl_io_complete(parent);
		}

		if (!keep_alive) {
			_ftl_io_free(io);
		}
	}
}
372 
373 struct ftl_io *
374 ftl_io_alloc_child(struct ftl_io *parent)
375 {
376 	struct ftl_io *io;
377 
378 	io = ftl_io_alloc(parent->ioch);
379 	if (spdk_unlikely(!io)) {
380 		return NULL;
381 	}
382 
383 	io->parent = parent;
384 
385 	pthread_spin_lock(&parent->lock);
386 	LIST_INSERT_HEAD(&parent->children, io, child_entry);
387 	pthread_spin_unlock(&parent->lock);
388 
389 	return io;
390 }
391 
392 void
393 ftl_io_process_error(struct ftl_io *io, const struct spdk_nvme_cpl *status)
394 {
395 	/* TODO: add error handling for specifc cases */
396 	if (status->status.sct == SPDK_NVME_SCT_MEDIA_ERROR &&
397 	    status->status.sc == SPDK_OCSSD_SC_READ_HIGH_ECC) {
398 		return;
399 	}
400 
401 	io->status = -EIO;
402 }
403 
/* Fail an IO with the given status and skip past all of its remaining
 * blocks so it can be completed immediately.
 */
void ftl_io_fail(struct ftl_io *io, int status)
{
	io->status = status;
	ftl_io_advance(io, io->lbk_cnt - io->pos);
}
409 
410 void *
411 ftl_io_get_md(const struct ftl_io *io)
412 {
413 	if (!io->md) {
414 		return NULL;
415 	}
416 
417 	return (char *)io->md + io->pos * FTL_BLOCK_SIZE;
418 }
419 
420 struct ftl_io *
421 ftl_io_alloc(struct spdk_io_channel *ch)
422 {
423 	struct ftl_io *io;
424 	struct ftl_io_channel *ioch = spdk_io_channel_get_ctx(ch);
425 
426 	io = spdk_mempool_get(ioch->io_pool);
427 	if (!io) {
428 		return NULL;
429 	}
430 
431 	memset(io, 0, ioch->elem_size);
432 	io->ioch = ch;
433 
434 	if (pthread_spin_init(&io->lock, PTHREAD_PROCESS_PRIVATE)) {
435 		SPDK_ERRLOG("pthread_spin_init failed\n");
436 		spdk_mempool_put(ioch->io_pool, io);
437 		return NULL;
438 	}
439 
440 	return io;
441 }
442 
/* Reset an IO's progress state and reinitialize it with a new callback,
 * flags and type (used when resubmitting an existing IO).
 */
void
ftl_io_reinit(struct ftl_io *io, spdk_ftl_fn fn, void *ctx, int flags, int type)
{
	ftl_io_clear(io);
	ftl_io_init(io, io->dev, fn, ctx, flags, type);
}
449 
450 void
451 ftl_io_clear(struct ftl_io *io)
452 {
453 	io->pos = 0;
454 	io->req_cnt = 0;
455 	io->iov_pos = 0;
456 	io->iov_off = 0;
457 	io->flags = 0;
458 	io->rwb_batch = NULL;
459 	io->band = NULL;
460 }
461 
462 void
463 ftl_io_free(struct ftl_io *io)
464 {
465 	struct ftl_io *parent = io->parent;
466 
467 	if (!io) {
468 		return;
469 	}
470 
471 	if (parent && ftl_io_remove_child(io)) {
472 		ftl_io_complete(parent);
473 	}
474 
475 	_ftl_io_free(io);
476 }
477