/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/ftl.h"
#include "spdk/likely.h"
#include "spdk/util.h"

#include "ftl_io.h"
#include "ftl_core.h"
#include "ftl_rwb.h"
#include "ftl_band.h"
#include "ftl_debug.h"

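/*
 * Account for one more outstanding request on the IO. For writes going to
 * the media (i.e. anything that isn't a read, an erase or a cache IO), also
 * take a reference on the band's LBA map so it stays resident while the
 * request is in flight.
 */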
void
ftl_io_inc_req(struct ftl_io *io)
{
	struct ftl_band *band = io->band;

	if (!(io->flags & FTL_IO_CACHE) && io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
		ftl_band_acquire_lba_map(band);
	}

	__atomic_fetch_add(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);

	++io->req_cnt;
}

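/*
 * Mirror of ftl_io_inc_req(): release the LBA map reference (when one was
 * taken) and decrement both the per-device in-flight counter and the per-IO
 * request count. __atomic_fetch_sub() returns the value prior to the
 * subtraction, so the assert verifies the counter was non-zero beforehand.
 */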
void
ftl_io_dec_req(struct ftl_io *io)
{
	struct ftl_band *band = io->band;
	unsigned long num_inflight __attribute__((unused));

	if (!(io->flags & FTL_IO_CACHE) && io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
		ftl_band_release_lba_map(band);
	}

	num_inflight = __atomic_fetch_sub(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);

	assert(num_inflight > 0);
	assert(io->req_cnt > 0);

	--io->req_cnt;
}

struct iovec *
ftl_io_iovec(struct ftl_io *io)
{
	return &io->iov[0];
}

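/*
 * Return the LBA at the given block offset within the IO. Vectored-LBA IOs
 * carry an explicit array of LBAs, while regular IOs store only the first
 * LBA of a contiguous range.
 */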
uint64_t
ftl_io_get_lba(const struct ftl_io *io, size_t offset)
{
	assert(offset < io->num_blocks);

	if (io->flags & FTL_IO_VECTOR_LBA) {
		return io->lba.vector[offset];
	} else {
		return io->lba.single + offset;
	}
}

uint64_t
ftl_io_current_lba(const struct ftl_io *io)
{
	return ftl_io_get_lba(io, io->pos);
}

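/*
 * Move the IO's position forward by num_blocks, walking the iovec cursor
 * (iov_pos/iov_off) across iovec boundaries as needed. For example, with two
 * 4-block iovecs, advancing a fresh IO by 6 blocks leaves iov_pos == 1 and
 * iov_off == 2. The advance is propagated recursively to the parent IO,
 * which tracks the aggregate progress of all of its children.
 */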
void
ftl_io_advance(struct ftl_io *io, size_t num_blocks)
{
	struct iovec *iov = ftl_io_iovec(io);
	size_t iov_blocks, block_left = num_blocks;

	io->pos += num_blocks;

	if (io->iov_cnt != 0) {
		while (block_left > 0) {
			assert(io->iov_pos < io->iov_cnt);
			iov_blocks = iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE;

			if (io->iov_off + block_left < iov_blocks) {
				io->iov_off += block_left;
				break;
			}

			assert(iov_blocks > io->iov_off);
			block_left -= (iov_blocks - io->iov_off);
			io->iov_off = 0;
			io->iov_pos++;
		}
	}

	if (io->parent) {
		ftl_io_advance(io->parent, num_blocks);
	}
}

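/* Total number of FTL_BLOCK_SIZE blocks described by an iovec array. */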
size_t
ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt)
{
	size_t num_blocks = 0, i = 0;

	for (; i < iov_cnt; ++i) {
		num_blocks += iov[i].iov_len / FTL_BLOCK_SIZE;
	}

	return num_blocks;
}

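/* Address of the buffer backing the IO's current block position. */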
void *
ftl_io_iovec_addr(struct ftl_io *io)
{
	assert(io->iov_pos < io->iov_cnt);
	assert(io->iov_off * FTL_BLOCK_SIZE < ftl_io_iovec(io)[io->iov_pos].iov_len);

	return (char *)ftl_io_iovec(io)[io->iov_pos].iov_base +
	       io->iov_off * FTL_BLOCK_SIZE;
}

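/* Number of blocks left in the iovec entry the IO currently points at. */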
size_t
ftl_io_iovec_len_left(struct ftl_io *io)
{
	struct iovec *iov = ftl_io_iovec(io);
	return iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE - io->iov_off;
}

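/*
 * Copy the caller's iovecs into the IO. When num_blocks is 0, the block
 * count is derived from the total iovec length instead.
 */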
static void
_ftl_io_init_iovec(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt, size_t num_blocks)
{
	size_t iov_off;

	io->iov_pos = 0;
	io->iov_cnt = iov_cnt;
	io->num_blocks = num_blocks;

	memcpy(io->iov, iov, iov_cnt * sizeof(*iov));

	if (num_blocks == 0) {
		for (iov_off = 0; iov_off < iov_cnt; ++iov_off) {
			io->num_blocks += iov[iov_off].iov_len / FTL_BLOCK_SIZE;
		}
	}
}

static void _ftl_io_free(struct ftl_io *io);

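/*
 * Allocate a child IO covering the given iovecs and add its block count to
 * the parent's. The child's LBA range picks up where the parent's currently
 * accounted blocks end.
 */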
static int
ftl_io_add_child(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt)
{
	struct ftl_io *child;

	child = ftl_io_alloc_child(io);
	if (spdk_unlikely(!child)) {
		return -ENOMEM;
	}

	_ftl_io_init_iovec(child, iov, iov_cnt, 0);

	if (io->flags & FTL_IO_VECTOR_LBA) {
		child->lba.vector = io->lba.vector + io->num_blocks;
	} else {
		child->lba.single = io->lba.single + io->num_blocks;
	}

	io->num_blocks += child->num_blocks;
	return 0;
}

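/*
 * Attach iovecs to the IO. If they fit within FTL_IO_MAX_IOVEC they are
 * attached directly; otherwise they are spread across child IOs, each
 * holding at most FTL_IO_MAX_IOVEC entries (e.g. if FTL_IO_MAX_IOVEC were 4,
 * ten iovecs would yield children with 4, 4 and 2 entries). On allocation
 * failure all children created so far are freed and -ENOMEM is returned.
 */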
static int
ftl_io_init_iovec(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt, size_t num_blocks)
{
	struct ftl_io *child;
	size_t iov_off = 0, iov_left;
	int rc;

	if (spdk_likely(iov_cnt <= FTL_IO_MAX_IOVEC)) {
		_ftl_io_init_iovec(io, iov, iov_cnt, num_blocks);
		return 0;
	}

	while (iov_off < iov_cnt) {
		iov_left = spdk_min(iov_cnt - iov_off, FTL_IO_MAX_IOVEC);

		rc = ftl_io_add_child(io, &iov[iov_off], iov_left);
		if (spdk_unlikely(rc != 0)) {
			while ((child = LIST_FIRST(&io->children))) {
				assert(LIST_EMPTY(&child->children));
				LIST_REMOVE(child, child_entry);
				_ftl_io_free(child);
			}

			return -ENOMEM;
		}

		iov_off += iov_left;
	}

	assert(io->num_blocks == num_blocks);
	return 0;
}

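/*
 * Shrink an unstarted IO so that it describes exactly num_blocks blocks,
 * truncating the iovec that crosses the new boundary and dropping the rest.
 */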
void
ftl_io_shrink_iovec(struct ftl_io *io, size_t num_blocks)
{
	size_t iov_off = 0, block_off = 0;

	assert(io->num_blocks >= num_blocks);
	assert(io->pos == 0 && io->iov_pos == 0 && io->iov_off == 0);

	for (; iov_off < io->iov_cnt; ++iov_off) {
		size_t num_iov = io->iov[iov_off].iov_len / FTL_BLOCK_SIZE;
		size_t num_left = num_blocks - block_off;

		if (num_iov >= num_left) {
			io->iov[iov_off].iov_len = num_left * FTL_BLOCK_SIZE;
			io->iov_cnt = iov_off + 1;
			io->num_blocks = num_blocks;
			break;
		}

		block_off += num_iov;
	}
}

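/* Initialize the fields common to user and internal IOs. */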
static void
ftl_io_init(struct ftl_io *io, struct spdk_ftl_dev *dev,
	    ftl_io_fn fn, void *ctx, int flags, int type)
{
	io->flags |= flags | FTL_IO_INITIALIZED;
	io->type = type;
	io->dev = dev;
	io->lba.single = FTL_LBA_INVALID;
	io->addr.offset = FTL_ADDR_INVALID;
	io->cb_fn = fn;
	io->cb_ctx = ctx;
	io->trace = ftl_trace_alloc_id(dev);
}

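/*
 * Set up an IO for internal use based on the supplied options. The IO is
 * allocated if the caller didn't provide one: as a child when a parent is
 * given, standalone otherwise. A child inherits its LBA range starting at
 * the parent's current position. Returns NULL on failure; note that an IO
 * supplied through opts->io is not freed when the iovec setup fails.
 */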
struct ftl_io *
ftl_io_init_internal(const struct ftl_io_init_opts *opts)
{
	struct ftl_io *io = opts->io;
	struct ftl_io *parent = opts->parent;
	struct spdk_ftl_dev *dev = opts->dev;
	struct iovec iov = {
		.iov_base = opts->data,
		.iov_len  = opts->num_blocks * FTL_BLOCK_SIZE
	};

	if (!io) {
		if (parent) {
			io = ftl_io_alloc_child(parent);
		} else {
			io = ftl_io_alloc(ftl_get_io_channel(dev));
		}

		if (!io) {
			return NULL;
		}
	}

	ftl_io_clear(io);
	ftl_io_init(io, dev, opts->cb_fn, opts->cb_ctx, opts->flags | FTL_IO_INTERNAL, opts->type);

	io->rwb_batch = opts->rwb_batch;
	io->band = opts->band;
	io->md = opts->md;

	if (parent) {
		if (parent->flags & FTL_IO_VECTOR_LBA) {
			io->lba.vector = parent->lba.vector + parent->pos;
		} else {
			io->lba.single = parent->lba.single + parent->pos;
		}
	}

	if (ftl_io_init_iovec(io, &iov, 1, opts->num_blocks)) {
		if (!opts->io) {
			ftl_io_free(io);
		}
		return NULL;
	}

	if (opts->flags & FTL_IO_VECTOR_LBA) {
		io->lba.vector = calloc(io->num_blocks, sizeof(uint64_t));
		if (!io->lba.vector) {
			ftl_io_free(io);
			return NULL;
		}
	}

	return io;
}

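/*
 * Build an internal write IO that flushes a single write buffer (RWB) batch
 * to the given band at the given address. The transfer spans the device's
 * xfer_size blocks and the data/metadata buffers come from the batch itself.
 */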
struct ftl_io *
ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_addr addr, struct ftl_band *band,
		struct ftl_rwb_batch *batch, ftl_io_fn cb)
{
	struct ftl_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= dev,
		.io		= NULL,
		.rwb_batch	= batch,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= 0,
		.type		= FTL_IO_WRITE,
		.num_blocks	= dev->xfer_size,
		.cb_fn		= cb,
		.data		= ftl_rwb_batch_get_data(batch),
		.md		= ftl_rwb_batch_get_md(batch),
	};

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->addr = addr;

	return io;
}

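/*
 * Build an internal erase IO for a band. The IO is created with a single
 * placeholder block (erases carry no data buffer) and num_blocks is then
 * overwritten with the actual number of blocks to erase.
 */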
struct ftl_io *
ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb)
{
	struct ftl_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= band->dev,
		.io		= NULL,
		.rwb_batch	= NULL,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= FTL_IO_PHYSICAL_MODE,
		.type		= FTL_IO_ERASE,
		.num_blocks	= 1,
		.cb_fn		= cb,
		.data		= NULL,
		.md		= NULL,
	};

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->num_blocks = num_blocks;

	return io;
}

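/* Trampoline adapting the internal ftl_io_fn signature to the user's spdk_ftl_fn. */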
static void
_ftl_user_cb(struct ftl_io *io, void *arg, int status)
{
	io->user_fn(arg, status);
}

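/*
 * Build an IO for a user read/write request; the completion callback is the
 * user's spdk_ftl_fn, invoked through _ftl_user_cb(). A hypothetical caller
 * (buf, ioch, user_cb and user_ctx below are illustrative, not part of this
 * file) might set one up as follows:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 8 * FTL_BLOCK_SIZE };
 *	struct ftl_io *io = ftl_io_user_init(ioch, lba, 8, &iov, 1,
 *					     user_cb, user_ctx, FTL_IO_READ);
 *	if (!io) {
 *		... handle allocation failure ...
 *	}
 */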
struct ftl_io *
ftl_io_user_init(struct spdk_io_channel *_ioch, uint64_t lba, size_t num_blocks, struct iovec *iov,
		 size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_ctx, int type)
{
	struct ftl_io_channel *ioch = spdk_io_channel_get_ctx(_ioch);
	struct spdk_ftl_dev *dev = ioch->dev;
	struct ftl_io *io;

	io = ftl_io_alloc(_ioch);
	if (spdk_unlikely(!io)) {
		return NULL;
	}

	ftl_io_init(io, dev, _ftl_user_cb, cb_ctx, 0, type);
	io->lba.single = lba;
	io->user_fn = cb_fn;

	if (ftl_io_init_iovec(io, iov, iov_cnt, num_blocks)) {
		ftl_io_free(io);
		return NULL;
	}

	ftl_trace_lba_io_init(io->dev, io);
	return io;
}

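/*
 * Release the IO's resources and return it to its channel's mempool. Must
 * only be called once all of its children have been freed.
 */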
static void
_ftl_io_free(struct ftl_io *io)
{
	struct ftl_io_channel *ioch;

	assert(LIST_EMPTY(&io->children));

	if (io->flags & FTL_IO_VECTOR_LBA) {
		free(io->lba.vector);
	}

	if (pthread_spin_destroy(&io->lock)) {
		SPDK_ERRLOG("pthread_spin_destroy failed\n");
	}

	ioch = spdk_io_channel_get_ctx(io->ioch);
	spdk_mempool_put(ioch->io_pool, io);
}

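/*
 * Detach a child from its parent under the parent's lock, propagating the
 * child's status (the first non-zero status recorded wins). Returns true if
 * the parent is done and this was its last child, i.e. the parent itself can
 * now be completed.
 */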
static bool
ftl_io_remove_child(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool parent_done;

	pthread_spin_lock(&parent->lock);
	LIST_REMOVE(io, child_entry);
	parent_done = parent->done && LIST_EMPTY(&parent->children);
	parent->status = parent->status ? : io->status;
	pthread_spin_unlock(&parent->lock);

	return parent_done;
}

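/*
 * Mark the IO as done. If it has no outstanding children, invoke its
 * callback, complete the parent if this was the parent's last remaining
 * child, and free the IO. Otherwise completion is deferred until the last
 * child detaches.
 */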
void
ftl_io_complete(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool complete;

	io->flags &= ~FTL_IO_INITIALIZED;

	pthread_spin_lock(&io->lock);
	complete = LIST_EMPTY(&io->children);
	io->done = true;
	pthread_spin_unlock(&io->lock);

	if (complete) {
		if (io->cb_fn) {
			io->cb_fn(io, io->cb_ctx, io->status);
		}

		if (parent && ftl_io_remove_child(io)) {
			ftl_io_complete(parent);
		}

		_ftl_io_free(io);
	}
}

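/*
 * Allocate a child IO inheriting the parent's flags and type, and link it
 * onto the parent's children list under the parent's lock.
 */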
struct ftl_io *
ftl_io_alloc_child(struct ftl_io *parent)
{
	struct ftl_io *io;

	io = ftl_io_alloc(parent->ioch);
	if (spdk_unlikely(!io)) {
		return NULL;
	}

	ftl_io_init(io, parent->dev, NULL, NULL, parent->flags, parent->type);
	io->parent = parent;

	pthread_spin_lock(&parent->lock);
	LIST_INSERT_HEAD(&parent->children, io, child_entry);
	pthread_spin_unlock(&parent->lock);

	return io;
}

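/* Record an error status and advance the IO past all of its remaining blocks. */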
void
ftl_io_fail(struct ftl_io *io, int status)
{
	io->status = status;
	ftl_io_advance(io, io->num_blocks - io->pos);
}

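/* Metadata buffer for the IO's current block position, or NULL if none. */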
void *
ftl_io_get_md(const struct ftl_io *io)
{
	if (!io->md) {
		return NULL;
	}

	return (char *)io->md + io->pos * io->dev->md_size;
}

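/*
 * Get a zeroed IO from the channel's mempool and initialize its spinlock.
 * Returns NULL if the pool is exhausted or the lock cannot be initialized.
 */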
struct ftl_io *
ftl_io_alloc(struct spdk_io_channel *ch)
{
	struct ftl_io *io;
	struct ftl_io_channel *ioch = spdk_io_channel_get_ctx(ch);

	io = spdk_mempool_get(ioch->io_pool);
	if (!io) {
		return NULL;
	}

	memset(io, 0, ioch->elem_size);
	io->ioch = ch;

	if (pthread_spin_init(&io->lock, PTHREAD_PROCESS_PRIVATE)) {
		SPDK_ERRLOG("pthread_spin_init failed\n");
		spdk_mempool_put(ioch->io_pool, io);
		return NULL;
	}

	return io;
}

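/* Reset an IO and initialize it anew with the given callback, flags and type. */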
void
ftl_io_reinit(struct ftl_io *io, ftl_io_fn cb, void *ctx, int flags, int type)
{
	ftl_io_clear(io);
	ftl_io_init(io, io->dev, cb, ctx, flags, type);
}

void
ftl_io_clear(struct ftl_io *io)
{
	ftl_io_reset(io);

	io->flags = 0;
	io->rwb_batch = NULL;
	io->band = NULL;
}

void
ftl_io_reset(struct ftl_io *io)
{
	io->req_cnt = io->pos = io->iov_pos = io->iov_off = 0;
	io->done = false;
}

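/*
 * Free an IO without invoking its completion callback. If it is the last
 * child of an already-done parent, the parent is completed instead.
 */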
void
ftl_io_free(struct ftl_io *io)
{
	struct ftl_io *parent;

	if (!io) {
		return;
	}

	parent = io->parent;
	if (parent && ftl_io_remove_child(io)) {
		ftl_io_complete(parent);
	}

	_ftl_io_free(io);
}

void
ftl_io_call_foreach_child(struct ftl_io *io, int (*callback)(struct ftl_io *))
{
	struct ftl_io *child, *tmp;

	assert(!io->done);

	/*
	 * If the IO doesn't have any children, it means that it directly describes a request (i.e.
	 * all of the buffers, LBAs, etc. are filled). Otherwise the IO only groups together several
	 * requests and may be partially filled, so the callback needs to be called on all of its
	 * children instead.
	 */
	if (LIST_EMPTY(&io->children)) {
		callback(io);
		return;
	}

	LIST_FOREACH_SAFE(child, &io->children, child_entry, tmp) {
		int rc = callback(child);
		if (rc) {
			assert(rc != -EAGAIN);
			ftl_io_fail(io, rc);
			break;
		}
	}

	/*
	 * If all the callbacks were processed or an error occurred, treat this IO as completed.
	 * Multiple calls to ftl_io_call_foreach_child are not supported, resubmissions are supposed
	 * to be handled in the callback.
	 */
	ftl_io_complete(io);
}