xref: /spdk/lib/ftl/ftl_io.c (revision 712a3f69d32632bf6c862f00200f7f437d3f7529)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 #include "spdk/ftl.h"
36 #include "spdk/likely.h"
37 #include "spdk/util.h"
38 
39 #include "ftl_io.h"
40 #include "ftl_core.h"
41 #include "ftl_rwb.h"
42 #include "ftl_band.h"
43 #include "ftl_debug.h"
44 
45 void
46 ftl_io_inc_req(struct ftl_io *io)
47 {
48 	struct ftl_band *band = io->band;
49 
50 	if (!(io->flags & FTL_IO_CACHE) && io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
51 		ftl_band_acquire_lba_map(band);
52 	}
53 
54 	__atomic_fetch_add(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);
55 
56 	++io->req_cnt;
57 }
58 
59 void
60 ftl_io_dec_req(struct ftl_io *io)
61 {
62 	struct ftl_band *band = io->band;
63 	unsigned long num_inflight __attribute__((unused));
64 
65 	if (!(io->flags & FTL_IO_CACHE) && io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
66 		ftl_band_release_lba_map(band);
67 	}
68 
69 	num_inflight = __atomic_fetch_sub(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);
70 
71 	assert(num_inflight > 0);
72 	assert(io->req_cnt > 0);
73 
74 	--io->req_cnt;
75 }
76 
77 struct iovec *
78 ftl_io_iovec(struct ftl_io *io)
79 {
80 	return &io->iov[0];
81 }
82 
83 uint64_t
84 ftl_io_get_lba(const struct ftl_io *io, size_t offset)
85 {
86 	assert(offset < io->num_blocks);
87 
88 	if (io->flags & FTL_IO_VECTOR_LBA) {
89 		return io->lba.vector[offset];
90 	} else {
91 		return io->lba.single + offset;
92 	}
93 }
94 
95 uint64_t
96 ftl_io_current_lba(const struct ftl_io *io)
97 {
98 	return ftl_io_get_lba(io, io->pos);
99 }
100 
void
ftl_io_advance(struct ftl_io *io, size_t num_blocks)
{
	struct iovec *iov = ftl_io_iovec(io);
	size_t iov_blocks, block_left = num_blocks;

	io->pos += num_blocks;

	/* Move the iovec cursor (iov_pos / iov_off) forward by num_blocks,
	 * skipping over fully consumed iovecs.  IOs without iovecs (e.g.
	 * erases) only advance their block position. */
	if (io->iov_cnt != 0) {
		while (block_left > 0) {
			assert(io->iov_pos < io->iov_cnt);
			iov_blocks = iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE;

			/* Remaining advance fits inside the current iovec */
			if (io->iov_off + block_left < iov_blocks) {
				io->iov_off += block_left;
				break;
			}

			assert(iov_blocks > io->iov_off);
			block_left -= (iov_blocks - io->iov_off);
			io->iov_off = 0;
			io->iov_pos++;
		}
	}

	/* A child's blocks are a slice of the parent's request, so progress
	 * propagates up the chain. */
	if (io->parent) {
		ftl_io_advance(io->parent, num_blocks);
	}
}
130 
131 size_t
132 ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt)
133 {
134 	size_t num_blocks = 0, i = 0;
135 
136 	for (; i < iov_cnt; ++i) {
137 		num_blocks += iov[i].iov_len / FTL_BLOCK_SIZE;
138 	}
139 
140 	return num_blocks;
141 }
142 
143 void *
144 ftl_io_iovec_addr(struct ftl_io *io)
145 {
146 	assert(io->iov_pos < io->iov_cnt);
147 	assert(io->iov_off * FTL_BLOCK_SIZE < ftl_io_iovec(io)[io->iov_pos].iov_len);
148 
149 	return (char *)ftl_io_iovec(io)[io->iov_pos].iov_base +
150 	       io->iov_off * FTL_BLOCK_SIZE;
151 }
152 
153 size_t
154 ftl_io_iovec_len_left(struct ftl_io *io)
155 {
156 	struct iovec *iov = ftl_io_iovec(io);
157 	return iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE - io->iov_off;
158 }
159 
160 static void
161 ftl_io_init_iovec(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt, size_t iov_off,
162 		  size_t num_blocks)
163 {
164 	size_t offset = 0, num_left;
165 
166 	io->iov_pos = 0;
167 	io->iov_cnt = 0;
168 	io->num_blocks = num_blocks;
169 
170 	while (offset < num_blocks) {
171 		assert(io->iov_cnt < FTL_IO_MAX_IOVEC && io->iov_cnt < iov_cnt);
172 
173 		num_left = spdk_min(iov[io->iov_cnt].iov_len / FTL_BLOCK_SIZE - iov_off,
174 				    num_blocks);
175 		io->iov[io->iov_cnt].iov_base = (char *)iov[io->iov_cnt].iov_base +
176 						iov_off * FTL_BLOCK_SIZE;
177 		io->iov[io->iov_cnt].iov_len = num_left * FTL_BLOCK_SIZE;
178 
179 		offset += num_left;
180 		io->iov_cnt++;
181 		iov_off = 0;
182 	}
183 }
184 
185 void
186 ftl_io_shrink_iovec(struct ftl_io *io, size_t num_blocks)
187 {
188 	size_t iov_off = 0, block_off = 0;
189 
190 	assert(io->num_blocks >= num_blocks);
191 	assert(io->pos == 0 && io->iov_pos == 0 && io->iov_off == 0);
192 
193 	for (; iov_off < io->iov_cnt; ++iov_off) {
194 		size_t num_iov = io->iov[iov_off].iov_len / FTL_BLOCK_SIZE;
195 		size_t num_left = num_blocks - block_off;
196 
197 		if (num_iov >= num_left) {
198 			io->iov[iov_off].iov_len = num_left * FTL_BLOCK_SIZE;
199 			io->iov_cnt = iov_off + 1;
200 			io->num_blocks = num_blocks;
201 			break;
202 		}
203 
204 		block_off += num_iov;
205 	}
206 }
207 
208 static void
209 ftl_io_init(struct ftl_io *io, struct spdk_ftl_dev *dev,
210 	    ftl_io_fn fn, void *ctx, int flags, int type)
211 {
212 	io->flags |= flags | FTL_IO_INITIALIZED;
213 	io->type = type;
214 	io->dev = dev;
215 	io->lba.single = FTL_LBA_INVALID;
216 	io->addr.offset = FTL_ADDR_INVALID;
217 	io->cb_fn = fn;
218 	io->cb_ctx = ctx;
219 	io->trace = ftl_trace_alloc_id(dev);
220 }
221 
/* Set up an internal IO according to opts.  Allocates the IO if opts->io is
 * NULL (as a child of opts->parent when given).  Returns NULL on allocation
 * failure. */
struct ftl_io *
ftl_io_init_internal(const struct ftl_io_init_opts *opts)
{
	struct ftl_io *io = opts->io;
	struct ftl_io *parent = opts->parent;
	struct spdk_ftl_dev *dev = opts->dev;
	const struct iovec *iov;
	size_t iov_cnt, iov_off;

	/* Allocate an IO if the caller didn't supply one -- either as a child
	 * of the given parent or standalone from the device's IO channel. */
	if (!io) {
		if (parent) {
			io = ftl_io_alloc_child(parent);
		} else {
			io = ftl_io_alloc(ftl_get_io_channel(dev));
		}

		if (!io) {
			return NULL;
		}
	}

	ftl_io_clear(io);
	ftl_io_init(io, dev, opts->cb_fn, opts->cb_ctx, opts->flags | FTL_IO_INTERNAL, opts->type);

	io->rwb_batch = opts->rwb_batch;
	io->band = opts->band;
	io->md = opts->md;
	io->iov = &io->iov_buf[0];

	/* Child IOs inherit LBAs and buffers from the parent's current
	 * position; standalone IOs take them from the options. */
	if (parent) {
		if (parent->flags & FTL_IO_VECTOR_LBA) {
			io->lba.vector = parent->lba.vector + parent->pos;
		} else {
			io->lba.single = parent->lba.single + parent->pos;
		}

		iov = &parent->iov[parent->iov_pos];
		iov_cnt = parent->iov_cnt - parent->iov_pos;
		iov_off = parent->iov_off;
	} else {
		iov = &opts->iovs[0];
		iov_cnt = opts->iovcnt;
		iov_off = 0;
	}

	/* Some requests (zone resets) do not use iovecs */
	if (iov_cnt > 0) {
		ftl_io_init_iovec(io, iov, iov_cnt, iov_off, opts->num_blocks);
	}

	if (opts->flags & FTL_IO_VECTOR_LBA) {
		io->lba.vector = calloc(io->num_blocks, sizeof(uint64_t));
		if (!io->lba.vector) {
			ftl_io_free(io);
			return NULL;
		}
	}

	return io;
}
282 
/* Build a write IO that flushes a full RWB batch (dev->xfer_size blocks) to
 * the given band at addr, with a single iovec over the batch's data buffer
 * and the batch's metadata attached.  Returns NULL on allocation failure. */
struct ftl_io *
ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_addr addr, struct ftl_band *band,
		struct ftl_rwb_batch *batch, ftl_io_fn cb)
{
	struct ftl_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= dev,
		.io		= NULL,
		.rwb_batch	= batch,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= 0,
		.type		= FTL_IO_WRITE,
		.num_blocks	= dev->xfer_size,
		.cb_fn		= cb,
		.iovs		= {
			{
				.iov_base = ftl_rwb_batch_get_data(batch),
				.iov_len = dev->xfer_size * FTL_BLOCK_SIZE,
			}
		},
		.iovcnt		= 1,
		.md		= ftl_rwb_batch_get_md(batch),
	};

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->addr = addr;

	return io;
}
317 
/* Build an erase IO for num_blocks zones of the given band.  Erases carry no
 * data buffers (iovcnt == 0) and address zones directly (physical mode).
 * num_blocks is patched in after init since opts always requests 1.
 * Returns NULL on allocation failure. */
struct ftl_io *
ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb)
{
	struct ftl_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= band->dev,
		.io		= NULL,
		.rwb_batch	= NULL,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= FTL_IO_PHYSICAL_MODE,
		.type		= FTL_IO_ERASE,
		.num_blocks	= 1,
		.cb_fn		= cb,
		.iovcnt		= 0,
		.md		= NULL,
	};

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->num_blocks = num_blocks;

	return io;
}
345 
/* Internal completion adapter: forwards the status to the user-supplied
 * spdk_ftl_fn stored in the IO by ftl_io_user_init(). */
static void
_ftl_user_cb(struct ftl_io *io, void *arg, int status)
{
	io->user_fn(arg, status);
}
351 
/* Build an IO for a user read/write request: contiguous LBA range starting at
 * lba, buffers borrowed from the caller's iov (not copied).  The user callback
 * is invoked through the _ftl_user_cb adapter.  Returns NULL if the channel's
 * IO pool is exhausted. */
struct ftl_io *
ftl_io_user_init(struct spdk_io_channel *_ioch, uint64_t lba, size_t num_blocks, struct iovec *iov,
		 size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_ctx, int type)
{
	struct ftl_io_channel *ioch = spdk_io_channel_get_ctx(_ioch);
	struct spdk_ftl_dev *dev = ioch->dev;
	struct ftl_io *io;

	io = ftl_io_alloc(_ioch);
	if (spdk_unlikely(!io)) {
		return NULL;
	}

	ftl_io_init(io, dev, _ftl_user_cb, cb_ctx, 0, type);
	io->lba.single = lba;
	io->user_fn = cb_fn;
	io->iov = iov;
	io->iov_cnt = iov_cnt;
	io->num_blocks = num_blocks;

	ftl_trace_lba_io_init(io->dev, io);
	return io;
}
375 
/* Release an IO's resources and return it to its channel's mempool.  The IO
 * must have no remaining children and must already be unlinked from any
 * parent. */
static void
_ftl_io_free(struct ftl_io *io)
{
	struct ftl_io_channel *ioch;

	assert(LIST_EMPTY(&io->children));

	/* The vector LBA array was calloc'd in ftl_io_init_internal() */
	if (io->flags & FTL_IO_VECTOR_LBA) {
		free(io->lba.vector);
	}

	if (pthread_spin_destroy(&io->lock)) {
		SPDK_ERRLOG("pthread_spin_destroy failed\n");
	}

	ioch = spdk_io_channel_get_ctx(io->ioch);
	spdk_mempool_put(ioch->io_pool, io);
}
394 
/* Unlink a child from its parent under the parent's lock and propagate the
 * child's status.  Returns true if the parent is done and this was its last
 * outstanding child, i.e. the caller should now complete the parent. */
static bool
ftl_io_remove_child(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool parent_done;

	pthread_spin_lock(&parent->lock);
	LIST_REMOVE(io, child_entry);
	parent_done = parent->done && LIST_EMPTY(&parent->children);
	/* First recorded error wins -- don't overwrite an existing status */
	parent->status = parent->status ? : io->status;
	pthread_spin_unlock(&parent->lock);

	return parent_done;
}
409 
/* Complete an IO: once it has no outstanding children, invoke its callback,
 * detach it from its parent (completing the parent too if this was its last
 * child and it was already done) and free it.  If children remain, completion
 * is deferred until the last child detaches. */
void
ftl_io_complete(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool complete;

	io->flags &= ~FTL_IO_INITIALIZED;

	pthread_spin_lock(&io->lock);
	complete = LIST_EMPTY(&io->children);
	io->done = true;
	pthread_spin_unlock(&io->lock);

	if (complete) {
		if (io->cb_fn) {
			io->cb_fn(io, io->cb_ctx, io->status);
		}

		/* Detaching may make the parent eligible for completion */
		if (parent && ftl_io_remove_child(io)) {
			ftl_io_complete(parent);
		}

		_ftl_io_free(io);
	}
}
435 
/* Allocate a child IO from the parent's channel, inheriting the parent's
 * flags and type, and link it onto the parent's children list.  Returns NULL
 * if the channel's IO pool is exhausted. */
struct ftl_io *
ftl_io_alloc_child(struct ftl_io *parent)
{
	struct ftl_io *io;

	io = ftl_io_alloc(parent->ioch);
	if (spdk_unlikely(!io)) {
		return NULL;
	}

	ftl_io_init(io, parent->dev, NULL, NULL, parent->flags, parent->type);
	io->parent = parent;

	pthread_spin_lock(&parent->lock);
	LIST_INSERT_HEAD(&parent->children, io, child_entry);
	pthread_spin_unlock(&parent->lock);

	return io;
}
455 
/* Record the error status and fast-forward the IO past all of its remaining
 * blocks so it's treated as fully processed. */
void ftl_io_fail(struct ftl_io *io, int status)
{
	io->status = status;
	ftl_io_advance(io, io->num_blocks - io->pos);
}
461 
462 void *
463 ftl_io_get_md(const struct ftl_io *io)
464 {
465 	if (!io->md) {
466 		return NULL;
467 	}
468 
469 	return (char *)io->md + io->pos * io->dev->md_size;
470 }
471 
472 struct ftl_io *
473 ftl_io_alloc(struct spdk_io_channel *ch)
474 {
475 	struct ftl_io *io;
476 	struct ftl_io_channel *ioch = spdk_io_channel_get_ctx(ch);
477 
478 	io = spdk_mempool_get(ioch->io_pool);
479 	if (!io) {
480 		return NULL;
481 	}
482 
483 	memset(io, 0, ioch->elem_size);
484 	io->ioch = ch;
485 
486 	if (pthread_spin_init(&io->lock, PTHREAD_PROCESS_PRIVATE)) {
487 		SPDK_ERRLOG("pthread_spin_init failed\n");
488 		spdk_mempool_put(ioch->io_pool, io);
489 		return NULL;
490 	}
491 
492 	return io;
493 }
494 
/* Reset an already-allocated IO and reinitialize it with a new callback,
 * flags and type so it can be resubmitted. */
void
ftl_io_reinit(struct ftl_io *io, ftl_io_fn cb, void *ctx, int flags, int type)
{
	ftl_io_clear(io);
	ftl_io_init(io, io->dev, cb, ctx, flags, type);
}
501 
502 void
503 ftl_io_clear(struct ftl_io *io)
504 {
505 	ftl_io_reset(io);
506 
507 	io->flags = 0;
508 	io->rwb_batch = NULL;
509 	io->band = NULL;
510 }
511 
512 void
513 ftl_io_reset(struct ftl_io *io)
514 {
515 	io->req_cnt = io->pos = io->iov_pos = io->iov_off = 0;
516 	io->done = false;
517 }
518 
/* Free an IO without invoking its callback.  If it was a child, detach it
 * from the parent first -- which may complete the parent if this was its last
 * outstanding child.  NULL is a no-op. */
void
ftl_io_free(struct ftl_io *io)
{
	struct ftl_io *parent;

	if (!io) {
		return;
	}

	parent = io->parent;
	if (parent && ftl_io_remove_child(io)) {
		ftl_io_complete(parent);
	}

	_ftl_io_free(io);
}
535 
/* Invoke callback on the IO itself (if it has no children) or on each of its
 * children, failing the whole IO on the first non-zero return, and then
 * complete the IO.  May only be called once per IO. */
void
ftl_io_call_foreach_child(struct ftl_io *io, int (*callback)(struct ftl_io *))
{
	struct ftl_io *child, *tmp;

	assert(!io->done);

	/*
	 * If the IO doesn't have any children, it means that it directly describes a request (i.e.
	 * all of the buffers, LBAs, etc. are filled). Otherwise the IO only groups together several
	 * requests and may be partially filled, so the callback needs to be called on all of its
	 * children instead.
	 */
	if (LIST_EMPTY(&io->children)) {
		callback(io);
		return;
	}

	LIST_FOREACH_SAFE(child, &io->children, child_entry, tmp) {
		int rc = callback(child);
		if (rc) {
			/* -EAGAIN would require a retry, which isn't supported here */
			assert(rc != -EAGAIN);
			ftl_io_fail(io, rc);
			break;
		}
	}

	/*
	 * If all the callbacks were processed or an error occurred, treat this IO as completed.
	 * Multiple calls to ftl_io_call_foreach_child are not supported, resubmissions are supposed
	 * to be handled in the callback.
	 */
	ftl_io_complete(io);
}
570