/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/ftl.h"
#include "spdk/likely.h"
#include "spdk/util.h"

#include "ftl_io.h"
#include "ftl_core.h"
#include "ftl_band.h"
#include "ftl_debug.h"

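/*
 * Account for one more outstanding request on the IO. Writes and other
 * media-modifying requests (anything that is not a read, an erase, or a cache
 * IO) also pin the band's LBA map, which ftl_io_dec_req() releases once the
 * request finishes.
 */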
void
ftl_io_inc_req(struct ftl_io *io)
{
	struct ftl_band *band = io->band;

	if (!(io->flags & FTL_IO_CACHE) && io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
		ftl_band_acquire_lba_map(band);
	}

	__atomic_fetch_add(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);

	++io->req_cnt;
}

void
ftl_io_dec_req(struct ftl_io *io)
{
	struct ftl_band *band = io->band;
	unsigned long num_inflight __attribute__((unused));

	if (!(io->flags & FTL_IO_CACHE) && io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
		ftl_band_release_lba_map(band);
	}

	num_inflight = __atomic_fetch_sub(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);

	assert(num_inflight > 0);
	assert(io->req_cnt > 0);

	--io->req_cnt;
}

struct iovec *
ftl_io_iovec(struct ftl_io *io)
{
	return &io->iov[0];
}

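/*
 * Return the LBA at the given block offset within the IO. An IO either covers
 * a contiguous range starting at lba.single or, when FTL_IO_VECTOR_LBA is set,
 * carries an explicit per-block LBA vector.
 */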
uint64_t
ftl_io_get_lba(const struct ftl_io *io, size_t offset)
{
	assert(offset < io->num_blocks);

	if (io->flags & FTL_IO_VECTOR_LBA) {
		return io->lba.vector[offset];
	} else {
		return io->lba.single + offset;
	}
}

uint64_t
ftl_io_current_lba(const struct ftl_io *io)
{
	return ftl_io_get_lba(io, io->pos);
}

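/*
 * Advance the IO's position by num_blocks, updating the current iovec index
 * (iov_pos) and the block offset within it (iov_off). For example, with two
 * 4-block iovecs and iov_pos = 0, iov_off = 3, advancing by 2 blocks consumes
 * the last block of the first iovec and leaves iov_pos = 1, iov_off = 1. The
 * advance is propagated up the parent chain, so a parent's position reflects
 * the combined progress of its children.
 */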
void
ftl_io_advance(struct ftl_io *io, size_t num_blocks)
{
	struct iovec *iov = ftl_io_iovec(io);
	size_t iov_blocks, block_left = num_blocks;

	io->pos += num_blocks;

	if (io->iov_cnt != 0) {
		while (block_left > 0) {
			assert(io->iov_pos < io->iov_cnt);
			iov_blocks = iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE;

			if (io->iov_off + block_left < iov_blocks) {
				io->iov_off += block_left;
				break;
			}

			assert(iov_blocks > io->iov_off);
			block_left -= (iov_blocks - io->iov_off);
			io->iov_off = 0;
			io->iov_pos++;
		}
	}

	if (io->parent) {
		ftl_io_advance(io->parent, num_blocks);
	}
}

size_t
ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt)
{
	size_t num_blocks = 0, i = 0;

	for (; i < iov_cnt; ++i) {
		num_blocks += iov[i].iov_len / FTL_BLOCK_SIZE;
	}

	return num_blocks;
}

void *
ftl_io_iovec_addr(struct ftl_io *io)
{
	assert(io->iov_pos < io->iov_cnt);
	assert(io->iov_off * FTL_BLOCK_SIZE < ftl_io_iovec(io)[io->iov_pos].iov_len);

	return (char *)ftl_io_iovec(io)[io->iov_pos].iov_base +
	       io->iov_off * FTL_BLOCK_SIZE;
}

size_t
ftl_io_iovec_len_left(struct ftl_io *io)
{
	struct iovec *iov = ftl_io_iovec(io);
	return iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE - io->iov_off;
}

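/*
 * Fill io->iov with buffers for num_blocks blocks taken from the source iovec
 * array, skipping the first iov_off blocks of the first entry. Every source
 * iov_len is expected to be a multiple of FTL_BLOCK_SIZE.
 */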
static void
ftl_io_init_iovec(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt, size_t iov_off,
		  size_t num_blocks)
{
	size_t offset = 0, num_left;

	io->iov_pos = 0;
	io->iov_cnt = 0;
	io->num_blocks = num_blocks;

	while (offset < num_blocks) {
		assert(io->iov_cnt < FTL_IO_MAX_IOVEC && io->iov_cnt < iov_cnt);

		/* Clamp to the blocks still needed, not the total, so the last
		 * iovec isn't oversized when the source has spare capacity.
		 */
		num_left = spdk_min(iov[io->iov_cnt].iov_len / FTL_BLOCK_SIZE - iov_off,
				    num_blocks - offset);
		io->iov[io->iov_cnt].iov_base = (char *)iov[io->iov_cnt].iov_base +
						iov_off * FTL_BLOCK_SIZE;
		io->iov[io->iov_cnt].iov_len = num_left * FTL_BLOCK_SIZE;

		offset += num_left;
		io->iov_cnt++;
		iov_off = 0;
	}
}

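/*
 * Trim an IO that hasn't been started yet down to num_blocks, truncating the
 * iovec that crosses the new boundary and dropping any iovecs past it.
 */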
void
ftl_io_shrink_iovec(struct ftl_io *io, size_t num_blocks)
{
	size_t iov_off = 0, block_off = 0;

	assert(io->num_blocks >= num_blocks);
	assert(io->pos == 0 && io->iov_pos == 0 && io->iov_off == 0);

	for (; iov_off < io->iov_cnt; ++iov_off) {
		size_t num_iov = io->iov[iov_off].iov_len / FTL_BLOCK_SIZE;
		size_t num_left = num_blocks - block_off;

		if (num_iov >= num_left) {
			io->iov[iov_off].iov_len = num_left * FTL_BLOCK_SIZE;
			io->iov_cnt = iov_off + 1;
			io->num_blocks = num_blocks;
			break;
		}

		block_off += num_iov;
	}
}

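/* Fill in the fields common to every IO; callers set any type-specific state afterwards. */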
static void
ftl_io_init(struct ftl_io *io, struct spdk_ftl_dev *dev,
	    ftl_io_fn fn, void *ctx, int flags, int type)
{
	io->flags |= flags | FTL_IO_INITIALIZED;
	io->type = type;
	io->dev = dev;
	io->lba.single = FTL_LBA_INVALID;
	io->addr.offset = FTL_ADDR_INVALID;
	io->cb_fn = fn;
	io->cb_ctx = ctx;
	io->trace = ftl_trace_alloc_id(dev);
}

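/*
 * Set up an internal IO from the given options. If opts->io is NULL, a new IO
 * is allocated: as a child of opts->parent (inheriting the parent's LBAs and
 * its remaining iovecs) when a parent is given, otherwise from the device's
 * IO channel pool.
 */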
struct ftl_io *
ftl_io_init_internal(const struct ftl_io_init_opts *opts)
{
	struct ftl_io *io = opts->io;
	struct ftl_io *parent = opts->parent;
	struct spdk_ftl_dev *dev = opts->dev;
	const struct iovec *iov;
	size_t iov_cnt, iov_off;

	if (!io) {
		if (parent) {
			io = ftl_io_alloc_child(parent);
		} else {
			io = ftl_io_alloc(ftl_get_io_channel(dev));
		}

		if (!io) {
			return NULL;
		}
	}

	ftl_io_clear(io);
	ftl_io_init(io, dev, opts->cb_fn, opts->cb_ctx, opts->flags | FTL_IO_INTERNAL, opts->type);

	io->batch = opts->batch;
	io->band = opts->band;
	io->md = opts->md;
	io->iov = &io->iov_buf[0];

	if (parent) {
		if (parent->flags & FTL_IO_VECTOR_LBA) {
			io->lba.vector = parent->lba.vector + parent->pos;
		} else {
			io->lba.single = parent->lba.single + parent->pos;
		}

		iov = &parent->iov[parent->iov_pos];
		iov_cnt = parent->iov_cnt - parent->iov_pos;
		iov_off = parent->iov_off;
	} else {
		iov = &opts->iovs[0];
		iov_cnt = opts->iovcnt;
		iov_off = 0;
	}

	/* Some requests (zone resets) do not use iovecs */
	if (iov_cnt > 0) {
		ftl_io_init_iovec(io, iov, iov_cnt, iov_off, opts->num_blocks);
	}

	if (opts->flags & FTL_IO_VECTOR_LBA) {
		io->lba.vector = calloc(io->num_blocks, sizeof(uint64_t));
		if (!io->lba.vector) {
			ftl_io_free(io);
			return NULL;
		}
	}

	return io;
}

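/*
 * Build a write IO that flushes a full write buffer batch to the given
 * address. The IO spans exactly one transfer unit (dev->xfer_size blocks);
 * iovcnt matches num_blocks, which suggests one block-sized iovec per entry
 * of the batch.
 */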
struct ftl_io *
ftl_io_wbuf_init(struct spdk_ftl_dev *dev, struct ftl_addr addr, struct ftl_band *band,
		 struct ftl_batch *batch, ftl_io_fn cb)
{
	struct ftl_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= dev,
		.io		= NULL,
		.batch		= batch,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= 0,
		.type		= FTL_IO_WRITE,
		.num_blocks	= dev->xfer_size,
		.cb_fn		= cb,
		.iovcnt		= dev->xfer_size,
		.md		= batch->metadata,
	};

	memcpy(opts.iovs, batch->iov, sizeof(struct iovec) * dev->xfer_size);

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->addr = addr;

	return io;
}

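/*
 * Build an erase IO for the band. Erases carry no data buffers (iovcnt is 0),
 * so ftl_io_init_iovec() is skipped and num_blocks has to be assigned
 * directly after initialization; the value of 1 in opts is only a
 * placeholder.
 */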
struct ftl_io *
ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb)
{
	struct ftl_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= band->dev,
		.io		= NULL,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= FTL_IO_PHYSICAL_MODE,
		.type		= FTL_IO_ERASE,
		.num_blocks	= 1,
		.cb_fn		= cb,
		.iovcnt		= 0,
		.md		= NULL,
	};

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->num_blocks = num_blocks;

	return io;
}

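/*
 * Initialize an IO on behalf of a user read or write. The user's spdk_ftl_fn
 * completion callback is wrapped in _ftl_user_cb() so it can be invoked
 * through the internal ftl_io_fn signature. A rough usage sketch (not the
 * actual call site; the user entry points live elsewhere, e.g. ftl_core.c):
 *
 *	io = ftl_io_user_init(ioch, lba, num_blocks, iov, iov_cnt,
 *			      cb_fn, cb_ctx, FTL_IO_READ);
 *	if (spdk_unlikely(!io)) {
 *		return -ENOMEM;
 *	}
 *	... submit io for processing ...
 */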
static void
_ftl_user_cb(struct ftl_io *io, void *arg, int status)
{
	io->user_fn(arg, status);
}

struct ftl_io *
ftl_io_user_init(struct spdk_io_channel *_ioch, uint64_t lba, size_t num_blocks, struct iovec *iov,
		 size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_ctx, int type)
{
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(_ioch);
	struct spdk_ftl_dev *dev = ioch->dev;
	struct ftl_io *io;

	io = ftl_io_alloc(_ioch);
	if (spdk_unlikely(!io)) {
		return NULL;
	}

	ftl_io_init(io, dev, _ftl_user_cb, cb_ctx, 0, type);
	io->lba.single = lba;
	io->user_fn = cb_fn;
	io->iov = iov;
	io->iov_cnt = iov_cnt;
	io->num_blocks = num_blocks;

	ftl_trace_lba_io_init(io->dev, io);
	return io;
}

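/* Release the IO's resources and return it to its channel's mempool. */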
static void
_ftl_io_free(struct ftl_io *io)
{
	struct ftl_io_channel *ioch;

	assert(LIST_EMPTY(&io->children));

	if (io->flags & FTL_IO_VECTOR_LBA) {
		free(io->lba.vector);
	}

	if (pthread_spin_destroy(&io->lock)) {
		SPDK_ERRLOG("pthread_spin_destroy failed\n");
	}

	ioch = ftl_io_channel_get_ctx(io->ioch);
	spdk_mempool_put(ioch->io_pool, io);
}

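/*
 * Detach a child from its parent, propagating the first non-zero child status
 * to the parent. Returns true if the parent was already marked done and this
 * was its last outstanding child, i.e. the parent itself can now be
 * completed.
 */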
static bool
ftl_io_remove_child(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool parent_done;

	pthread_spin_lock(&parent->lock);
	LIST_REMOVE(io, child_entry);
	parent_done = parent->done && LIST_EMPTY(&parent->children);
	parent->status = parent->status ? : io->status;
	pthread_spin_unlock(&parent->lock);

	return parent_done;
}

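/*
 * Complete the IO: once no children remain, invoke the completion callback,
 * detach from (and possibly complete) the parent, and free the IO.
 */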
void
ftl_io_complete(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool complete;

	io->flags &= ~FTL_IO_INITIALIZED;

	pthread_spin_lock(&io->lock);
	complete = LIST_EMPTY(&io->children);
	io->done = true;
	pthread_spin_unlock(&io->lock);

	if (complete) {
		if (io->cb_fn) {
			io->cb_fn(io, io->cb_ctx, io->status);
		}

		if (parent && ftl_io_remove_child(io)) {
			ftl_io_complete(parent);
		}

		_ftl_io_free(io);
	}
}

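/*
 * Allocate a child IO that inherits the parent's flags and type and link it
 * onto the parent's children list.
 */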
struct ftl_io *
ftl_io_alloc_child(struct ftl_io *parent)
{
	struct ftl_io *io;

	io = ftl_io_alloc(parent->ioch);
	if (spdk_unlikely(!io)) {
		return NULL;
	}

	ftl_io_init(io, parent->dev, NULL, NULL, parent->flags, parent->type);
	io->parent = parent;

	pthread_spin_lock(&parent->lock);
	LIST_INSERT_HEAD(&parent->children, io, child_entry);
	pthread_spin_unlock(&parent->lock);

	return io;
}

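/*
 * Mark the IO as failed and advance it past all remaining blocks so the
 * normal completion path can finish it.
 */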
void
ftl_io_fail(struct ftl_io *io, int status)
{
	io->status = status;
	ftl_io_advance(io, io->num_blocks - io->pos);
}

void *
ftl_io_get_md(const struct ftl_io *io)
{
	if (!io->md) {
		return NULL;
	}

	return (char *)io->md + io->pos * io->dev->md_size;
}

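/* Allocate a zeroed IO from the channel's mempool and initialize its spinlock. */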
struct ftl_io *
ftl_io_alloc(struct spdk_io_channel *ch)
{
	struct ftl_io *io;
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(ch);

	io = spdk_mempool_get(ioch->io_pool);
	if (!io) {
		return NULL;
	}

	memset(io, 0, ioch->elem_size);
	io->ioch = ch;

	if (pthread_spin_init(&io->lock, PTHREAD_PROCESS_PRIVATE)) {
		SPDK_ERRLOG("pthread_spin_init failed\n");
		spdk_mempool_put(ioch->io_pool, io);
		return NULL;
	}

	return io;
}

void
ftl_io_reinit(struct ftl_io *io, ftl_io_fn cb, void *ctx, int flags, int type)
{
	ftl_io_clear(io);
	ftl_io_init(io, io->dev, cb, ctx, flags, type);
}

void
ftl_io_clear(struct ftl_io *io)
{
	ftl_io_reset(io);

	io->flags = 0;
	io->batch = NULL;
	io->band = NULL;
}

void
ftl_io_reset(struct ftl_io *io)
{
	io->req_cnt = io->pos = io->iov_pos = io->iov_off = 0;
	io->done = false;
}

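/*
 * Free an IO, first detaching it from (and possibly completing) its parent.
 * Unlike ftl_io_complete(), the IO's own completion callback is never
 * invoked.
 */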
void
ftl_io_free(struct ftl_io *io)
{
	struct ftl_io *parent;

	if (!io) {
		return;
	}

	parent = io->parent;
	if (parent && ftl_io_remove_child(io)) {
		ftl_io_complete(parent);
	}

	_ftl_io_free(io);
}

void
ftl_io_call_foreach_child(struct ftl_io *io, int (*callback)(struct ftl_io *))
{
	struct ftl_io *child, *tmp;

	assert(!io->done);

	/*
	 * If the IO doesn't have any children, it means that it directly describes a request (i.e.
	 * all of the buffers, LBAs, etc. are filled). Otherwise the IO only groups together several
	 * requests and may be partially filled, so the callback needs to be called on all of its
	 * children instead.
	 */
	if (LIST_EMPTY(&io->children)) {
		callback(io);
		return;
	}

	LIST_FOREACH_SAFE(child, &io->children, child_entry, tmp) {
		int rc = callback(child);
		if (rc) {
			assert(rc != -EAGAIN);
			ftl_io_fail(io, rc);
			break;
		}
	}

	/*
	 * If all the callbacks were processed or an error occurred, treat this IO as completed.
	 * Multiple calls to ftl_io_call_foreach_child are not supported; resubmissions are supposed
	 * to be handled in the callback.
	 */
	ftl_io_complete(io);
}
537