xref: /spdk/lib/ftl/ftl_io.c (revision dd1c38cc680e4e8ca2642e93bf289072bff7fc3d)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/ftl.h"
#include "spdk/likely.h"

#include "ftl_io.h"
#include "ftl_core.h"
#include "ftl_rwb.h"
#include "ftl_band.h"

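/* Account for one more outstanding request issued on behalf of this I/O.
 * I/O types other than read and erase (i.e. writes) also hold a reference on
 * the band's metadata while the request is outstanding.
 */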
void
ftl_io_inc_req(struct ftl_io *io)
{
	struct ftl_band *band = io->band;

	if (io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
		ftl_band_acquire_md(band);
	}

	__atomic_fetch_add(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);

	++io->req_cnt;
}

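/* Drop the per-request accounting (and band metadata reference) taken in
 * ftl_io_inc_req().
 */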
void
ftl_io_dec_req(struct ftl_io *io)
{
	struct ftl_band *band = io->band;
	unsigned long num_inflight __attribute__((unused));

	if (io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
		ftl_band_release_md(band);
	}

	num_inflight = __atomic_fetch_sub(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);

	assert(num_inflight > 0);
	assert(io->req_cnt > 0);

	--io->req_cnt;
}

struct iovec *
ftl_io_iovec(struct ftl_io *io)
{
	if (io->iov_cnt > 1) {
		return io->iovs;
	} else {
		return &io->iov;
	}
}

uint64_t
ftl_io_current_lba(struct ftl_io *io)
{
	if (io->flags & FTL_IO_VECTOR_LBA) {
		return io->lbas[io->pos];
	} else {
		return io->lba + io->pos;
	}
}

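/* Move the I/O forward by lbk_cnt logical blocks, advancing the iovec cursor
 * (iov_pos/iov_off) past any fully consumed vectors.
 */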
void
ftl_io_advance(struct ftl_io *io, size_t lbk_cnt)
{
	struct iovec *iov = ftl_io_iovec(io);
	size_t iov_lbks;

	io->pos += lbk_cnt;

	if (io->iov_cnt == 0) {
		return;
	}

	while (lbk_cnt > 0) {
		assert(io->iov_pos < io->iov_cnt);
		iov_lbks = iov[io->iov_pos].iov_len / PAGE_SIZE;

		if (io->iov_off + lbk_cnt < iov_lbks) {
			io->iov_off += lbk_cnt;
			break;
		}

		assert(iov_lbks > io->iov_off);
		lbk_cnt -= (iov_lbks - io->iov_off);
		io->iov_off = 0;
		io->iov_pos++;
	}
}

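/* Total number of logical blocks described by an iovec array. */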
size_t
ftl_iovec_num_lbks(struct iovec *iov, size_t iov_cnt)
{
	size_t lbks = 0, i = 0;

	for (; i < iov_cnt; ++i) {
		lbks += iov[i].iov_len / PAGE_SIZE;
	}

	return lbks;
}

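/* Buffer address corresponding to the I/O's current iovec position. */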
void *
ftl_io_iovec_addr(struct ftl_io *io)
{
	assert(io->iov_pos < io->iov_cnt);
	assert(io->iov_off * PAGE_SIZE < ftl_io_iovec(io)[io->iov_pos].iov_len);

	return (char *)ftl_io_iovec(io)[io->iov_pos].iov_base +
	       io->iov_off * PAGE_SIZE;
}

size_t
ftl_io_iovec_len_left(struct ftl_io *io)
{
	struct iovec *iov = ftl_io_iovec(io);
	return iov[io->iov_pos].iov_len / PAGE_SIZE - io->iov_off;
}

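/* Describe buf with iov_cnt iovecs of req_size logical blocks each. For
 * multi-vector I/Os the iovec array is heap-allocated and released in
 * _ftl_io_free().
 */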
int
ftl_io_init_iovec(struct ftl_io *io, void *buf,
		  size_t iov_cnt, size_t req_size)
{
	struct iovec *iov;
	size_t i;

	if (iov_cnt > 1) {
		iov = io->iovs = calloc(iov_cnt, sizeof(*iov));
		if (!iov) {
			return -ENOMEM;
		}
	} else {
		iov = &io->iov;
	}

	io->iov_pos = 0;
	io->iov_cnt = iov_cnt;
	for (i = 0; i < iov_cnt; ++i) {
		iov[i].iov_base = (char *)buf + i * req_size * PAGE_SIZE;
		iov[i].iov_len = req_size * PAGE_SIZE;
	}

	return 0;
}

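/* Common initialization shared by internal and user I/Os. */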
static void
ftl_io_init(struct ftl_io *io, struct spdk_ftl_dev *dev,
	    spdk_ftl_fn fn, void *ctx, int flags, int type)
{
	io->flags |= flags | FTL_IO_INITIALIZED;
	io->type = type;
	io->dev = dev;
	io->lba = FTL_LBA_INVALID;
	io->cb.fn = fn;
	io->cb.ctx = ctx;
	io->trace = ftl_trace_alloc_id(dev);
}

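/* Set up an internal I/O according to opts, allocating a new (possibly child)
 * I/O if opts->io is NULL.
 */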
struct ftl_io *
ftl_io_init_internal(const struct ftl_io_init_opts *opts)
{
	struct ftl_io *io = opts->io;
	struct spdk_ftl_dev *dev = opts->dev;

	if (!io) {
		if (opts->parent) {
			io = ftl_io_alloc_child(opts->parent);
		} else {
			io = ftl_io_alloc(dev->ioch);
		}

		if (!io) {
			return NULL;
		}
	}

	ftl_io_clear(io);
	ftl_io_init(io, dev, opts->fn, io, opts->flags | FTL_IO_INTERNAL, opts->type);

	io->lbk_cnt = opts->iov_cnt * opts->req_size;
	io->rwb_batch = opts->rwb_batch;
	io->band = opts->band;
	io->md = opts->md;

	if (ftl_io_init_iovec(io, opts->data, opts->iov_cnt, opts->req_size)) {
		if (!opts->io) {
			ftl_io_free(io);
		}
		return NULL;
	}

	return io;
}

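/* Build a write I/O covering the data and metadata of a single write buffer
 * (rwb) batch directed at the given band.
 */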
struct ftl_io *
ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_band *band,
		struct ftl_rwb_batch *batch, spdk_ftl_fn cb)
{
	struct ftl_io_init_opts opts = {
		.dev		= dev,
		.io		= NULL,
		.rwb_batch	= batch,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= 0,
		.type		= FTL_IO_WRITE,
		.iov_cnt	= 1,
		.req_size	= dev->xfer_size,
		.fn		= cb,
		.data		= ftl_rwb_batch_get_data(batch),
		.md		= ftl_rwb_batch_get_md(batch),
	};

	return ftl_io_init_internal(&opts);
}

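/* Build an erase I/O for the given band. */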
struct ftl_io *
ftl_io_erase_init(struct ftl_band *band, size_t lbk_cnt, spdk_ftl_fn cb)
{
	struct ftl_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= band->dev,
		.io		= NULL,
		.rwb_batch	= NULL,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= FTL_IO_PPA_MODE,
		.type		= FTL_IO_ERASE,
		.iov_cnt	= 0,
		.req_size	= 1,
		.fn		= cb,
		.data		= NULL,
		.md		= NULL,
	};

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->lbk_cnt = lbk_cnt;

	return io;
}

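/* Initialize an I/O submitted through the public read/write API. Does nothing
 * if the I/O was already initialized.
 */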
void
ftl_io_user_init(struct spdk_ftl_dev *dev, struct ftl_io *io, uint64_t lba, size_t lbk_cnt,
		 struct iovec *iov, size_t iov_cnt,
		 spdk_ftl_fn cb_fn, void *cb_arg, int type)
{
	if (io->flags & FTL_IO_INITIALIZED) {
		return;
	}

	ftl_io_init(io, dev, cb_fn, cb_arg, 0, type);

	io->lba = lba;
	io->lbk_cnt = lbk_cnt;
	io->iov_cnt = iov_cnt;

	if (iov_cnt > 1) {
		io->iovs = iov;
	} else {
		io->iov = *iov;
	}

	ftl_trace_lba_io_init(io->dev, io);
}

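/* Release the I/O's resources and return it to its channel's mempool. */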
static void
_ftl_io_free(struct ftl_io *io)
{
	struct ftl_io_channel *ioch;

	assert(LIST_EMPTY(&io->children));

	if ((io->flags & FTL_IO_INTERNAL) && io->iov_cnt > 1) {
		free(io->iovs);
	}

	if (pthread_spin_destroy(&io->lock)) {
		SPDK_ERRLOG("pthread_spin_destroy failed\n");
	}

	ioch = spdk_io_channel_get_ctx(io->ioch);
	spdk_mempool_put(ioch->io_pool, io);
}

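/* Detach a child I/O from its parent and report whether the parent is now
 * ready to complete (marked done with no children left). The first non-zero
 * child status is propagated to the parent.
 */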
static bool
ftl_io_remove_child(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool parent_done;

	pthread_spin_lock(&parent->lock);
	LIST_REMOVE(io, child_entry);
	parent_done = parent->done && LIST_EMPTY(&parent->children);
	parent->status = parent->status ? : io->status;
	pthread_spin_unlock(&parent->lock);

	return parent_done;
}

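/* Complete the I/O: invoke its callback, propagate completion to the parent if
 * this was its last outstanding child, and free the I/O unless it's marked
 * keep-alive. Completion is deferred while child I/Os are still pending.
 */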
void
ftl_io_complete(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool complete, keep_alive = io->flags & FTL_IO_KEEP_ALIVE;

	io->flags &= ~FTL_IO_INITIALIZED;

	pthread_spin_lock(&io->lock);
	complete = LIST_EMPTY(&io->children);
	io->done = true;
	pthread_spin_unlock(&io->lock);

	if (complete) {
		if (io->cb.fn) {
			io->cb.fn(io->cb.ctx, io->status);
		}

		if (parent && ftl_io_remove_child(io)) {
			ftl_io_complete(parent);
		}

		if (!keep_alive) {
			_ftl_io_free(io);
		}
	}
}

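/* Allocate an I/O and attach it as a child of parent. */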
struct ftl_io *
ftl_io_alloc_child(struct ftl_io *parent)
{
	struct ftl_io *io;

	io = ftl_io_alloc(parent->ioch);
	if (spdk_unlikely(!io)) {
		return NULL;
	}

	io->parent = parent;

	pthread_spin_lock(&parent->lock);
	LIST_INSERT_HEAD(&parent->children, io, child_entry);
	pthread_spin_unlock(&parent->lock);

	return io;
}

void
ftl_io_process_error(struct ftl_io *io, const struct spdk_nvme_cpl *status)
{
	/* TODO: add error handling for specific cases */
	if (status->status.sct == SPDK_NVME_SCT_MEDIA_ERROR &&
	    status->status.sc == SPDK_OCSSD_SC_READ_HIGH_ECC) {
		return;
	}

	io->status = -EIO;
}

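/* Mark the I/O as failed and skip over its remaining blocks. */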
void
ftl_io_fail(struct ftl_io *io, int status)
{
	io->status = status;
	ftl_io_advance(io, io->lbk_cnt - io->pos);
}

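/* Metadata buffer address corresponding to the I/O's current position. */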
void *
ftl_io_get_md(const struct ftl_io *io)
{
	if (!io->md) {
		return NULL;
	}

	return (char *)io->md + io->pos * FTL_BLOCK_SIZE;
}

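/* Allocate and zero an I/O from the channel's mempool. */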
struct ftl_io *
ftl_io_alloc(struct spdk_io_channel *ch)
{
	struct ftl_io *io;
	struct ftl_io_channel *ioch = spdk_io_channel_get_ctx(ch);

	io = spdk_mempool_get(ioch->io_pool);
	if (!io) {
		return NULL;
	}

	memset(io, 0, ioch->elem_size);
	io->ioch = ch;

	if (pthread_spin_init(&io->lock, PTHREAD_PROCESS_PRIVATE)) {
		SPDK_ERRLOG("pthread_spin_init failed\n");
		spdk_mempool_put(ioch->io_pool, io);
		return NULL;
	}

	return io;
}

void
ftl_io_reinit(struct ftl_io *io, spdk_ftl_fn fn, void *ctx, int flags, int type)
{
	ftl_io_clear(io);
	ftl_io_init(io, io->dev, fn, ctx, flags, type);
}

void
ftl_io_clear(struct ftl_io *io)
{
	io->pos = 0;
	io->req_cnt = 0;
	io->iov_pos = 0;
	io->iov_off = 0;
	io->flags = 0;
	io->rwb_batch = NULL;
	io->band = NULL;
}

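/* Free an I/O without invoking its callback, completing the parent if this was
 * its last remaining child.
 */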
void
ftl_io_free(struct ftl_io *io)
{
	struct ftl_io *parent;

	if (!io) {
		return;
	}

	parent = io->parent;
	if (parent && ftl_io_remove_child(io)) {
		ftl_io_complete(parent);
	}

	_ftl_io_free(io);
}
468