/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/ftl.h"
#include "spdk/likely.h"
#include "spdk/util.h"

#include "ftl_io.h"
#include "ftl_core.h"
#include "ftl_band.h"
#include "ftl_debug.h"

void
ftl_io_inc_req(struct ftl_io *io)
{
	struct ftl_band *band = io->band;

	/* Pin the band's LBA map for requests that update it (i.e. everything
	 * except cache, read and erase I/O).
	 */
	if (!(io->flags & FTL_IO_CACHE) && io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
		ftl_band_acquire_lba_map(band);
	}

	__atomic_fetch_add(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);

	++io->req_cnt;
}

void
ftl_io_dec_req(struct ftl_io *io)
{
	struct ftl_band *band = io->band;
	unsigned long num_inflight __attribute__((unused));

	if (!(io->flags & FTL_IO_CACHE) && io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
		ftl_band_release_lba_map(band);
	}

	num_inflight = __atomic_fetch_sub(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);

	assert(num_inflight > 0);
	assert(io->req_cnt > 0);

	--io->req_cnt;
}

struct iovec *
ftl_io_iovec(struct ftl_io *io)
{
	return &io->iov[0];
}

uint64_t
ftl_io_get_lba(const struct ftl_io *io, size_t offset)
{
	assert(offset < io->num_blocks);

	if (io->flags & FTL_IO_VECTOR_LBA) {
		return io->lba.vector[offset];
	} else {
		return io->lba.single + offset;
	}
}

uint64_t
ftl_io_current_lba(const struct ftl_io *io)
{
	return ftl_io_get_lba(io, io->pos);
}

void
ftl_io_advance(struct ftl_io *io, size_t num_blocks)
{
	struct iovec *iov = ftl_io_iovec(io);
	size_t iov_blocks, block_left = num_blocks;

	io->pos += num_blocks;

	if (io->iov_cnt != 0) {
		while (block_left > 0) {
			assert(io->iov_pos < io->iov_cnt);
			iov_blocks = iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE;

			if (io->iov_off + block_left < iov_blocks) {
				io->iov_off += block_left;
				break;
			}

			assert(iov_blocks > io->iov_off);
			block_left -= (iov_blocks - io->iov_off);
			io->iov_off = 0;
			io->iov_pos++;
		}
	}

	if (io->parent) {
		ftl_io_advance(io->parent, num_blocks);
	}
}
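
/*
 * Illustrative sketch (not part of the driver; buf_a/buf_b are hypothetical):
 * how ftl_io_advance() walks the iovec array. Given an io over two iovecs of
 * 2 and 4 blocks, advancing by 3 blocks consumes the whole first iovec plus
 * one block of the second, leaving iov_pos = 1 and iov_off = 1, so
 * ftl_io_iovec_addr() then points one block into the second buffer:
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = buf_a, .iov_len = 2 * FTL_BLOCK_SIZE },
 *		{ .iov_base = buf_b, .iov_len = 4 * FTL_BLOCK_SIZE },
 *	};
 *	// ... io initialized over iov, e.g. via ftl_io_init_internal() ...
 *	ftl_io_advance(io, 3);
 *	assert(io->pos == 3 && io->iov_pos == 1 && io->iov_off == 1);
 */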
size_t
ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt)
{
	size_t num_blocks = 0, i = 0;

	for (; i < iov_cnt; ++i) {
		num_blocks += iov[i].iov_len / FTL_BLOCK_SIZE;
	}

	return num_blocks;
}

void *
ftl_io_iovec_addr(struct ftl_io *io)
{
	assert(io->iov_pos < io->iov_cnt);
	assert(io->iov_off * FTL_BLOCK_SIZE < ftl_io_iovec(io)[io->iov_pos].iov_len);

	return (char *)ftl_io_iovec(io)[io->iov_pos].iov_base +
	       io->iov_off * FTL_BLOCK_SIZE;
}

size_t
ftl_io_iovec_len_left(struct ftl_io *io)
{
	struct iovec *iov = ftl_io_iovec(io);

	return iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE - io->iov_off;
}

static void
ftl_io_init_iovec(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt, size_t iov_off,
		  size_t num_blocks)
{
	size_t offset = 0, num_left;

	io->iov_pos = 0;
	io->iov_cnt = 0;
	io->num_blocks = num_blocks;

	while (offset < num_blocks) {
		assert(io->iov_cnt < FTL_IO_MAX_IOVEC && io->iov_cnt < iov_cnt);

		num_left = spdk_min(iov[io->iov_cnt].iov_len / FTL_BLOCK_SIZE - iov_off,
				    num_blocks);
		io->iov[io->iov_cnt].iov_base = (char *)iov[io->iov_cnt].iov_base +
						iov_off * FTL_BLOCK_SIZE;
		io->iov[io->iov_cnt].iov_len = num_left * FTL_BLOCK_SIZE;

		offset += num_left;
		io->iov_cnt++;
		/* Only the first source iovec can start at a non-zero offset */
		iov_off = 0;
	}
}

/* Trim the io down to num_blocks; only valid before any progress was made */
void
ftl_io_shrink_iovec(struct ftl_io *io, size_t num_blocks)
{
	size_t iov_off = 0, block_off = 0;

	assert(io->num_blocks >= num_blocks);
	assert(io->pos == 0 && io->iov_pos == 0 && io->iov_off == 0);

	for (; iov_off < io->iov_cnt; ++iov_off) {
		size_t num_iov = io->iov[iov_off].iov_len / FTL_BLOCK_SIZE;
		size_t num_left = num_blocks - block_off;

		if (num_iov >= num_left) {
			io->iov[iov_off].iov_len = num_left * FTL_BLOCK_SIZE;
			io->iov_cnt = iov_off + 1;
			io->num_blocks = num_blocks;
			break;
		}

		block_off += num_iov;
	}
}

static void
ftl_io_init(struct ftl_io *io, struct spdk_ftl_dev *dev,
	    ftl_io_fn fn, void *ctx, int flags, int type)
{
	io->flags |= flags | FTL_IO_INITIALIZED;
	io->type = type;
	io->dev = dev;
	io->lba.single = FTL_LBA_INVALID;
	io->addr.offset = FTL_ADDR_INVALID;
	io->cb_fn = fn;
	io->cb_ctx = ctx;
	io->trace = ftl_trace_alloc_id(dev);
}

struct ftl_io *
ftl_io_init_internal(const struct ftl_io_init_opts *opts)
{
	struct ftl_io *io = opts->io;
	struct ftl_io *parent = opts->parent;
	struct spdk_ftl_dev *dev = opts->dev;
	const struct iovec *iov;
	size_t iov_cnt, iov_off;

	if (!io) {
		if (parent) {
			io = ftl_io_alloc_child(parent);
		} else {
			io = ftl_io_alloc(ftl_get_io_channel(dev));
		}

		if (!io) {
			return NULL;
		}
	}

	ftl_io_clear(io);
	ftl_io_init(io, dev, opts->cb_fn, opts->cb_ctx, opts->flags | FTL_IO_INTERNAL, opts->type);

	io->batch = opts->batch;
	io->band = opts->band;
	io->md = opts->md;
	io->iov = &io->iov_buf[0];

	if (parent) {
		if (parent->flags & FTL_IO_VECTOR_LBA) {
			io->lba.vector = parent->lba.vector + parent->pos;
		} else {
			io->lba.single = parent->lba.single + parent->pos;
		}

		iov = &parent->iov[parent->iov_pos];
		iov_cnt = parent->iov_cnt - parent->iov_pos;
		iov_off = parent->iov_off;
	} else {
		iov = &opts->iovs[0];
		iov_cnt = opts->iovcnt;
		iov_off = 0;
	}

	/* Some requests (zone resets) do not use iovecs */
	if (iov_cnt > 0) {
		ftl_io_init_iovec(io, iov, iov_cnt, iov_off, opts->num_blocks);
	}

	if (opts->flags & FTL_IO_VECTOR_LBA) {
		io->lba.vector = calloc(io->num_blocks, sizeof(uint64_t));
		if (!io->lba.vector) {
			ftl_io_free(io);
			return NULL;
		}
	}

	return io;
}
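
/*
 * Worked example (illustrative only): ftl_io_init_iovec() with iov_off != 0
 * trims the first source iovec. Reusing the sketch above, with the parent at
 * iov_pos = 1, iov_off = 1, a 3-block child created through
 * ftl_io_init_internal() with opts.parent = parent inherits the parent's
 * remaining iovec range and ends up with a single iovec starting one block
 * into buf_b:
 *
 *	io->iov_cnt == 1
 *	io->iov[0].iov_base == (char *)buf_b + 1 * FTL_BLOCK_SIZE
 *	io->iov[0].iov_len  == 3 * FTL_BLOCK_SIZE
 */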
struct ftl_io *
ftl_io_wbuf_init(struct spdk_ftl_dev *dev, struct ftl_addr addr, struct ftl_band *band,
		 struct ftl_batch *batch, ftl_io_fn cb)
{
	struct ftl_io *io;
	struct ftl_io_init_opts opts = {
		.dev = dev,
		.io = NULL,
		.batch = batch,
		.band = band,
		.size = sizeof(struct ftl_io),
		.flags = 0,
		.type = FTL_IO_WRITE,
		.num_blocks = dev->xfer_size,
		.cb_fn = cb,
		.iovcnt = dev->xfer_size,
		.md = batch->metadata,
	};

	memcpy(opts.iovs, batch->iov, sizeof(struct iovec) * dev->xfer_size);

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->addr = addr;

	return io;
}

struct ftl_io *
ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb)
{
	struct ftl_io *io;
	struct ftl_io_init_opts opts = {
		.dev = band->dev,
		.io = NULL,
		.band = band,
		.size = sizeof(struct ftl_io),
		.flags = FTL_IO_PHYSICAL_MODE,
		.type = FTL_IO_ERASE,
		.num_blocks = 1,
		.cb_fn = cb,
		.iovcnt = 0,
		.md = NULL,
	};

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->num_blocks = num_blocks;

	return io;
}

static void
_ftl_user_cb(struct ftl_io *io, void *arg, int status)
{
	io->user_fn(arg, status);
}

struct ftl_io *
ftl_io_user_init(struct spdk_io_channel *_ioch, uint64_t lba, size_t num_blocks, struct iovec *iov,
		 size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_ctx, int type)
{
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(_ioch);
	struct spdk_ftl_dev *dev = ioch->dev;
	struct ftl_io *io;

	io = ftl_io_alloc(_ioch);
	if (spdk_unlikely(!io)) {
		return NULL;
	}

	ftl_io_init(io, dev, _ftl_user_cb, cb_ctx, 0, type);
	io->lba.single = lba;
	io->user_fn = cb_fn;
	io->iov = iov;
	io->iov_cnt = iov_cnt;
	io->num_blocks = num_blocks;

	ftl_trace_lba_io_init(io->dev, io);
	return io;
}

static void
_ftl_io_free(struct ftl_io *io)
{
	struct ftl_io_channel *ioch;

	assert(LIST_EMPTY(&io->children));

	if (io->flags & FTL_IO_VECTOR_LBA) {
		free(io->lba.vector);
	}

	if (pthread_spin_destroy(&io->lock)) {
		SPDK_ERRLOG("pthread_spin_destroy failed\n");
	}

	ioch = ftl_io_channel_get_ctx(io->ioch);
	spdk_mempool_put(ioch->io_pool, io);
}

static bool
ftl_io_remove_child(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool parent_done;

	pthread_spin_lock(&parent->lock);
	LIST_REMOVE(io, child_entry);
	parent_done = parent->done && LIST_EMPTY(&parent->children);
	/* Keep the first non-zero status (GCC's "?:" extension), so the
	 * earliest child error wins.
	 */
	parent->status = parent->status ? : io->status;
	pthread_spin_unlock(&parent->lock);

	return parent_done;
}
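
/*
 * Usage sketch (illustrative; read_done_cb and bdev_io are hypothetical): a
 * caller such as the FTL bdev layer would wrap a user read roughly like this
 * before handing the io to the core read path:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 8 * FTL_BLOCK_SIZE };
 *
 *	io = ftl_io_user_init(ioch, lba, 8, &iov, 1, read_done_cb, bdev_io,
 *			      FTL_IO_READ);
 *	if (!io) {
 *		// io_pool exhausted; caller retries later
 *	}
 *
 * Note that completion is routed through _ftl_user_cb(), so read_done_cb()
 * receives (cb_ctx, status) rather than the ftl_io itself.
 */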
void
ftl_io_complete(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool complete;

	io->flags &= ~FTL_IO_INITIALIZED;

	pthread_spin_lock(&io->lock);
	complete = LIST_EMPTY(&io->children);
	io->done = true;
	pthread_spin_unlock(&io->lock);

	if (complete) {
		if (io->cb_fn) {
			io->cb_fn(io, io->cb_ctx, io->status);
		}

		if (parent && ftl_io_remove_child(io)) {
			ftl_io_complete(parent);
		}

		_ftl_io_free(io);
	}
}

struct ftl_io *
ftl_io_alloc_child(struct ftl_io *parent)
{
	struct ftl_io *io;

	io = ftl_io_alloc(parent->ioch);
	if (spdk_unlikely(!io)) {
		return NULL;
	}

	ftl_io_init(io, parent->dev, NULL, NULL, parent->flags, parent->type);
	io->parent = parent;

	pthread_spin_lock(&parent->lock);
	LIST_INSERT_HEAD(&parent->children, io, child_entry);
	pthread_spin_unlock(&parent->lock);

	return io;
}

void
ftl_io_fail(struct ftl_io *io, int status)
{
	io->status = status;
	/* Skip past all remaining blocks so the io is treated as fully processed */
	ftl_io_advance(io, io->num_blocks - io->pos);
}

void *
ftl_io_get_md(const struct ftl_io *io)
{
	if (!io->md) {
		return NULL;
	}

	return (char *)io->md + io->pos * io->dev->md_size;
}

struct ftl_io *
ftl_io_alloc(struct spdk_io_channel *ch)
{
	struct ftl_io *io;
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(ch);

	io = spdk_mempool_get(ioch->io_pool);
	if (!io) {
		return NULL;
	}

	memset(io, 0, ioch->elem_size);
	io->ioch = ch;

	if (pthread_spin_init(&io->lock, PTHREAD_PROCESS_PRIVATE)) {
		SPDK_ERRLOG("pthread_spin_init failed\n");
		spdk_mempool_put(ioch->io_pool, io);
		return NULL;
	}

	return io;
}

void
ftl_io_reinit(struct ftl_io *io, ftl_io_fn cb, void *ctx, int flags, int type)
{
	ftl_io_clear(io);
	ftl_io_init(io, io->dev, cb, ctx, flags, type);
}

void
ftl_io_clear(struct ftl_io *io)
{
	ftl_io_reset(io);

	io->flags = 0;
	io->batch = NULL;
	io->band = NULL;
}

void
ftl_io_reset(struct ftl_io *io)
{
	io->req_cnt = io->pos = io->iov_pos = io->iov_off = 0;
	io->done = false;
}

void
ftl_io_free(struct ftl_io *io)
{
	struct ftl_io *parent;

	if (!io) {
		return;
	}

	parent = io->parent;
	if (parent && ftl_io_remove_child(io)) {
		ftl_io_complete(parent);
	}

	_ftl_io_free(io);
}
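
/*
 * Illustrative sketch of the parent/child lifecycle (not part of the driver):
 * a parent's cb_fn fires only after ftl_io_complete() has been called on the
 * parent itself and the last outstanding child has detached via
 * ftl_io_remove_child():
 *
 *	child_a = ftl_io_alloc_child(parent);
 *	child_b = ftl_io_alloc_child(parent);
 *	// ... submit both children ...
 *	ftl_io_complete(parent);	// parent->done = true, children pending
 *	ftl_io_complete(child_a);	// detaches; parent still waits
 *	ftl_io_complete(child_b);	// last child: parent completes, both freed
 *
 * A child's non-zero status propagates to the parent, first error wins.
 */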
void
ftl_io_call_foreach_child(struct ftl_io *io, int (*callback)(struct ftl_io *))
{
	struct ftl_io *child, *tmp;

	assert(!io->done);

	/*
	 * If the IO doesn't have any children, it means that it directly describes a request (i.e.
	 * all of the buffers, LBAs, etc. are filled). Otherwise the IO only groups together several
	 * requests and may be partially filled, so the callback needs to be called on all of its
	 * children instead.
	 */
	if (LIST_EMPTY(&io->children)) {
		callback(io);
		return;
	}

	LIST_FOREACH_SAFE(child, &io->children, child_entry, tmp) {
		int rc = callback(child);
		if (rc) {
			assert(rc != -EAGAIN);
			ftl_io_fail(io, rc);
			break;
		}
	}

	/*
	 * If all the callbacks were processed or an error occurred, treat this IO as completed.
	 * Multiple calls to ftl_io_call_foreach_child are not supported, resubmissions are supposed
	 * to be handled in the callback.
	 */
	ftl_io_complete(io);
}
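
/*
 * Sketch of a callback suitable for ftl_io_call_foreach_child() (hypothetical
 * helpers; the real submit callbacks live in the core write path). It must
 * return 0 on success or a negative errno, and since the io is completed
 * unconditionally afterwards, any retry logic belongs inside the callback:
 *
 *	static int
 *	submit_child_cb(struct ftl_io *child)
 *	{
 *		if (!can_submit(child->dev)) {
 *			return -ENOMEM;	// never -EAGAIN, asserted above
 *		}
 *
 *		return submit_to_band(child);
 *	}
 *
 *	ftl_io_call_foreach_child(parent, submit_child_cb);
 */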