/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "ftl_band.h"
#include "ftl_core.h"
#include "ftl_debug.h"
#include "ftl_io.h"
#include "ftl_internal.h"
#include "spdk/ftl.h"
#include "spdk/likely.h"

struct ftl_reloc;
struct ftl_band_reloc;

/* TODO: Should probably change the move naming nomenclature to something more descriptive */
enum ftl_reloc_move_state {
	FTL_RELOC_STATE_READ = 0,
	FTL_RELOC_STATE_PIN,
	FTL_RELOC_STATE_WRITE,
	FTL_RELOC_STATE_WAIT,
	FTL_RELOC_STATE_HALT,

	FTL_RELOC_STATE_MAX
};

struct ftl_reloc_move {
	/* FTL device */
	struct spdk_ftl_dev *dev;

	struct ftl_reloc *reloc;

	/* Request for doing IO */
	struct ftl_rq *rq;

	/* Move state (read, write) */
	enum ftl_reloc_move_state state;

	/* Entry of circular list */
	TAILQ_ENTRY(ftl_reloc_move) qentry;
};

struct ftl_reloc {
	/* Device associated with relocate */
	struct spdk_ftl_dev *dev;

	/* Indicates relocate is about to halt */
	bool halt;

	/* Band currently being read for relocation */
	struct ftl_band *band;

	/* Bands already read, but waiting for GC to finish */
	TAILQ_HEAD(, ftl_band) band_done;
	size_t band_done_count;

	/* Flag indicating reloc is waiting for a new band */
	bool band_waiting;

	/* Maximum number of IOs per band */
	size_t max_qdepth;

	/* Pool of free move objects */
	struct ftl_reloc_move *move_buffer;

	/* Array of mover queues, one per state */
	TAILQ_HEAD(, ftl_reloc_move) move_queue[FTL_RELOC_STATE_MAX];
};

static void move_read_cb(struct ftl_rq *rq);
static void move_write_cb(struct ftl_rq *rq);
static void move_set_state(struct ftl_reloc_move *mv, enum ftl_reloc_move_state state);
static void move_write(struct ftl_reloc *reloc, struct ftl_reloc_move *mv);
static void move_read_error_cb(struct ftl_rq *rq, struct ftl_band *band, uint64_t idx,
			       uint64_t count);

static void
move_deinit(struct ftl_reloc_move *mv)
{
	assert(mv);
	ftl_rq_del(mv->rq);
}

static int
move_init(struct ftl_reloc *reloc, struct ftl_reloc_move *mv)
{
	mv->state = FTL_RELOC_STATE_HALT;
	TAILQ_INSERT_TAIL(&reloc->move_queue[FTL_RELOC_STATE_HALT], mv, qentry);

	mv->reloc = reloc;
	mv->dev = reloc->dev;
	mv->rq = ftl_rq_new(mv->dev, mv->dev->md_size);

	if (!mv->rq) {
		return -ENOMEM;
	}
	mv->rq->owner.priv = mv;

	return 0;
}

struct ftl_reloc *
ftl_reloc_init(struct spdk_ftl_dev *dev)
{
	struct ftl_reloc *reloc;
	struct ftl_reloc_move *move;
	size_t i, count;

	reloc = calloc(1, sizeof(*reloc));
	if (!reloc) {
		return NULL;
	}

	reloc->dev = dev;
	reloc->halt = true;
	reloc->max_qdepth = dev->sb->max_reloc_qdepth;

	reloc->move_buffer = calloc(reloc->max_qdepth, sizeof(*reloc->move_buffer));
	if (!reloc->move_buffer) {
		FTL_ERRLOG(dev, "Failed to initialize reloc moves pool");
		goto error;
	}

	/* Initialize mover queues */
	count = SPDK_COUNTOF(reloc->move_queue);
	for (i = 0; i < count; ++i) {
		TAILQ_INIT(&reloc->move_queue[i]);
	}

	for (i = 0; i < reloc->max_qdepth; ++i) {
		move = &reloc->move_buffer[i];

		if (move_init(reloc, move)) {
			goto error;
		}
	}

	TAILQ_INIT(&reloc->band_done);

	return reloc;
error:
	ftl_reloc_free(reloc);
	return NULL;
}
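/*
 * A sketch of the mover lifecycle, inferred from the code in this file:
 * each of the max_qdepth movers cycles through the state queues as a
 * small state machine driven by ftl_reloc():
 *
 *   HALT -> READ  -> WAIT (reads in flight)
 *        -> PIN   -> WAIT (L2P pins in flight)
 *        -> WRITE -> WAIT (write in flight) -> READ -> ...
 *
 * A mover parks in WAIT while its ftl_rq is owned by an asynchronous
 * operation; the completion callbacks (move_read_cb, move_pin_cb,
 * move_write_cb) advance it to the next state.
 */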
struct ftl_reloc_task_fini {
	struct ftl_reloc_task *task;
	spdk_msg_fn cb;
	void *cb_arg;
};

void
ftl_reloc_free(struct ftl_reloc *reloc)
{
	size_t i;

	if (!reloc) {
		return;
	}

	if (reloc->move_buffer) {
		for (i = 0; i < reloc->max_qdepth; ++i) {
			move_deinit(&reloc->move_buffer[i]);
		}
	}

	free(reloc->move_buffer);
	free(reloc);
}

void
ftl_reloc_halt(struct ftl_reloc *reloc)
{
	reloc->halt = true;
}

void
ftl_reloc_resume(struct ftl_reloc *reloc)
{
	struct ftl_reloc_move *mv, *next;

	reloc->halt = false;

	TAILQ_FOREACH_SAFE(mv, &reloc->move_queue[FTL_RELOC_STATE_HALT], qentry, next) {
		move_set_state(mv, FTL_RELOC_STATE_READ);
	}
}

static void
move_set_state(struct ftl_reloc_move *mv, enum ftl_reloc_move_state state)
{
	struct ftl_reloc *reloc = mv->reloc;

	switch (state) {
	case FTL_RELOC_STATE_READ:
		mv->rq->owner.cb = move_read_cb;
		mv->rq->owner.error = move_read_error_cb;
		mv->rq->iter.idx = 0;
		mv->rq->iter.count = 0;
		mv->rq->success = true;
		break;

	case FTL_RELOC_STATE_WRITE:
		mv->rq->owner.cb = move_write_cb;
		mv->rq->owner.error = NULL;
		break;

	case FTL_RELOC_STATE_PIN:
	case FTL_RELOC_STATE_WAIT:
	case FTL_RELOC_STATE_HALT:
		break;

	default:
		ftl_abort();
		break;
	}

	if (mv->state != state) {
		/* Remove the mover from the previous queue */
		TAILQ_REMOVE(&reloc->move_queue[mv->state], mv, qentry);
		/* Insert the mover into the new queue */
		TAILQ_INSERT_TAIL(&reloc->move_queue[state], mv, qentry);
		/* Update the state */
		mv->state = state;
	}
}

static void
move_get_band_cb(struct ftl_band *band, void *cntx, bool status)
{
	struct ftl_reloc *reloc = cntx;

	if (spdk_likely(status)) {
		reloc->band = band;
		ftl_band_iter_init(band);
	}
	reloc->band_waiting = false;
}

static void
move_grab_new_band(struct ftl_reloc *reloc)
{
	if (!reloc->band_waiting) {
		if (!ftl_needs_reloc(reloc->dev)) {
			return;
		}

		/* Limit the number of simultaneously relocated bands */
		if (reloc->band_done_count > 2) {
			return;
		}

		reloc->band_waiting = true;
		ftl_band_get_next_gc(reloc->dev, move_get_band_cb, reloc);
	}
}

static struct ftl_band *
move_get_band(struct ftl_reloc *reloc)
{
	struct ftl_band *band = reloc->band;

	if (!band) {
		move_grab_new_band(reloc);
		return NULL;
	}

	if (!ftl_band_filled(band, band->md->iter.offset)) {
		/* Band not fully read yet, we can continue reading */
		return band;
	}

	TAILQ_INSERT_TAIL(&reloc->band_done, band, queue_entry);
	reloc->band_done_count++;
	reloc->band = NULL;

	return NULL;
}
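/*
 * A short summary of the band hand-off, inferred from move_get_band()
 * above and move_release_bands() below: the reloc holds at most one band
 * open for reading at a time. Once every valid block has been read, the
 * band is parked on the band_done list until all of its relocated data
 * has been written out by the GC writer; it is then either freed (when
 * fully empty) or closed again for another relocation pass after a read
 * error left valid blocks behind.
 */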
static void
move_advance_rq(struct ftl_rq *rq)
{
	struct ftl_band *band = rq->io.band;
	uint64_t offset, i;
	struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];

	assert(rq->iter.idx + rq->iter.count <= rq->num_blocks);

	for (i = 0; i < rq->iter.count; i++) {
		offset = ftl_band_block_offset_from_addr(band, rq->io.addr);

		assert(offset < ftl_get_num_blocks_in_band(band->dev));
		assert(ftl_band_block_offset_valid(band, offset));

		entry->lba = band->p2l_map.band_map[offset];
		entry->addr = rq->io.addr;
		entry->owner.priv = band;

		entry++;
		rq->io.addr = ftl_band_next_addr(band, rq->io.addr, 1);
		band->owner.cnt++;
	}

	/* Increase QD for the request */
	rq->iter.qd++;

	/* Advance the request iterator */
	rq->iter.idx += rq->iter.count;
}

static void
move_init_entries(struct ftl_rq *rq, uint64_t idx, uint64_t count)
{
	uint64_t i = 0;
	struct ftl_rq_entry *iter = &rq->entries[idx];

	assert(idx + count <= rq->num_blocks);

	while (i < count) {
		iter->addr = FTL_ADDR_INVALID;
		iter->owner.priv = NULL;
		iter->lba = FTL_LBA_INVALID;
		iter++;
		i++;
	}
}

static void
move_read_error_cb(struct ftl_rq *rq, struct ftl_band *band, uint64_t idx, uint64_t count)
{
	move_init_entries(rq, idx, count);
	band->owner.cnt -= count;
}

static void
move_read_cb(struct ftl_rq *rq)
{
	struct ftl_reloc_move *mv = rq->owner.priv;

	/* Decrease QD of the request */
	assert(rq->iter.qd > 0);
	rq->iter.qd--;

	if (rq->iter.idx != rq->num_blocks || rq->iter.qd) {
		return;
	}

	move_set_state(mv, FTL_RELOC_STATE_PIN);
}

static void
move_rq_pad(struct ftl_rq *rq, struct ftl_band *band)
{
	struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];

	for (; rq->iter.idx < rq->num_blocks; ++rq->iter.idx) {
		entry->addr = rq->io.addr;
		entry->owner.priv = band;
		entry->lba = FTL_LBA_INVALID;
		entry++;
		rq->io.addr = ftl_band_next_addr(band, rq->io.addr, 1);
		band->owner.cnt++;
	}

	assert(rq->iter.idx == rq->num_blocks);
}

static void
move_read(struct ftl_reloc *reloc, struct ftl_reloc_move *mv, struct ftl_band *band)
{
	struct ftl_rq *rq = mv->rq;
	uint64_t blocks = ftl_get_num_blocks_in_band(band->dev);
	uint64_t pos = band->md->iter.offset;
	uint64_t begin = ftl_bitmap_find_first_set(band->p2l_map.valid, pos, UINT64_MAX);
	uint64_t end, band_left, rq_left;

	if (spdk_likely(begin < blocks)) {
		if (begin > pos) {
			ftl_band_iter_advance(band, begin - pos);
		} else if (begin == pos) {
			/* Valid block at the position of the iterator */
		} else {
			/* Inconsistent state */
			ftl_abort();
		}
	} else if (UINT64_MAX == begin) {
		/* No more valid LBAs in the band */
		band_left = ftl_band_user_blocks_left(band, pos);
		ftl_band_iter_advance(band, band_left);

		assert(ftl_band_filled(band, band->md->iter.offset));

		if (rq->iter.idx) {
			move_rq_pad(rq, band);
			move_set_state(mv, FTL_RELOC_STATE_WAIT);
			rq->iter.qd++;
			rq->owner.cb(rq);
		}

		return;
	} else {
		/* Inconsistent state */
		ftl_abort();
	}

	rq_left = rq->num_blocks - rq->iter.idx;
	assert(rq_left > 0);

	/* Find the next clear bit, but no further than the max request count */
	end = ftl_bitmap_find_first_clear(band->p2l_map.valid, begin + 1, begin + rq_left);
	if (end != UINT64_MAX) {
		rq_left = end - begin;
	}

	band_left = ftl_band_user_blocks_left(band, band->md->iter.offset);
	rq->iter.count = spdk_min(rq_left, band_left);

	ftl_band_rq_read(band, rq);

	move_advance_rq(rq);

	/* Advance the band iterator */
	ftl_band_iter_advance(band, rq->iter.count);

	/* If the band is fully read, pad the rest of the request */
	if (ftl_band_filled(band, band->md->iter.offset)) {
		move_rq_pad(rq, band);
	}

	if (rq->iter.idx == rq->num_blocks) {
		/*
		 * All request entries scheduled for reading,
		 * we can change the state to waiting
		 */
		move_set_state(mv, FTL_RELOC_STATE_WAIT);
	}
}
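/*
 * An illustration of the read scheduling above, using hypothetical
 * numbers (not from the original sources). Assume a request with
 * num_blocks = 8, iter.idx = 0, and this valid-block bitmap around the
 * band iterator:
 *
 *   pos = 10, valid bits: 0 0 1 1 1 0 ...
 *                         ^pos  ^begin = 12
 *
 * move_read() advances the band iterator by begin - pos = 2, finds the
 * first clear bit after begin (end = 15), so rq_left = end - begin = 3,
 * and issues a 3-block contiguous read. The remaining 5 request entries
 * are filled by subsequent move_read() calls (the mover stays in READ),
 * or padded by move_rq_pad() once the band runs out of blocks.
 */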
static void
move_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
{
	struct ftl_reloc_move *mv = pin_ctx->cb_ctx;
	struct ftl_rq *rq = mv->rq;

	if (status) {
		rq->iter.status = status;
		pin_ctx->lba = FTL_LBA_INVALID;
	}

	if (--rq->iter.remaining == 0) {
		if (rq->iter.status) {
			/* Unpin and try again */
			ftl_rq_unpin(rq);
			move_set_state(mv, FTL_RELOC_STATE_PIN);
			return;
		}

		move_set_state(mv, FTL_RELOC_STATE_WRITE);
	}
}

static void
move_pin(struct ftl_reloc_move *mv)
{
	struct ftl_rq *rq = mv->rq;
	struct ftl_rq_entry *entry = rq->entries;
	uint64_t i;

	move_set_state(mv, FTL_RELOC_STATE_WAIT);

	rq->iter.remaining = rq->iter.count = rq->num_blocks;
	rq->iter.status = 0;

	for (i = 0; i < rq->num_blocks; i++) {
		if (entry->lba != FTL_LBA_INVALID) {
			ftl_l2p_pin(rq->dev, entry->lba, 1, move_pin_cb, mv, &entry->l2p_pin_ctx);
		} else {
			ftl_l2p_pin_skip(rq->dev, move_pin_cb, mv, &entry->l2p_pin_ctx);
		}
		entry++;
	}
}

static void
move_finish_write(struct ftl_rq *rq)
{
	uint64_t i;
	struct spdk_ftl_dev *dev = rq->dev;
	struct ftl_rq_entry *iter = rq->entries;
	ftl_addr addr = rq->io.addr;
	struct ftl_band *rq_band = rq->io.band;
	struct ftl_band *band;

	for (i = 0; i < rq->num_blocks; ++i, ++iter) {
		band = iter->owner.priv;

		if (band) {
			assert(band->owner.cnt > 0);
			band->owner.cnt--;
		}
		if (iter->lba != FTL_LBA_INVALID) {
			/* Update the L2P table */
			ftl_l2p_update_base(dev, iter->lba, addr, iter->addr);
			ftl_l2p_unpin(dev, iter->lba, 1);
		}
		addr = ftl_band_next_addr(rq_band, addr, 1);
	}
}

static void
move_write_cb(struct ftl_rq *rq)
{
	struct ftl_reloc_move *mv = rq->owner.priv;

	assert(rq->iter.qd == 1);
	rq->iter.qd--;

	if (spdk_likely(rq->success)) {
		move_finish_write(rq);
		move_set_state(mv, FTL_RELOC_STATE_READ);
	} else {
		/* Write failed, repeat the write */
		move_set_state(mv, FTL_RELOC_STATE_WRITE);
	}
}

static void
move_write(struct ftl_reloc *reloc, struct ftl_reloc_move *mv)
{
	struct spdk_ftl_dev *dev = mv->dev;
	struct ftl_rq *rq = mv->rq;

	assert(rq->iter.idx == rq->num_blocks);

	/* The request contains data to be placed in a new location, submit it */
	ftl_writer_queue_rq(&dev->writer_gc, rq);
	rq->iter.qd++;

	move_set_state(mv, FTL_RELOC_STATE_WAIT);
}

static void
move_run(struct ftl_reloc *reloc, struct ftl_reloc_move *mv)
{
	struct ftl_band *band;

	switch (mv->state) {
	case FTL_RELOC_STATE_READ:
		if (spdk_unlikely(reloc->halt)) {
			move_set_state(mv, FTL_RELOC_STATE_HALT);
			break;
		}

		band = move_get_band(reloc);
		if (!band) {
			break;
		}

		move_read(reloc, mv, band);
		break;

	case FTL_RELOC_STATE_PIN:
		move_pin(mv);
		break;

	case FTL_RELOC_STATE_WRITE:
		if (spdk_unlikely(reloc->halt)) {
			ftl_rq_unpin(mv->rq);
			move_set_state(mv, FTL_RELOC_STATE_HALT);
			break;
		}

		move_write(reloc, mv);
		break;

	case FTL_RELOC_STATE_HALT:
	case FTL_RELOC_STATE_WAIT:
		break;

	default:
		assert(0);
		ftl_abort();
		break;
	}
}
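/*
 * A note on the pin/write hand-off above (a summary of the code, with no
 * new API assumed): before the relocated data is handed to the GC writer,
 * every valid LBA in the request is pinned in the L2P so that its mapping
 * can be updated once the write completes. If any pin fails, move_pin_cb()
 * unpins the whole request and retries from FTL_RELOC_STATE_PIN. After a
 * successful write, move_finish_write() moves each L2P entry from its old
 * band address (iter->addr) to the new write location and drops the pin.
 */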
static void
move_handle_band_error(struct ftl_band *band)
{
	struct ftl_reloc *reloc = band->dev->reloc;

	/*
	 * An error occurred while reading the band, so it still holds valid
	 * blocks. Remove it from the done list and close it again; it will
	 * be picked up for relocation shortly.
	 */
	TAILQ_REMOVE(&reloc->band_done, band, queue_entry);
	reloc->band_done_count--;

	band->md->state = FTL_BAND_STATE_CLOSING;
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}

static void
move_release_bands(struct ftl_reloc *reloc)
{
	struct ftl_band *band;

	if (TAILQ_EMPTY(&reloc->band_done)) {
		return;
	}

	band = TAILQ_FIRST(&reloc->band_done);

	if (band->owner.cnt || ftl_band_qd(band)) {
		/* The band is still in use */
		return;
	}

	if (ftl_band_empty(band)) {
		assert(ftl_band_filled(band, band->md->iter.offset));
		TAILQ_REMOVE(&reloc->band_done, band, queue_entry);
		reloc->band_done_count--;
		ftl_band_free(band);
	} else {
		move_handle_band_error(band);
	}
}

bool
ftl_reloc_is_halted(const struct ftl_reloc *reloc)
{
	size_t i, count;

	count = SPDK_COUNTOF(reloc->move_queue);
	for (i = 0; i < count; ++i) {
		if (i == FTL_RELOC_STATE_HALT) {
			continue;
		}

		if (!TAILQ_EMPTY(&reloc->move_queue[i])) {
			return false;
		}
	}

	return true;
}

void
ftl_reloc(struct ftl_reloc *reloc)
{
	size_t i, count;

	count = SPDK_COUNTOF(reloc->move_queue);
	for (i = 0; i < count; ++i) {
		if (TAILQ_EMPTY(&reloc->move_queue[i])) {
			continue;
		}

		move_run(reloc, TAILQ_FIRST(&reloc->move_queue[i]));
	}

	move_release_bands(reloc);
}
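/*
 * A minimal usage sketch, as an illustration only, assuming the core
 * drives this module from its poller in the way the calls above suggest
 * (device_running is a placeholder for the caller's own loop condition):
 *
 *   struct ftl_reloc *reloc = ftl_reloc_init(dev);
 *
 *   ftl_reloc_resume(reloc);          // movers leave the HALT queue
 *   while (device_running) {
 *           ftl_reloc(reloc);         // advance each state queue one step
 *   }
 *
 *   ftl_reloc_halt(reloc);            // request a stop
 *   while (!ftl_reloc_is_halted(reloc)) {
 *           ftl_reloc(reloc);         // drain in-flight moves
 *   }
 *   ftl_reloc_free(reloc);
 */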