/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/likely.h"
#include "spdk_internal/log.h"
#include "spdk/ftl.h"

#include "ftl_reloc.h"
#include "ftl_core.h"
#include "ftl_io.h"
#include "ftl_rwb.h"
#include "ftl_band.h"
#include "ftl_debug.h"

/* Maximum active reloc moves */
#define FTL_RELOC_MAX_MOVES 256

struct ftl_reloc;
struct ftl_band_reloc;

enum ftl_reloc_move_state {
	FTL_RELOC_STATE_READ_LBA_MAP,
	FTL_RELOC_STATE_READ,
	FTL_RELOC_STATE_WRITE,
};

struct ftl_reloc_move {
	struct ftl_band_reloc *breloc;

	/* Start ppa */
	struct ftl_ppa ppa;

	/* Number of logical blocks */
	size_t lbk_cnt;

	/* Data buffer */
	void *data;

	/* Move state (read lba_map, read, write) */
	enum ftl_reloc_move_state state;

	/* IO associated with move */
	struct ftl_io *io;
};

struct ftl_band_reloc {
	struct ftl_reloc *parent;

	/* Band being relocated */
	struct ftl_band *band;

	/* Number of logical blocks to be relocated */
	size_t num_lbks;

	/* Bitmap of logical blocks to be relocated */
	struct spdk_bit_array *reloc_map;

	/* Indicates the band is being actively processed */
	int active;

	/* Reloc map iterator */
	struct {
		/* Array of chunk offsets */
		size_t *chk_offset;

		/* Current chunk */
		size_t chk_current;
	} iter;

	/* Number of outstanding moves */
	size_t num_outstanding;

	/* Pool of move objects */
	struct ftl_reloc_move *moves;

	/* Move queue */
	struct spdk_ring *move_queue;

	TAILQ_ENTRY(ftl_band_reloc) entry;
};

struct ftl_reloc {
	/* Device associated with relocate */
	struct spdk_ftl_dev *dev;

	/* Indicates relocate is about to halt */
	bool halt;

	/* Maximum number of IOs per band */
	size_t max_qdepth;

	/* Maximum number of active band relocates */
	size_t max_active;

	/* Maximum transfer size (in logical blocks) per single IO */
	size_t xfer_size;

	/* Array of band relocates */
	struct ftl_band_reloc *brelocs;

	/* Number of active/priority band relocates */
	size_t num_active;

	/* Priority band relocates queue */
	TAILQ_HEAD(, ftl_band_reloc) prio_queue;

	/* Active band relocates queue */
	TAILQ_HEAD(, ftl_band_reloc) active_queue;

	/* Pending band relocates queue */
	TAILQ_HEAD(, ftl_band_reloc) pending_queue;
};
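/*
 * Relocation overview (summarizing the logic below):
 *
 * Blocks that need to be moved are marked in a band's reloc_map by
 * ftl_reloc_add(). The band waits on the pending queue until an active
 * slot frees up, is prepared by ftl_reloc_prep() and is then drained by
 * repeated calls to ftl_process_reloc(). Bands on the priority queue are
 * always serviced first. Each in-flight unit of work is an ftl_reloc_move,
 * which cycles through the READ -> READ_LBA_MAP -> WRITE states until the
 * band has nothing left to relocate in this pass.
 */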
static size_t
ftl_reloc_iter_chk_offset(struct ftl_band_reloc *breloc)
{
	size_t chunk = breloc->iter.chk_current;

	return breloc->iter.chk_offset[chunk];
}

static size_t
ftl_reloc_iter_chk_done(struct ftl_band_reloc *breloc)
{
	size_t num_lbks = ftl_dev_lbks_in_chunk(breloc->parent->dev);

	return ftl_reloc_iter_chk_offset(breloc) == num_lbks;
}

static void
ftl_reloc_clr_lbk(struct ftl_band_reloc *breloc, size_t lbkoff)
{
	if (!spdk_bit_array_get(breloc->reloc_map, lbkoff)) {
		return;
	}

	spdk_bit_array_clear(breloc->reloc_map, lbkoff);
	assert(breloc->num_lbks);
	breloc->num_lbks--;
}

static void
ftl_reloc_read_lba_map_cb(struct ftl_io *io, void *arg, int status)
{
	struct ftl_reloc_move *move = arg;
	struct ftl_band_reloc *breloc = move->breloc;

	breloc->num_outstanding--;
	assert(status == 0);
	move->state = FTL_RELOC_STATE_WRITE;
	spdk_ring_enqueue(breloc->move_queue, (void **)&move, 1, NULL);
}

static int
ftl_reloc_read_lba_map(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
{
	struct ftl_band *band = breloc->band;

	breloc->num_outstanding++;
	return ftl_band_read_lba_map(band, ftl_band_lbkoff_from_ppa(band, move->ppa),
				     move->lbk_cnt, ftl_reloc_read_lba_map_cb, move);
}

static void
ftl_reloc_prep(struct ftl_band_reloc *breloc)
{
	struct ftl_band *band = breloc->band;
	struct ftl_reloc *reloc = breloc->parent;
	struct ftl_reloc_move *move;
	size_t i;

	breloc->active = 1;
	reloc->num_active++;

	if (!band->high_prio) {
		if (band->lba_map.ref_cnt == 0) {
			if (ftl_band_alloc_lba_map(band)) {
				assert(false);
			}
		} else {
			ftl_band_acquire_lba_map(band);
		}
	}

	for (i = 0; i < reloc->max_qdepth; ++i) {
		move = &breloc->moves[i];
		move->state = FTL_RELOC_STATE_READ;
		spdk_ring_enqueue(breloc->move_queue, (void **)&move, 1, NULL);
	}
}

static void
ftl_reloc_free_move(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
{
	assert(move);
	spdk_dma_free(move->data);
	memset(move, 0, sizeof(*move));
	move->state = FTL_RELOC_STATE_READ;
	spdk_ring_enqueue(breloc->move_queue, (void **)&move, 1, NULL);
}

static void
ftl_reloc_write_cb(struct ftl_io *io, void *arg, int status)
{
	struct ftl_reloc_move *move = arg;
	struct ftl_ppa ppa = move->ppa;
	struct ftl_band_reloc *breloc = move->breloc;
	size_t i;

	breloc->num_outstanding--;

	if (status) {
		SPDK_ERRLOG("Reloc write failed with status: %d\n", status);
		assert(false);
		return;
	}

	for (i = 0; i < move->lbk_cnt; ++i) {
		ppa.lbk = move->ppa.lbk + i;
		size_t lbkoff = ftl_band_lbkoff_from_ppa(breloc->band, ppa);
		ftl_reloc_clr_lbk(breloc, lbkoff);
	}

	ftl_reloc_free_move(breloc, move);
}
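/*
 * Read completion: the payload now sits in move->data. Before it can be
 * written out, the band's LBA map has to be read as well (READ_LBA_MAP
 * state), since the relocation write is issued with FTL_IO_VECTOR_LBA and
 * needs the current LBA of every relocated block.
 */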
static void
ftl_reloc_read_cb(struct ftl_io *io, void *arg, int status)
{
	struct ftl_reloc_move *move = arg;
	struct ftl_band_reloc *breloc = move->breloc;

	breloc->num_outstanding--;

	/* TODO: We should handle fail on relocation read. We need to inform */
	/* user that this group of blocks is bad (update l2p with bad block address and */
	/* put it to lba_map/sector_lba). Maybe we could also retry read with smaller granularity? */
	if (status) {
		SPDK_ERRLOG("Reloc read failed with status: %d\n", status);
		assert(false);
		return;
	}

	move->state = FTL_RELOC_STATE_READ_LBA_MAP;
	move->io = NULL;
	spdk_ring_enqueue(breloc->move_queue, (void **)&move, 1, NULL);
}

static void
ftl_reloc_iter_reset(struct ftl_band_reloc *breloc)
{
	memset(breloc->iter.chk_offset, 0, ftl_dev_num_punits(breloc->band->dev) *
	       sizeof(*breloc->iter.chk_offset));
	breloc->iter.chk_current = 0;
}

static size_t
ftl_reloc_iter_lbkoff(struct ftl_band_reloc *breloc)
{
	size_t chk_offset = breloc->iter.chk_current * ftl_dev_lbks_in_chunk(breloc->parent->dev);

	return breloc->iter.chk_offset[breloc->iter.chk_current] + chk_offset;
}

static void
ftl_reloc_iter_next_chk(struct ftl_band_reloc *breloc)
{
	size_t num_chk = ftl_dev_num_punits(breloc->band->dev);

	breloc->iter.chk_current = (breloc->iter.chk_current + 1) % num_chk;
}

static int
ftl_reloc_lbk_valid(struct ftl_band_reloc *breloc, size_t lbkoff)
{
	struct ftl_ppa ppa = ftl_band_ppa_from_lbkoff(breloc->band, lbkoff);

	return ftl_ppa_is_written(breloc->band, ppa) &&
	       spdk_bit_array_get(breloc->reloc_map, lbkoff) &&
	       ftl_band_lbkoff_valid(breloc->band, lbkoff);
}

static int
ftl_reloc_iter_next(struct ftl_band_reloc *breloc, size_t *lbkoff)
{
	size_t chunk = breloc->iter.chk_current;

	*lbkoff = ftl_reloc_iter_lbkoff(breloc);

	if (ftl_reloc_iter_chk_done(breloc)) {
		return 0;
	}

	breloc->iter.chk_offset[chunk]++;

	if (!ftl_reloc_lbk_valid(breloc, *lbkoff)) {
		ftl_reloc_clr_lbk(breloc, *lbkoff);
		return 0;
	}

	return 1;
}

static int
ftl_reloc_first_valid_lbk(struct ftl_band_reloc *breloc, size_t *lbkoff)
{
	size_t i, num_lbks = ftl_dev_lbks_in_chunk(breloc->parent->dev);

	for (i = ftl_reloc_iter_chk_offset(breloc); i < num_lbks; ++i) {
		if (ftl_reloc_iter_next(breloc, lbkoff)) {
			return 1;
		}
	}

	return 0;
}

static int
ftl_reloc_iter_done(struct ftl_band_reloc *breloc)
{
	size_t i;
	size_t num_chks = ftl_dev_num_punits(breloc->band->dev);
	size_t num_lbks = ftl_dev_lbks_in_chunk(breloc->parent->dev);

	for (i = 0; i < num_chks; ++i) {
		if (breloc->iter.chk_offset[i] != num_lbks) {
			return 0;
		}
	}

	return 1;
}

static size_t
ftl_reloc_find_valid_lbks(struct ftl_band_reloc *breloc,
			  size_t num_lbk, struct ftl_ppa *ppa)
{
	size_t lbkoff, lbk_cnt = 0;

	if (!ftl_reloc_first_valid_lbk(breloc, &lbkoff)) {
		return 0;
	}

	*ppa = ftl_band_ppa_from_lbkoff(breloc->band, lbkoff);

	for (lbk_cnt = 1; lbk_cnt < num_lbk; lbk_cnt++) {
		if (!ftl_reloc_iter_next(breloc, &lbkoff)) {
			break;
		}
	}

	return lbk_cnt;
}
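/*
 * A single move picks up at most xfer_size blocks from the current chunk;
 * the iterator then advances round-robin to the next parallel unit, so
 * consecutive moves spread their reads across all chunks of the band.
 */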
static size_t
ftl_reloc_next_lbks(struct ftl_band_reloc *breloc, struct ftl_ppa *ppa)
{
	size_t i, lbk_cnt = 0;
	struct spdk_ftl_dev *dev = breloc->parent->dev;

	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
		lbk_cnt = ftl_reloc_find_valid_lbks(breloc, breloc->parent->xfer_size, ppa);
		ftl_reloc_iter_next_chk(breloc);

		if (lbk_cnt || ftl_reloc_iter_done(breloc)) {
			break;
		}
	}

	return lbk_cnt;
}

static struct ftl_io *
ftl_reloc_io_init(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move,
		  ftl_io_fn fn, enum ftl_io_type io_type, int flags)
{
	size_t lbkoff, i;
	struct ftl_ppa ppa = move->ppa;
	struct ftl_io *io = NULL;
	struct ftl_io_init_opts opts = {
		.dev = breloc->parent->dev,
		.band = breloc->band,
		.size = sizeof(*io),
		.flags = flags | FTL_IO_INTERNAL | FTL_IO_PPA_MODE,
		.type = io_type,
		.lbk_cnt = move->lbk_cnt,
		.data = move->data,
		.cb_fn = fn,
	};

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->cb_ctx = move;
	io->ppa = move->ppa;

	if (flags & FTL_IO_VECTOR_LBA) {
		for (i = 0; i < io->lbk_cnt; ++i, ++ppa.lbk) {
			lbkoff = ftl_band_lbkoff_from_ppa(breloc->band, ppa);

			if (!ftl_band_lbkoff_valid(breloc->band, lbkoff)) {
				io->lba.vector[i] = FTL_LBA_INVALID;
				continue;
			}

			io->lba.vector[i] = breloc->band->lba_map.map[lbkoff];
		}
	}

	ftl_trace_lba_io_init(io->dev, io);

	return io;
}

static int
ftl_reloc_write(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
{
	int io_flags = FTL_IO_WEAK | FTL_IO_VECTOR_LBA | FTL_IO_BYPASS_CACHE;

	if (spdk_likely(!move->io)) {
		move->io = ftl_reloc_io_init(breloc, move, ftl_reloc_write_cb,
					     FTL_IO_WRITE, io_flags);
		if (!move->io) {
			ftl_reloc_free_move(breloc, move);
			return -ENOMEM;
		}
	}

	breloc->num_outstanding++;
	ftl_io_write(move->io);
	return 0;
}

static int
ftl_reloc_read(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
{
	struct ftl_ppa ppa = {};

	move->lbk_cnt = ftl_reloc_next_lbks(breloc, &ppa);
	move->breloc = breloc;
	move->ppa = ppa;

	if (!move->lbk_cnt) {
		return 0;
	}

	move->data = spdk_dma_malloc(PAGE_SIZE * move->lbk_cnt, PAGE_SIZE, NULL);
	if (!move->data) {
		return -1;
	}

	move->io = ftl_reloc_io_init(breloc, move, ftl_reloc_read_cb, FTL_IO_READ, 0);
	if (!move->io) {
		ftl_reloc_free_move(breloc, move);
		SPDK_ERRLOG("Failed to initialize io for relocation.");
		return -1;
	}

	breloc->num_outstanding++;
	ftl_io_read(move->io);
	return 0;
}

static void
ftl_reloc_process_moves(struct ftl_band_reloc *breloc)
{
	int rc = 0;
	size_t i, num_moves;
	struct ftl_reloc_move *moves[FTL_RELOC_MAX_MOVES] = {0};
	struct ftl_reloc *reloc = breloc->parent;
	struct ftl_reloc_move *move;

	num_moves = spdk_ring_dequeue(breloc->move_queue, (void **)moves, reloc->max_qdepth);

	for (i = 0; i < num_moves; ++i) {
		move = moves[i];
		assert(move != NULL);
		switch (move->state) {
		case FTL_RELOC_STATE_READ_LBA_MAP:
			rc = ftl_reloc_read_lba_map(breloc, move);
			break;
		case FTL_RELOC_STATE_READ:
			rc = ftl_reloc_read(breloc, move);
			break;
		case FTL_RELOC_STATE_WRITE:
			rc = ftl_reloc_write(breloc, move);
			break;
		default:
			assert(false);
			break;
		}

		if (rc) {
			SPDK_ERRLOG("Move queue processing failed\n");
			assert(false);
		}
	}
}
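/*
 * A relocation pass is done once every queued move has either completed or
 * found nothing left to read. ftl_reloc_release() then drops the LBA map
 * reference and either marks the band free (if it is closed and holds no
 * valid data) or puts it back on the pending queue when blocks remain.
 */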
static bool
ftl_reloc_done(struct ftl_band_reloc *breloc)
{
	return !breloc->num_outstanding && !spdk_ring_count(breloc->move_queue);
}

static void
ftl_reloc_release(struct ftl_band_reloc *breloc)
{
	struct ftl_reloc *reloc = breloc->parent;
	struct ftl_band *band = breloc->band;

	if (band->high_prio && breloc->num_lbks == 0) {
		band->high_prio = 0;
		TAILQ_REMOVE(&reloc->prio_queue, breloc, entry);
	} else if (!band->high_prio) {
		TAILQ_REMOVE(&reloc->active_queue, breloc, entry);
	}

	ftl_reloc_iter_reset(breloc);

	ftl_band_release_lba_map(band);

	breloc->active = 0;
	reloc->num_active--;

	if (!band->high_prio && breloc->num_lbks) {
		TAILQ_INSERT_TAIL(&reloc->pending_queue, breloc, entry);
		return;
	}

	if (ftl_band_empty(band) && band->state == FTL_BAND_STATE_CLOSED) {
		ftl_band_set_state(breloc->band, FTL_BAND_STATE_FREE);
	}
}

static void
ftl_process_reloc(struct ftl_band_reloc *breloc)
{
	ftl_reloc_process_moves(breloc);

	if (ftl_reloc_done(breloc)) {
		ftl_reloc_release(breloc);
	}
}

static int
ftl_band_reloc_init(struct ftl_reloc *reloc, struct ftl_band_reloc *breloc,
		    struct ftl_band *band)
{
	breloc->band = band;
	breloc->parent = reloc;

	breloc->reloc_map = spdk_bit_array_create(ftl_num_band_lbks(reloc->dev));
	if (!breloc->reloc_map) {
		SPDK_ERRLOG("Failed to initialize reloc map");
		return -1;
	}

	breloc->iter.chk_offset = calloc(ftl_dev_num_punits(band->dev),
					 sizeof(*breloc->iter.chk_offset));
	if (!breloc->iter.chk_offset) {
		SPDK_ERRLOG("Failed to initialize reloc iterator");
		return -1;
	}

	breloc->move_queue = spdk_ring_create(SPDK_RING_TYPE_MP_SC,
					      reloc->max_qdepth * 2,
					      SPDK_ENV_SOCKET_ID_ANY);
	if (!breloc->move_queue) {
		SPDK_ERRLOG("Failed to initialize reloc write queue");
		return -1;
	}

	breloc->moves = calloc(reloc->max_qdepth, sizeof(*breloc->moves));
	if (!breloc->moves) {
		return -1;
	}

	return 0;
}

static void
ftl_band_reloc_free(struct ftl_band_reloc *breloc)
{
	struct ftl_reloc *reloc;
	struct ftl_reloc_move *moves[FTL_RELOC_MAX_MOVES] = {};
	size_t i, num_moves;

	if (!breloc) {
		return;
	}

	assert(breloc->num_outstanding == 0);
	reloc = breloc->parent;

	/* Drain write queue if there is active band relocation during shutdown */
	if (breloc->active) {
		assert(reloc->halt);
		num_moves = spdk_ring_dequeue(breloc->move_queue, (void **)&moves, reloc->max_qdepth);
		for (i = 0; i < num_moves; ++i) {
			ftl_reloc_free_move(breloc, moves[i]);
		}
	}

	spdk_ring_free(breloc->move_queue);
	spdk_bit_array_free(&breloc->reloc_map);
	free(breloc->iter.chk_offset);
	free(breloc->moves);
}

static void
ftl_reloc_add_active_queue(struct ftl_band_reloc *breloc)
{
	struct ftl_reloc *reloc = breloc->parent;

	TAILQ_REMOVE(&reloc->pending_queue, breloc, entry);
	TAILQ_INSERT_HEAD(&reloc->active_queue, breloc, entry);
	ftl_reloc_prep(breloc);
}
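/*
 * Creates the relocation context for a device. The configured queue depth
 * is capped at FTL_RELOC_MAX_MOVES, since moves are dequeued into
 * fixed-size arrays of that length.
 */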
struct ftl_reloc *
ftl_reloc_init(struct spdk_ftl_dev *dev)
{
#define POOL_NAME_LEN 128
	struct ftl_reloc *reloc;
	char pool_name[POOL_NAME_LEN];
	int rc;
	size_t i;

	reloc = calloc(1, sizeof(*reloc));
	if (!reloc) {
		return NULL;
	}

	reloc->dev = dev;
	reloc->halt = true;
	reloc->max_qdepth = dev->conf.max_reloc_qdepth;
	reloc->max_active = dev->conf.max_active_relocs;
	reloc->xfer_size = dev->xfer_size;

	if (reloc->max_qdepth > FTL_RELOC_MAX_MOVES) {
		goto error;
	}

	reloc->brelocs = calloc(ftl_dev_num_bands(dev), sizeof(*reloc->brelocs));
	if (!reloc->brelocs) {
		goto error;
	}

	for (i = 0; i < ftl_dev_num_bands(reloc->dev); ++i) {
		if (ftl_band_reloc_init(reloc, &reloc->brelocs[i], &dev->bands[i])) {
			goto error;
		}
	}

	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "reloc-io-pool");
	if (rc < 0 || rc >= POOL_NAME_LEN) {
		goto error;
	}

	TAILQ_INIT(&reloc->pending_queue);
	TAILQ_INIT(&reloc->active_queue);
	TAILQ_INIT(&reloc->prio_queue);

	return reloc;
error:
	ftl_reloc_free(reloc);
	return NULL;
}

void
ftl_reloc_free(struct ftl_reloc *reloc)
{
	size_t i;

	if (!reloc) {
		return;
	}

	for (i = 0; i < ftl_dev_num_bands(reloc->dev); ++i) {
		ftl_band_reloc_free(&reloc->brelocs[i]);
	}

	free(reloc->brelocs);
	free(reloc);
}

bool
ftl_reloc_is_halted(const struct ftl_reloc *reloc)
{
	return reloc->halt;
}

void
ftl_reloc_halt(struct ftl_reloc *reloc)
{
	reloc->halt = true;
}

void
ftl_reloc_resume(struct ftl_reloc *reloc)
{
	reloc->halt = false;
}

void
ftl_reloc(struct ftl_reloc *reloc)
{
	struct ftl_band_reloc *breloc, *tbreloc;

	if (ftl_reloc_is_halted(reloc)) {
		return;
	}

	/* Process first band from priority queue and return */
	breloc = TAILQ_FIRST(&reloc->prio_queue);
	if (breloc) {
		if (!breloc->active) {
			ftl_reloc_prep(breloc);
		}
		ftl_process_reloc(breloc);
		return;
	}

	TAILQ_FOREACH_SAFE(breloc, &reloc->pending_queue, entry, tbreloc) {
		if (reloc->num_active == reloc->max_active) {
			break;
		}

		/* TODO: Add handling relocation on open bands */
		if (breloc->band->state != FTL_BAND_STATE_CLOSED) {
			continue;
		}

		ftl_reloc_add_active_queue(breloc);
	}

	TAILQ_FOREACH_SAFE(breloc, &reloc->active_queue, entry, tbreloc) {
		ftl_process_reloc(breloc);
	}
}
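/*
 * Marks num_lbks blocks of the band, starting at offset, for relocation.
 * Non-priority requests queue the band on the pending queue (unless it is
 * already queued or being processed); priority requests pull it off the
 * pending/active queue, place it on the priority queue and take an
 * additional LBA map reference.
 */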
void
ftl_reloc_add(struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
	      size_t num_lbks, int prio)
{
	struct ftl_band_reloc *breloc = &reloc->brelocs[band->id];
	size_t i, prev_lbks = breloc->num_lbks;

	/* No need to add anything if already at high prio - whole band should be relocated */
	if (!prio && band->high_prio) {
		return;
	}

	pthread_spin_lock(&band->lba_map.lock);
	if (band->lba_map.num_vld == 0) {
		pthread_spin_unlock(&band->lba_map.lock);
		return;
	}
	pthread_spin_unlock(&band->lba_map.lock);

	for (i = offset; i < offset + num_lbks; ++i) {
		if (spdk_bit_array_get(breloc->reloc_map, i)) {
			continue;
		}
		spdk_bit_array_set(breloc->reloc_map, i);
		breloc->num_lbks++;
	}

	if (!prio && prev_lbks == breloc->num_lbks) {
		return;
	}

	if (!prev_lbks && !prio && !breloc->active) {
		TAILQ_INSERT_HEAD(&reloc->pending_queue, breloc, entry);
	}

	if (prio) {
		struct ftl_band_reloc *iter_breloc;

		/* If the priority band is already on the pending or active queue, remove it from there */
		TAILQ_FOREACH(iter_breloc, &reloc->pending_queue, entry) {
			if (breloc == iter_breloc) {
				TAILQ_REMOVE(&reloc->pending_queue, breloc, entry);
				break;
			}
		}

		TAILQ_FOREACH(iter_breloc, &reloc->active_queue, entry) {
			if (breloc == iter_breloc) {
				TAILQ_REMOVE(&reloc->active_queue, breloc, entry);
				break;
			}
		}

		TAILQ_INSERT_TAIL(&reloc->prio_queue, breloc, entry);
		ftl_band_acquire_lba_map(breloc->band);
	}
}