/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/nvme.h"
#include "spdk/io_channel.h"
#include "spdk/bdev_module.h"
#include "spdk_internal/log.h"
#include "spdk/ftl.h"
#include "ftl_core.h"
#include "ftl_anm.h"
#include "ftl_io.h"
#include "ftl_reloc.h"
#include "ftl_rwb.h"
#include "ftl_band.h"
#include "ftl_debug.h"

#define FTL_CORE_RING_SIZE	4096
#define FTL_INIT_TIMEOUT	30
#define FTL_NSID		1

#define ftl_range_intersect(s1, e1, s2, e2) \
	((s1) <= (e2) && (s2) <= (e1))

struct ftl_admin_cmpl {
	struct spdk_nvme_cpl	status;

	int			complete;
};

static STAILQ_HEAD(, spdk_ftl_dev)	g_ftl_queue = STAILQ_HEAD_INITIALIZER(g_ftl_queue);
static pthread_mutex_t			g_ftl_queue_lock = PTHREAD_MUTEX_INITIALIZER;
static const struct spdk_ftl_conf	g_default_conf = {
	.defrag = {
		.limits = {
			/* 5 free bands / 0 % host writes */
			[SPDK_FTL_LIMIT_CRIT]  = { .thld = 5,  .limit = 0 },
			/* 10 free bands / 5 % host writes */
			[SPDK_FTL_LIMIT_HIGH]  = { .thld = 10, .limit = 5 },
			/* 20 free bands / 40 % host writes */
			[SPDK_FTL_LIMIT_LOW]   = { .thld = 20, .limit = 40 },
			/* 40 free bands / 100 % host writes - defrag starts running */
			[SPDK_FTL_LIMIT_START] = { .thld = 40, .limit = 100 },
		},
		/* 10 percent valid lbks */
		.invalid_thld = 10,
	},
	/* 20% spare lbks */
	.lba_rsvd = 20,
	/* 6M write buffer */
	.rwb_size = 6 * 1024 * 1024,
	/* 90% band fill threshold */
	.band_thld = 90,
	/* Max 32 IO depth per band relocate */
	.max_reloc_qdepth = 32,
	/* Max 3 active band relocates */
	.max_active_relocs = 3,
	/* IO pool size per user thread (this should be adjusted to thread IO qdepth) */
	.user_io_pool_size = 2048,
};

static void ftl_dev_free_sync(struct spdk_ftl_dev *dev);

static void
ftl_admin_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct ftl_admin_cmpl *cmpl = ctx;

	cmpl->complete = 1;
	cmpl->status = *cpl;
}

static int
ftl_band_init_md(struct ftl_band *band)
{
	struct ftl_md *md = &band->md;

	md->vld_map = spdk_bit_array_create(ftl_num_band_lbks(band->dev));
	if (!md->vld_map) {
		return -ENOMEM;
	}

	pthread_spin_init(&md->lock, PTHREAD_PROCESS_PRIVATE);
	ftl_band_md_clear(&band->md);
	return 0;
}

static int
ftl_check_init_opts(const struct spdk_ftl_dev_init_opts *opts,
		    const struct spdk_ocssd_geometry_data *geo)
{
	struct spdk_ftl_dev *dev;
	size_t num_punits = geo->num_pu * geo->num_grp;
	int rc = 0;

	if (opts->range.begin > opts->range.end || opts->range.end >= num_punits) {
		return -1;
	}

	pthread_mutex_lock(&g_ftl_queue_lock);

	STAILQ_FOREACH(dev, &g_ftl_queue, stailq) {
		if (spdk_nvme_transport_id_compare(&dev->trid, &opts->trid)) {
			continue;
		}

		if (ftl_range_intersect(opts->range.begin, opts->range.end,
					dev->range.begin, dev->range.end)) {
			rc = -1;
			goto out;
		}
	}

out:
	pthread_mutex_unlock(&g_ftl_queue_lock);
	return rc;
}

static int
ftl_retrieve_bbt_page(struct spdk_ftl_dev *dev, uint64_t offset,
		      struct spdk_ocssd_chunk_information_entry *info,
		      unsigned int num_entries)
{
	volatile struct ftl_admin_cmpl cmpl = {};
	uint32_t nsid = spdk_nvme_ns_get_id(dev->ns);

	if (spdk_nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_OCSSD_LOG_CHUNK_INFO, nsid,
					     info, num_entries * sizeof(*info),
					     offset * sizeof(*info),
					     ftl_admin_cb, (void *)&cmpl)) {
		return -1;
	}

	while (!cmpl.complete) {
		spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
	}

	if (spdk_nvme_cpl_is_error(&cmpl.status)) {
		SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n",
			    cmpl.status.status.sc, cmpl.status.status.sct);
		return -1;
	}

	return 0;
}

static int
ftl_retrieve_bbt(struct spdk_ftl_dev *dev, const struct ftl_punit *punit,
		 struct spdk_ocssd_chunk_information_entry *info)
{
	uint32_t i = 0;
	unsigned int num_entries = PAGE_SIZE / sizeof(*info);
	uint64_t off = (punit->start_ppa.grp * dev->geo.num_pu + punit->start_ppa.pu) *
		       dev->geo.num_chk;

	for (i = 0; i < dev->geo.num_chk; i += num_entries) {
		if (num_entries > dev->geo.num_chk - i) {
			num_entries = dev->geo.num_chk - i;
		}

		if (ftl_retrieve_bbt_page(dev, off + i, &info[i], num_entries)) {
			return -1;
		}
	}

	return 0;
}

static unsigned char
ftl_get_chunk_state(const struct spdk_ocssd_chunk_information_entry *info)
{
	if (info->cs.free) {
		return FTL_CHUNK_STATE_FREE;
	}

	if (info->cs.open) {
		return FTL_CHUNK_STATE_OPEN;
	}

	if (info->cs.closed) {
		return FTL_CHUNK_STATE_CLOSED;
	}

	if (info->cs.offline) {
		return FTL_CHUNK_STATE_BAD;
	}

	assert(0 && "Invalid block state");
	return FTL_CHUNK_STATE_BAD;
}

static void
ftl_remove_empty_bands(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band, *temp_band;

	/* Remove band from shut_bands list to prevent further processing */
	/* if all blocks on this band are bad */
	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
		if (!band->num_chunks) {
			dev->num_bands--;
			LIST_REMOVE(band, list_entry);
		}
	}
}

static int
ftl_dev_init_bands(struct spdk_ftl_dev *dev)
{
	struct spdk_ocssd_chunk_information_entry *info;
	struct ftl_band *band, *pband;
	struct ftl_punit *punit;
	struct ftl_chunk *chunk;
	unsigned int i, j;
	char buf[128];
	int rc = 0;

	LIST_INIT(&dev->free_bands);
	LIST_INIT(&dev->shut_bands);

	dev->num_free = 0;
	dev->num_bands = ftl_dev_num_bands(dev);
	dev->bands = calloc(ftl_dev_num_bands(dev), sizeof(*dev->bands));
	if (!dev->bands) {
		return -1;
	}

	info = calloc(dev->geo.num_chk, sizeof(*info));
	if (!info) {
		return -1;
	}

	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
		band = &dev->bands[i];
		band->id = i;
		band->dev = dev;
		band->state = FTL_BAND_STATE_CLOSED;

		if (LIST_EMPTY(&dev->shut_bands)) {
			LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
		} else {
			LIST_INSERT_AFTER(pband, band, list_entry);
		}
		pband = band;

		CIRCLEQ_INIT(&band->chunks);
		band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf));
		if (!band->chunk_buf) {
			SPDK_ERRLOG("Failed to allocate block state table for band: [%u]\n", i);
			rc = -1;
			goto out;
		}

		rc = ftl_band_init_md(band);
		if (rc) {
			SPDK_ERRLOG("Failed to initialize metadata structures for band [%u]\n", i);
			goto out;
		}
	}

	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
		punit = &dev->punits[i];

		rc = ftl_retrieve_bbt(dev, punit, info);
		if (rc) {
			SPDK_ERRLOG("Failed to retrieve bbt for @ppa: %s [%lu]\n",
				    ftl_ppa2str(punit->start_ppa, buf, sizeof(buf)),
				    ftl_ppa_addr_pack(dev, punit->start_ppa));
			goto out;
		}

		for (j = 0; j < ftl_dev_num_bands(dev); ++j) {
			band = &dev->bands[j];
			chunk = &band->chunk_buf[i];
			chunk->pos = i;
			chunk->state = ftl_get_chunk_state(&info[j]);
			chunk->punit = punit;
			chunk->start_ppa = punit->start_ppa;
			chunk->start_ppa.chk = band->id;

			if (chunk->state != FTL_CHUNK_STATE_BAD) {
				band->num_chunks++;
				CIRCLEQ_INSERT_TAIL(&band->chunks, chunk, circleq);
			}
		}
	}

	ftl_remove_empty_bands(dev);
out:
	free(info);
	return rc;
}

static int
ftl_dev_init_punits(struct spdk_ftl_dev *dev)
{
	unsigned int i, punit;

	dev->punits = calloc(ftl_dev_num_punits(dev), sizeof(*dev->punits));
	if (!dev->punits) {
		return -1;
	}

	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
		dev->punits[i].dev = dev;
		punit = dev->range.begin + i;

		dev->punits[i].start_ppa.ppa = 0;
		dev->punits[i].start_ppa.grp = punit % dev->geo.num_grp;
		dev->punits[i].start_ppa.pu = punit / dev->geo.num_grp;
	}

	return 0;
}

static int
ftl_dev_retrieve_geo(struct spdk_ftl_dev *dev)
{
	volatile struct ftl_admin_cmpl cmpl = {};
	struct spdk_ocssd_geometry_data *buf;
	uint32_t nsid = spdk_nvme_ns_get_id(dev->ns);
	int rc = -1;

	buf = malloc(PAGE_SIZE);
	if (!buf) {
		SPDK_ERRLOG("Memory allocation failure\n");
		return -1;
	}

	if (spdk_nvme_ocssd_ctrlr_cmd_geometry(dev->ctrlr, nsid, buf, PAGE_SIZE,
					       ftl_admin_cb, (void *)&cmpl)) {
		SPDK_ERRLOG("Unable to retrieve geometry\n");
		goto out;
	}

	/* TODO: add a timeout */
	while (!cmpl.complete) {
		spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
	}

	if (spdk_nvme_cpl_is_error(&cmpl.status)) {
		SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n",
			    cmpl.status.status.sc, cmpl.status.status.sct);
		goto out;
	}

	dev->geo = *buf;

	/* TODO: add sanity checks for the geo */
	dev->ppa_len = dev->geo.lbaf.grp_len +
		       dev->geo.lbaf.pu_len +
		       dev->geo.lbaf.chk_len +
		       dev->geo.lbaf.lbk_len;

	dev->ppaf.lbk_offset = 0;
	dev->ppaf.lbk_mask   = (1 << dev->geo.lbaf.lbk_len) - 1;
	dev->ppaf.chk_offset = dev->ppaf.lbk_offset + dev->geo.lbaf.lbk_len;
	dev->ppaf.chk_mask   = (1 << dev->geo.lbaf.chk_len) - 1;
	dev->ppaf.pu_offset  = dev->ppaf.chk_offset + dev->geo.lbaf.chk_len;
	dev->ppaf.pu_mask    = (1 << dev->geo.lbaf.pu_len) - 1;
	dev->ppaf.grp_offset = dev->ppaf.pu_offset + dev->geo.lbaf.pu_len;
	dev->ppaf.grp_mask   = (1 << dev->geo.lbaf.grp_len) - 1;

	/* We're using optimal write size as our xfer size */
	dev->xfer_size = dev->geo.ws_opt;

	rc = 0;
out:
	free(buf);
	return rc;
}

static int
ftl_dev_nvme_init(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	uint32_t block_size;

	dev->ctrlr = opts->ctrlr;

	if (spdk_nvme_ctrlr_get_num_ns(dev->ctrlr) != 1) {
		SPDK_ERRLOG("Unsupported number of namespaces\n");
		return -1;
	}

	dev->ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, FTL_NSID);
	dev->trid = opts->trid;
	dev->md_size = spdk_nvme_ns_get_md_size(dev->ns);

	block_size = spdk_nvme_ns_get_extended_sector_size(dev->ns);
	if (block_size != FTL_BLOCK_SIZE) {
		SPDK_ERRLOG("Unsupported block size (%"PRIu32")\n", block_size);
		return -1;
	}

	if (dev->md_size % sizeof(uint32_t) != 0) {
		/* Metadata pointer must be dword aligned */
		SPDK_ERRLOG("Unsupported metadata size (%zu)\n", dev->md_size);
		return -1;
	}

	return 0;
}

static int
ftl_conf_validate(const struct spdk_ftl_conf *conf)
{
	size_t i;

	if (conf->defrag.invalid_thld >= 100) {
		return -1;
	}
	if (conf->lba_rsvd >= 100) {
		return -1;
	}
	if (conf->lba_rsvd == 0) {
		return -1;
	}
	if (conf->rwb_size == 0) {
		return -1;
	}
	if (conf->rwb_size % FTL_BLOCK_SIZE != 0) {
		return -1;
	}

	for (i = 0; i < SPDK_FTL_LIMIT_MAX; ++i) {
		if (conf->defrag.limits[i].limit > 100) {
			return -1;
		}
	}

	return 0;
}

void
spdk_ftl_conf_init_defaults(struct spdk_ftl_conf *conf)
{
	*conf = g_default_conf;
}

static int
ftl_init_wptr_list(struct spdk_ftl_dev *dev)
{
#define POOL_NAME_LEN 128
	char pool_name[POOL_NAME_LEN];
	int rc;

	LIST_INIT(&dev->wptr_list);
	LIST_INIT(&dev->flush_list);

	rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lba-pool");
	if (rc < 0 || rc >= POOL_NAME_LEN) {
		return -ENAMETOOLONG;
	}

	/* We need to reserve at least 2 buffers for the band close / open sequence
	 * alone, plus additional (8) buffers for handling write errors.
	 * TODO: This memory pool is utilized only by the core thread - it introduces
	 * unnecessary overhead and should be replaced by a different data structure.
	 */
	dev->lba_pool = spdk_mempool_create(pool_name, 2 + 8,
					    ftl_num_band_lbks(dev) * sizeof(uint64_t),
					    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    SPDK_ENV_SOCKET_ID_ANY);
	if (!dev->lba_pool) {
		return -ENOMEM;
	}

	return 0;
}

static size_t
ftl_dev_band_max_seq(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;
	size_t seq = 0;

	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
		if (band->md.seq > seq) {
			seq = band->md.seq;
		}
	}

	return seq;
}

static void
_ftl_init_bands_state(void *ctx)
{
	struct ftl_band *band, *temp_band;
	struct spdk_ftl_dev *dev = ctx;

	dev->seq = ftl_dev_band_max_seq(dev);

	LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
		if (!band->md.num_vld) {
			ftl_band_set_state(band, FTL_BAND_STATE_FREE);
		}
	}

	ftl_reloc_resume(dev->reloc);
	/* Clear the limit applications as they're incremented incorrectly by */
	/* the initialization code */
	memset(dev->stats.limits, 0, sizeof(dev->stats.limits));
}

static int
ftl_init_num_free_bands(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;
	int cnt = 0;

	LIST_FOREACH(band, &dev->shut_bands, list_entry) {
		if (band->num_chunks && !band->md.num_vld) {
			cnt++;
		}
	}
	return cnt;
}

static int
ftl_init_bands_state(struct spdk_ftl_dev *dev)
{
	/* TODO: Should we abort initialization or expose a read-only device */
	/* if there are no free bands? */
	/* If we abort initialization, should we depend on the condition that */
	/* there are no free bands at all, or should we require some minimal */
	/* number of free bands? */
	if (!ftl_init_num_free_bands(dev)) {
		return -1;
	}

	spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_init_bands_state, dev);
	return 0;
}

static void
_ftl_dev_init_thread(void *ctx)
{
	struct ftl_thread *thread = ctx;
	struct spdk_ftl_dev *dev = thread->dev;

	thread->thread = spdk_get_thread();

	thread->poller = spdk_poller_register(thread->poller_fn, thread, thread->period_us);
	if (!thread->poller) {
		SPDK_ERRLOG("Unable to register poller\n");
		assert(0);
	}

	if (spdk_get_thread() == ftl_get_core_thread(dev)) {
		ftl_anm_register_device(dev, ftl_process_anm_event);
	}
}

static int
ftl_dev_init_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread,
		    struct spdk_thread *spdk_thread, spdk_poller_fn fn, uint64_t period_us)
{
	thread->dev = dev;
	thread->poller_fn = fn;
	thread->thread = spdk_thread;
	thread->period_us = period_us;

	thread->qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
	if (!thread->qpair) {
		SPDK_ERRLOG("Unable to initialize qpair\n");
		return -1;
	}

	spdk_thread_send_msg(spdk_thread, _ftl_dev_init_thread, thread);
	return 0;
}

static int
ftl_dev_init_threads(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	if (!opts->core_thread || !opts->read_thread) {
		return -1;
	}

	if (ftl_dev_init_thread(dev, &dev->core_thread, opts->core_thread, ftl_task_core, 0)) {
		SPDK_ERRLOG("Unable to initialize core thread\n");
		return -1;
	}

	if (ftl_dev_init_thread(dev, &dev->read_thread, opts->read_thread, ftl_task_read, 0)) {
		SPDK_ERRLOG("Unable to initialize read thread\n");
		return -1;
	}

	return 0;
}

static void
ftl_dev_free_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread)
{
	assert(thread->poller == NULL);

	spdk_nvme_ctrlr_free_io_qpair(thread->qpair);
	thread->thread = NULL;
	thread->qpair = NULL;
}

static int
ftl_dev_l2p_alloc(struct spdk_ftl_dev *dev)
{
	size_t addr_size;
	uint64_t i;

	if (dev->num_lbas == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Invalid l2p table size\n");
		return -1;
	}

	if (dev->l2p) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "L2p table already allocated\n");
		return -1;
	}

	addr_size = dev->ppa_len >= 32 ? 8 : 4;
	dev->l2p = malloc(dev->num_lbas * addr_size);
	if (!dev->l2p) {
		SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Failed to allocate l2p table\n");
		return -1;
	}

	for (i = 0; i < dev->num_lbas; ++i) {
		ftl_l2p_set(dev, i, ftl_to_ppa(FTL_PPA_INVALID));
	}

	return 0;
}

static void
ftl_init_complete(struct spdk_ftl_dev *dev)
{
	pthread_mutex_lock(&g_ftl_queue_lock);
	STAILQ_INSERT_HEAD(&g_ftl_queue, dev, stailq);
	pthread_mutex_unlock(&g_ftl_queue_lock);

	dev->initialized = 1;

	if (dev->init_cb) {
		dev->init_cb(dev, dev->init_arg, 0);
	}

	dev->init_cb = NULL;
	dev->init_arg = NULL;
}

static int
ftl_setup_initial_state(struct spdk_ftl_dev *dev)
{
	struct spdk_ftl_conf *conf = &dev->conf;
	size_t i;

	spdk_uuid_generate(&dev->uuid);

	dev->num_lbas = 0;
	for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
		dev->num_lbas += ftl_band_num_usable_lbks(&dev->bands[i]);
	}

	dev->num_lbas = (dev->num_lbas * (100 - conf->lba_rsvd)) / 100;

	if (ftl_dev_l2p_alloc(dev)) {
		SPDK_ERRLOG("Unable to init l2p table\n");
		return -1;
	}

	if (ftl_init_bands_state(dev)) {
		SPDK_ERRLOG("Unable to finish the initialization\n");
		return -1;
	}

	ftl_init_complete(dev);
	return 0;
}

struct ftl_init_fail_ctx {
	spdk_ftl_init_fn	cb;
	void			*arg;
};

static void
ftl_init_fail_cb(void *ctx, int status)
{
	struct ftl_init_fail_ctx *fail_cb = ctx;

	fail_cb->cb(NULL, fail_cb->arg, -ENODEV);
	free(fail_cb);
}

static void
ftl_init_fail(struct spdk_ftl_dev *dev)
{
	struct ftl_init_fail_ctx *fail_cb;

	fail_cb = malloc(sizeof(*fail_cb));
	if (!fail_cb) {
		SPDK_ERRLOG("Unable to allocate context to free the device\n");
		return;
	}

	fail_cb->cb = dev->init_cb;
	fail_cb->arg = dev->init_arg;
	dev->halt_cb = NULL;

	if (spdk_ftl_dev_free(dev, ftl_init_fail_cb, fail_cb)) {
		SPDK_ERRLOG("Unable to free the device\n");
		assert(0);
	}
}

static void
ftl_restore_device_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
{
	if (status) {
		SPDK_ERRLOG("Failed to restore the device from the SSD\n");
		goto error;
	}

	if (ftl_init_bands_state(dev)) {
		SPDK_ERRLOG("Unable to finish the initialization\n");
		goto error;
	}

	ftl_init_complete(dev);
	return;
error:
	ftl_init_fail(dev);
}

static void
ftl_restore_md_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status)
{
	if (status) {
		SPDK_ERRLOG("Failed to restore the metadata from the SSD\n");
		goto error;
	}

	/* After the metadata is read it should be possible to allocate the L2P */
	if (ftl_dev_l2p_alloc(dev)) {
		SPDK_ERRLOG("Failed to allocate the L2P\n");
		goto error;
	}

	if (ftl_restore_device(restore, ftl_restore_device_cb)) {
		SPDK_ERRLOG("Failed to start device restoration from the SSD\n");
		goto error;
	}

	return;
error:
	ftl_init_fail(dev);
}

static int
ftl_restore_state(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts)
{
	dev->uuid = opts->uuid;

	if (ftl_restore_md(dev, ftl_restore_md_cb)) {
		SPDK_ERRLOG("Failed to start metadata restoration from the SSD\n");
		return -1;
	}

	return 0;
}

static int
ftl_io_channel_create_cb(void *io_device, void *ctx)
{
	struct ftl_io_channel *ch = ctx;
	char mempool_name[32];
	struct spdk_ftl_dev *dev = io_device;

	snprintf(mempool_name, sizeof(mempool_name), "ftl_io_%p", ch);
	ch->elem_size = sizeof(struct ftl_md_io);
	ch->io_pool = spdk_mempool_create(mempool_name,
					  dev->conf.user_io_pool_size,
					  ch->elem_size,
					  0,
					  SPDK_ENV_SOCKET_ID_ANY);

	if (!ch->io_pool) {
		return -1;
	}

	return 0;
}

static void
ftl_io_channel_destroy_cb(void *io_device, void *ctx)
{
	struct ftl_io_channel *ch = ctx;

	spdk_mempool_free(ch->io_pool);
}

int
spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *opts, spdk_ftl_init_fn cb, void *cb_arg)
{
	struct spdk_ftl_dev *dev;

	if (!opts || !opts->ctrlr) {
		return -EINVAL;
	}

	dev = calloc(1, sizeof(*dev));
	if (!dev) {
		return -ENOMEM;
	}

	if (opts->conf) {
		if (ftl_conf_validate(opts->conf)) {
			SPDK_ERRLOG("Invalid configuration\n");
			goto fail_sync;
		}

		memcpy(&dev->conf, opts->conf, sizeof(dev->conf));
	} else {
		spdk_ftl_conf_init_defaults(&dev->conf);
	}

	spdk_io_device_register(dev, ftl_io_channel_create_cb, ftl_io_channel_destroy_cb,
				sizeof(struct ftl_io_channel),
				NULL);

	dev->ioch = spdk_get_io_channel(dev);
	dev->init_cb = cb;
	dev->init_arg = cb_arg;
	dev->range = opts->range;
	dev->limit = SPDK_FTL_LIMIT_MAX;
	dev->name = strdup(opts->name);
	if (!dev->name) {
		SPDK_ERRLOG("Unable to set device name\n");
		goto fail_sync;
	}

	if (ftl_dev_nvme_init(dev, opts)) {
		SPDK_ERRLOG("Unable to initialize NVMe structures\n");
		goto fail_sync;
	}

	/* In case of errors, we free all of the memory in ftl_dev_free_sync(), */
	/* so we don't have to clean up in each of the init functions. */
	if (ftl_dev_retrieve_geo(dev)) {
		SPDK_ERRLOG("Unable to retrieve geometry\n");
		goto fail_sync;
	}

	if (ftl_check_init_opts(opts, &dev->geo)) {
		SPDK_ERRLOG("Invalid device configuration\n");
		goto fail_sync;
	}

	if (ftl_dev_init_punits(dev)) {
		SPDK_ERRLOG("Unable to initialize LUNs\n");
		goto fail_sync;
	}

	if (ftl_init_wptr_list(dev)) {
		SPDK_ERRLOG("Unable to init wptr\n");
		goto fail_sync;
	}

	if (ftl_dev_init_bands(dev)) {
		SPDK_ERRLOG("Unable to initialize band array\n");
		goto fail_sync;
	}

	dev->rwb = ftl_rwb_init(&dev->conf, dev->geo.ws_opt, dev->md_size);
	if (!dev->rwb) {
		SPDK_ERRLOG("Unable to initialize rwb structures\n");
		goto fail_sync;
	}

	dev->reloc = ftl_reloc_init(dev);
	if (!dev->reloc) {
		SPDK_ERRLOG("Unable to initialize reloc structures\n");
		goto fail_sync;
	}

	if (ftl_dev_init_threads(dev, opts)) {
		SPDK_ERRLOG("Unable to initialize device threads\n");
		goto fail_sync;
	}

	if (opts->mode & SPDK_FTL_MODE_CREATE) {
		if (ftl_setup_initial_state(dev)) {
			SPDK_ERRLOG("Failed to setup initial state of the device\n");
			goto fail_async;
		}
	} else {
		if (ftl_restore_state(dev, opts)) {
			SPDK_ERRLOG("Unable to restore device's state from the SSD\n");
			goto fail_async;
		}
	}

	return 0;
fail_sync:
	ftl_dev_free_sync(dev);
	return -ENOMEM;
fail_async:
	ftl_init_fail(dev);
	return 0;
}

static void
_ftl_halt_defrag(void *arg)
{
	ftl_reloc_halt(((struct spdk_ftl_dev *)arg)->reloc);
}

static void
ftl_dev_free_sync(struct spdk_ftl_dev *dev)
{
	struct spdk_ftl_dev *iter;
	size_t i;

	if (!dev) {
		return;
	}

	pthread_mutex_lock(&g_ftl_queue_lock);
	STAILQ_FOREACH(iter, &g_ftl_queue, stailq) {
		if (iter == dev) {
			STAILQ_REMOVE(&g_ftl_queue, dev, spdk_ftl_dev, stailq);
			break;
		}
	}
	pthread_mutex_unlock(&g_ftl_queue_lock);

	ftl_dev_free_thread(dev, &dev->read_thread);
	ftl_dev_free_thread(dev, &dev->core_thread);

	assert(LIST_EMPTY(&dev->wptr_list));

	ftl_dev_dump_bands(dev);
	ftl_dev_dump_stats(dev);

	spdk_put_io_channel(dev->ioch);
	spdk_io_device_unregister(dev, NULL);

	if (dev->bands) {
		for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
			free(dev->bands[i].chunk_buf);
			spdk_bit_array_free(&dev->bands[i].md.vld_map);
		}
	}

	spdk_mempool_free(dev->lba_pool);

	ftl_rwb_free(dev->rwb);
	ftl_reloc_free(dev->reloc);

	free(dev->name);
	free(dev->punits);
	free(dev->bands);
	free(dev->l2p);
	free(dev);
}

static int
ftl_halt_poller(void *ctx)
{
	struct spdk_ftl_dev *dev = ctx;
	spdk_ftl_fn halt_cb = dev->halt_cb;
	void *halt_arg = dev->halt_arg;

	if (!dev->core_thread.poller && !dev->read_thread.poller) {
		spdk_poller_unregister(&dev->halt_poller);

		ftl_anm_unregister_device(dev);
		ftl_dev_free_sync(dev);

		if (halt_cb) {
			halt_cb(halt_arg, 0);
		}
	}

	return 0;
}

static void
ftl_add_halt_poller(void *ctx)
{
	struct spdk_ftl_dev *dev = ctx;

	_ftl_halt_defrag(dev);

	assert(!dev->halt_poller);
	dev->halt_poller = spdk_poller_register(ftl_halt_poller, dev, 100);
}

int
spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_fn cb, void *cb_arg)
{
	if (!dev || !cb) {
		return -EINVAL;
	}

	if (dev->halt_cb) {
		return -EBUSY;
	}

	dev->halt_cb = cb;
	dev->halt_arg = cb_arg;
	dev->halt = 1;

	spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_add_halt_poller, dev);
	return 0;
}

int
spdk_ftl_module_init(const struct ftl_module_init_opts *opts, spdk_ftl_fn cb, void *cb_arg)
{
	return ftl_anm_init(opts->anm_thread, cb, cb_arg);
}

int
spdk_ftl_module_fini(spdk_ftl_fn cb, void *cb_arg)
{
	return ftl_anm_free(cb, cb_arg);
}

SPDK_LOG_REGISTER_COMPONENT("ftl_init", SPDK_LOG_FTL_INIT)
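
/*
 * Illustrative usage sketch (kept as a comment so it does not affect the build):
 * a minimal, hedged example of how a caller might drive spdk_ftl_dev_init(),
 * based solely on the fields this file touches (ctrlr, trid, name, range, mode,
 * conf, core_thread, read_thread). Variable names such as ctrlr, trid,
 * core_thread and read_thread are assumed to be set up by the caller; no other
 * fields of spdk_ftl_dev_init_opts are assumed. spdk_ftl_module_init() is
 * presumably called beforehand to start the ANM thread.
 *
 *	static void
 *	init_done(struct spdk_ftl_dev *dev, void *cb_arg, int status)
 *	{
 *		// status == 0: dev is initialized and registered on g_ftl_queue
 *	}
 *
 *	struct spdk_ftl_conf conf;
 *	struct spdk_ftl_dev_init_opts opts = {};
 *
 *	spdk_ftl_conf_init_defaults(&conf);
 *	opts.ctrlr = ctrlr;			// attached OCSSD controller
 *	opts.trid = trid;			// compared in ftl_check_init_opts()
 *	opts.name = "ftl0";
 *	opts.range.begin = 0;			// parallel unit range owned by this device
 *	opts.range.end = 0;
 *	opts.mode = SPDK_FTL_MODE_CREATE;	// take ftl_setup_initial_state() path
 *	opts.conf = &conf;
 *	opts.core_thread = core_thread;		// spdk_thread handles owned by the caller
 *	opts.read_thread = read_thread;
 *
 *	if (spdk_ftl_dev_init(&opts, init_done, NULL)) {
 *		// synchronous failure (invalid arguments or allocation error);
 *		// otherwise init_done() reports the asynchronous result
 *	}
 */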