1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 #include "spdk/nvme.h" 36 #include "spdk/io_channel.h" 37 #include "spdk/bdev_module.h" 38 #include "spdk/string.h" 39 #include "spdk/likely.h" 40 #include "spdk_internal/log.h" 41 #include "spdk/ftl.h" 42 #include "spdk/likely.h" 43 #include "spdk/string.h" 44 45 #include "ftl_core.h" 46 #include "ftl_anm.h" 47 #include "ftl_io.h" 48 #include "ftl_reloc.h" 49 #include "ftl_rwb.h" 50 #include "ftl_band.h" 51 #include "ftl_debug.h" 52 53 #define FTL_CORE_RING_SIZE 4096 54 #define FTL_INIT_TIMEOUT 30 55 #define FTL_NSID 1 56 57 #define ftl_range_intersect(s1, e1, s2, e2) \ 58 ((s1) <= (e2) && (s2) <= (e1)) 59 60 struct ftl_admin_cmpl { 61 struct spdk_nvme_cpl status; 62 63 int complete; 64 }; 65 66 static STAILQ_HEAD(, spdk_ftl_dev) g_ftl_queue = STAILQ_HEAD_INITIALIZER(g_ftl_queue); 67 static pthread_mutex_t g_ftl_queue_lock = PTHREAD_MUTEX_INITIALIZER; 68 static const struct spdk_ftl_conf g_default_conf = { 69 .limits = { 70 /* 5 free bands / 0 % host writes */ 71 [SPDK_FTL_LIMIT_CRIT] = { .thld = 5, .limit = 0 }, 72 /* 10 free bands / 5 % host writes */ 73 [SPDK_FTL_LIMIT_HIGH] = { .thld = 10, .limit = 5 }, 74 /* 20 free bands / 40 % host writes */ 75 [SPDK_FTL_LIMIT_LOW] = { .thld = 20, .limit = 40 }, 76 /* 40 free bands / 100 % host writes - defrag starts running */ 77 [SPDK_FTL_LIMIT_START] = { .thld = 40, .limit = 100 }, 78 }, 79 /* 10 percent valid lbks */ 80 .invalid_thld = 10, 81 /* 20% spare lbks */ 82 .lba_rsvd = 20, 83 /* 6M write buffer */ 84 .rwb_size = 6 * 1024 * 1024, 85 /* 90% band fill threshold */ 86 .band_thld = 90, 87 /* Max 32 IO depth per band relocate */ 88 .max_reloc_qdepth = 32, 89 /* Max 3 active band relocates */ 90 .max_active_relocs = 3, 91 /* IO pool size per user thread (this should be adjusted to thread IO qdepth) */ 92 .user_io_pool_size = 2048, 93 /* Number of interleaving units per ws_opt */ 94 /* 1 for default and 3 for 3D TLC NAND */ 95 .num_interleave_units = 1, 96 /* 97 * If clear ftl will return error when restoring after a dirty shutdown 98 * If set, last band will be padded, ftl will restore based only on closed bands - this 99 * will result in lost data after recovery. 100 */ 101 .allow_open_bands = false, 102 .nv_cache = { 103 /* Maximum number of concurrent requests */ 104 .max_request_cnt = 2048, 105 /* Maximum number of blocks per request */ 106 .max_request_size = 16, 107 } 108 }; 109 110 static void ftl_dev_free_sync(struct spdk_ftl_dev *dev); 111 112 static void 113 ftl_admin_cb(void *ctx, const struct spdk_nvme_cpl *cpl) 114 { 115 struct ftl_admin_cmpl *cmpl = ctx; 116 117 cmpl->complete = 1; 118 cmpl->status = *cpl; 119 } 120 121 static int 122 ftl_band_init_md(struct ftl_band *band) 123 { 124 struct ftl_lba_map *lba_map = &band->lba_map; 125 126 lba_map->vld = spdk_bit_array_create(ftl_num_band_lbks(band->dev)); 127 if (!lba_map->vld) { 128 return -ENOMEM; 129 } 130 131 pthread_spin_init(&lba_map->lock, PTHREAD_PROCESS_PRIVATE); 132 ftl_band_md_clear(band); 133 return 0; 134 } 135 136 static int 137 ftl_check_conf(const struct spdk_ftl_conf *conf, 138 const struct spdk_ocssd_geometry_data *geo) 139 { 140 size_t i; 141 142 if (conf->invalid_thld >= 100) { 143 return -1; 144 } 145 if (conf->lba_rsvd >= 100) { 146 return -1; 147 } 148 if (conf->lba_rsvd == 0) { 149 return -1; 150 } 151 if (conf->rwb_size == 0) { 152 return -1; 153 } 154 if (conf->rwb_size % FTL_BLOCK_SIZE != 0) { 155 return -1; 156 } 157 if (geo->ws_opt % conf->num_interleave_units != 0) { 158 return -1; 159 } 160 161 for (i = 0; i < SPDK_FTL_LIMIT_MAX; ++i) { 162 if (conf->limits[i].limit > 100) { 163 return -1; 164 } 165 } 166 167 return 0; 168 } 169 170 static int 171 ftl_check_init_opts(const struct spdk_ftl_dev_init_opts *opts, 172 const struct spdk_ocssd_geometry_data *geo) 173 { 174 struct spdk_ftl_dev *dev; 175 size_t num_punits = geo->num_pu * geo->num_grp; 176 int rc = 0; 177 178 if (opts->range.begin > opts->range.end || opts->range.end >= num_punits) { 179 return -1; 180 } 181 182 if (ftl_check_conf(opts->conf, geo)) { 183 return -1; 184 } 185 186 pthread_mutex_lock(&g_ftl_queue_lock); 187 188 STAILQ_FOREACH(dev, &g_ftl_queue, stailq) { 189 if (spdk_nvme_transport_id_compare(&dev->trid, &opts->trid)) { 190 continue; 191 } 192 193 if (ftl_range_intersect(opts->range.begin, opts->range.end, 194 dev->range.begin, dev->range.end)) { 195 rc = -1; 196 goto out; 197 } 198 } 199 200 out: 201 pthread_mutex_unlock(&g_ftl_queue_lock); 202 return rc; 203 } 204 205 int 206 ftl_retrieve_chunk_info(struct spdk_ftl_dev *dev, struct ftl_ppa ppa, 207 struct spdk_ocssd_chunk_information_entry *info, 208 unsigned int num_entries) 209 { 210 volatile struct ftl_admin_cmpl cmpl = {}; 211 uint32_t nsid = spdk_nvme_ns_get_id(dev->ns); 212 uint64_t offset = (ppa.grp * dev->geo.num_pu + ppa.pu) * 213 dev->geo.num_chk + ppa.chk; 214 int rc; 215 216 rc = spdk_nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_OCSSD_LOG_CHUNK_INFO, nsid, 217 info, num_entries * sizeof(*info), 218 offset * sizeof(*info), 219 ftl_admin_cb, (void *)&cmpl); 220 if (spdk_unlikely(rc != 0)) { 221 SPDK_ERRLOG("spdk_nvme_ctrlr_cmd_get_log_page: %s\n", spdk_strerror(-rc)); 222 return -1; 223 } 224 225 while (!cmpl.complete) { 226 spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr); 227 } 228 229 if (spdk_nvme_cpl_is_error(&cmpl.status)) { 230 SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n", 231 cmpl.status.status.sc, cmpl.status.status.sct); 232 return -1; 233 } 234 235 return 0; 236 } 237 238 static int 239 ftl_retrieve_punit_chunk_info(struct spdk_ftl_dev *dev, const struct ftl_punit *punit, 240 struct spdk_ocssd_chunk_information_entry *info) 241 { 242 uint32_t i = 0; 243 unsigned int num_entries = FTL_BLOCK_SIZE / sizeof(*info); 244 struct ftl_ppa chunk_ppa = punit->start_ppa; 245 char ppa_buf[128]; 246 247 for (i = 0; i < dev->geo.num_chk; i += num_entries, chunk_ppa.chk += num_entries) { 248 if (num_entries > dev->geo.num_chk - i) { 249 num_entries = dev->geo.num_chk - i; 250 } 251 252 if (ftl_retrieve_chunk_info(dev, chunk_ppa, &info[i], num_entries)) { 253 SPDK_ERRLOG("Failed to retrieve chunk information @ppa: %s\n", 254 ftl_ppa2str(chunk_ppa, ppa_buf, sizeof(ppa_buf))); 255 return -1; 256 } 257 } 258 259 return 0; 260 } 261 262 static unsigned char 263 ftl_get_chunk_state(const struct spdk_ocssd_chunk_information_entry *info) 264 { 265 if (info->cs.free) { 266 return FTL_CHUNK_STATE_FREE; 267 } 268 269 if (info->cs.open) { 270 return FTL_CHUNK_STATE_OPEN; 271 } 272 273 if (info->cs.closed) { 274 return FTL_CHUNK_STATE_CLOSED; 275 } 276 277 if (info->cs.offline) { 278 return FTL_CHUNK_STATE_BAD; 279 } 280 281 assert(0 && "Invalid block state"); 282 return FTL_CHUNK_STATE_BAD; 283 } 284 285 static void 286 ftl_remove_empty_bands(struct spdk_ftl_dev *dev) 287 { 288 struct ftl_band *band, *temp_band; 289 290 /* Remove band from shut_bands list to prevent further processing */ 291 /* if all blocks on this band are bad */ 292 LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) { 293 if (!band->num_chunks) { 294 dev->num_bands--; 295 LIST_REMOVE(band, list_entry); 296 } 297 } 298 } 299 300 static int 301 ftl_dev_init_bands(struct spdk_ftl_dev *dev) 302 { 303 struct spdk_ocssd_chunk_information_entry *info; 304 struct ftl_band *band, *pband; 305 struct ftl_punit *punit; 306 struct ftl_chunk *chunk; 307 unsigned int i, j; 308 char buf[128]; 309 int rc = 0; 310 311 LIST_INIT(&dev->free_bands); 312 LIST_INIT(&dev->shut_bands); 313 314 dev->num_free = 0; 315 dev->num_bands = ftl_dev_num_bands(dev); 316 dev->bands = calloc(ftl_dev_num_bands(dev), sizeof(*dev->bands)); 317 if (!dev->bands) { 318 return -1; 319 } 320 321 info = calloc(dev->geo.num_chk, sizeof(*info)); 322 if (!info) { 323 return -1; 324 } 325 326 for (i = 0; i < ftl_dev_num_bands(dev); ++i) { 327 band = &dev->bands[i]; 328 band->id = i; 329 band->dev = dev; 330 band->state = FTL_BAND_STATE_CLOSED; 331 332 if (LIST_EMPTY(&dev->shut_bands)) { 333 LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry); 334 } else { 335 LIST_INSERT_AFTER(pband, band, list_entry); 336 } 337 pband = band; 338 339 CIRCLEQ_INIT(&band->chunks); 340 band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf)); 341 if (!band->chunk_buf) { 342 SPDK_ERRLOG("Failed to allocate block state table for band: [%u]\n", i); 343 rc = -1; 344 goto out; 345 } 346 347 rc = ftl_band_init_md(band); 348 if (rc) { 349 SPDK_ERRLOG("Failed to initialize metadata structures for band [%u]\n", i); 350 goto out; 351 } 352 353 band->reloc_bitmap = spdk_bit_array_create(ftl_dev_num_bands(dev)); 354 if (!band->reloc_bitmap) { 355 SPDK_ERRLOG("Failed to allocate band relocation bitmap\n"); 356 goto out; 357 } 358 } 359 360 for (i = 0; i < ftl_dev_num_punits(dev); ++i) { 361 punit = &dev->punits[i]; 362 363 rc = ftl_retrieve_punit_chunk_info(dev, punit, info); 364 if (rc) { 365 SPDK_ERRLOG("Failed to retrieve bbt for @ppa: %s [%lu]\n", 366 ftl_ppa2str(punit->start_ppa, buf, sizeof(buf)), 367 ftl_ppa_addr_pack(dev, punit->start_ppa)); 368 goto out; 369 } 370 371 for (j = 0; j < ftl_dev_num_bands(dev); ++j) { 372 band = &dev->bands[j]; 373 chunk = &band->chunk_buf[i]; 374 chunk->pos = i; 375 chunk->state = ftl_get_chunk_state(&info[j]); 376 chunk->punit = punit; 377 chunk->start_ppa = punit->start_ppa; 378 chunk->start_ppa.chk = band->id; 379 chunk->write_offset = ftl_dev_lbks_in_chunk(dev); 380 381 if (chunk->state != FTL_CHUNK_STATE_BAD) { 382 band->num_chunks++; 383 CIRCLEQ_INSERT_TAIL(&band->chunks, chunk, circleq); 384 } 385 } 386 } 387 388 for (i = 0; i < ftl_dev_num_bands(dev); ++i) { 389 band = &dev->bands[i]; 390 band->tail_md_ppa = ftl_band_tail_md_ppa(band); 391 } 392 393 ftl_remove_empty_bands(dev); 394 out: 395 free(info); 396 return rc; 397 } 398 399 static int 400 ftl_dev_init_punits(struct spdk_ftl_dev *dev) 401 { 402 unsigned int i, punit; 403 404 dev->punits = calloc(ftl_dev_num_punits(dev), sizeof(*dev->punits)); 405 if (!dev->punits) { 406 return -1; 407 } 408 409 for (i = 0; i < ftl_dev_num_punits(dev); ++i) { 410 dev->punits[i].dev = dev; 411 punit = dev->range.begin + i; 412 413 dev->punits[i].start_ppa.ppa = 0; 414 dev->punits[i].start_ppa.grp = punit % dev->geo.num_grp; 415 dev->punits[i].start_ppa.pu = punit / dev->geo.num_grp; 416 } 417 418 return 0; 419 } 420 421 static int 422 ftl_dev_retrieve_geo(struct spdk_ftl_dev *dev) 423 { 424 volatile struct ftl_admin_cmpl cmpl = {}; 425 uint32_t nsid = spdk_nvme_ns_get_id(dev->ns); 426 427 if (spdk_nvme_ocssd_ctrlr_cmd_geometry(dev->ctrlr, nsid, &dev->geo, sizeof(dev->geo), 428 ftl_admin_cb, (void *)&cmpl)) { 429 SPDK_ERRLOG("Unable to retrieve geometry\n"); 430 return -1; 431 } 432 433 /* TODO: add a timeout */ 434 while (!cmpl.complete) { 435 spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr); 436 } 437 438 if (spdk_nvme_cpl_is_error(&cmpl.status)) { 439 SPDK_ERRLOG("Unexpected status code: [%d], status code type: [%d]\n", 440 cmpl.status.status.sc, cmpl.status.status.sct); 441 return -1; 442 } 443 444 /* TODO: add sanity checks for the geo */ 445 dev->ppa_len = dev->geo.lbaf.grp_len + 446 dev->geo.lbaf.pu_len + 447 dev->geo.lbaf.chk_len + 448 dev->geo.lbaf.lbk_len; 449 450 dev->ppaf.lbk_offset = 0; 451 dev->ppaf.lbk_mask = (1 << dev->geo.lbaf.lbk_len) - 1; 452 dev->ppaf.chk_offset = dev->ppaf.lbk_offset + dev->geo.lbaf.lbk_len; 453 dev->ppaf.chk_mask = (1 << dev->geo.lbaf.chk_len) - 1; 454 dev->ppaf.pu_offset = dev->ppaf.chk_offset + dev->geo.lbaf.chk_len; 455 dev->ppaf.pu_mask = (1 << dev->geo.lbaf.pu_len) - 1; 456 dev->ppaf.grp_offset = dev->ppaf.pu_offset + dev->geo.lbaf.pu_len; 457 dev->ppaf.grp_mask = (1 << dev->geo.lbaf.grp_len) - 1; 458 459 /* We're using optimal write size as our xfer size */ 460 dev->xfer_size = dev->geo.ws_opt; 461 462 return 0; 463 } 464 465 static int 466 ftl_dev_nvme_init(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts) 467 { 468 uint32_t block_size; 469 470 dev->ctrlr = opts->ctrlr; 471 472 if (spdk_nvme_ctrlr_get_num_ns(dev->ctrlr) != 1) { 473 SPDK_ERRLOG("Unsupported number of namespaces\n"); 474 return -1; 475 } 476 477 dev->ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, FTL_NSID); 478 if (dev->ns == NULL) { 479 SPDK_ERRLOG("Invalid NS (%"PRIu32")\n", FTL_NSID); 480 return -1; 481 } 482 dev->trid = opts->trid; 483 dev->md_size = spdk_nvme_ns_get_md_size(dev->ns); 484 485 block_size = spdk_nvme_ns_get_extended_sector_size(dev->ns); 486 if (block_size != FTL_BLOCK_SIZE) { 487 SPDK_ERRLOG("Unsupported block size (%"PRIu32")\n", block_size); 488 return -1; 489 } 490 491 if (dev->md_size % sizeof(uint32_t) != 0) { 492 /* Metadata pointer must be dword aligned */ 493 SPDK_ERRLOG("Unsupported metadata size (%zu)\n", dev->md_size); 494 return -1; 495 } 496 497 return 0; 498 } 499 500 static int 501 ftl_dev_init_nv_cache(struct spdk_ftl_dev *dev, struct spdk_bdev_desc *bdev_desc) 502 { 503 struct spdk_bdev *bdev; 504 struct spdk_ftl_conf *conf = &dev->conf; 505 struct ftl_nv_cache *nv_cache = &dev->nv_cache; 506 char pool_name[128]; 507 int rc; 508 509 if (!bdev_desc) { 510 return 0; 511 } 512 513 bdev = spdk_bdev_desc_get_bdev(bdev_desc); 514 SPDK_INFOLOG(SPDK_LOG_FTL_INIT, "Using %s as write buffer cache\n", 515 spdk_bdev_get_name(bdev)); 516 517 if (spdk_bdev_get_block_size(bdev) != FTL_BLOCK_SIZE) { 518 SPDK_ERRLOG("Unsupported block size (%d)\n", spdk_bdev_get_block_size(bdev)); 519 return -1; 520 } 521 522 if (!spdk_bdev_is_md_separate(bdev)) { 523 SPDK_ERRLOG("Bdev %s doesn't support separate metadata buffer IO\n", 524 spdk_bdev_get_name(bdev)); 525 return -1; 526 } 527 528 if (spdk_bdev_get_md_size(bdev) < sizeof(uint64_t)) { 529 SPDK_ERRLOG("Bdev's %s metadata is too small (%"PRIu32")\n", 530 spdk_bdev_get_name(bdev), spdk_bdev_get_md_size(bdev)); 531 return -1; 532 } 533 534 if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) { 535 SPDK_ERRLOG("Unsupported DIF type used by bdev %s\n", 536 spdk_bdev_get_name(bdev)); 537 return -1; 538 } 539 540 /* The cache needs to be capable of storing at least two full bands. This requirement comes 541 * from the fact that cache works as a protection against power loss, so before the data 542 * inside the cache can be overwritten, the band it's stored on has to be closed. Plus one 543 * extra block is needed to store the header. 544 */ 545 if (spdk_bdev_get_num_blocks(bdev) < ftl_num_band_lbks(dev) * 2 + 1) { 546 SPDK_ERRLOG("Insufficient number of blocks for write buffer cache (available: %" 547 PRIu64", required: %"PRIu64")\n", spdk_bdev_get_num_blocks(bdev), 548 ftl_num_band_lbks(dev) * 2 + 1); 549 return -1; 550 } 551 552 rc = snprintf(pool_name, sizeof(pool_name), "ftl-nvpool-%p", dev); 553 if (rc < 0 || rc >= 128) { 554 return -1; 555 } 556 557 nv_cache->md_pool = spdk_mempool_create(pool_name, conf->nv_cache.max_request_cnt, 558 spdk_bdev_get_md_size(bdev) * 559 conf->nv_cache.max_request_size, 560 SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, 561 SPDK_ENV_SOCKET_ID_ANY); 562 if (!nv_cache->md_pool) { 563 SPDK_ERRLOG("Failed to initialize non-volatile cache metadata pool\n"); 564 return -1; 565 } 566 567 nv_cache->dma_buf = spdk_dma_zmalloc(FTL_BLOCK_SIZE, spdk_bdev_get_buf_align(bdev), NULL); 568 if (!nv_cache->dma_buf) { 569 SPDK_ERRLOG("Memory allocation failure\n"); 570 return -1; 571 } 572 573 if (pthread_spin_init(&nv_cache->lock, PTHREAD_PROCESS_PRIVATE)) { 574 SPDK_ERRLOG("Failed to initialize cache lock\n"); 575 return -1; 576 } 577 578 nv_cache->bdev_desc = bdev_desc; 579 nv_cache->current_addr = FTL_NV_CACHE_DATA_OFFSET; 580 nv_cache->num_data_blocks = spdk_bdev_get_num_blocks(bdev) - 1; 581 nv_cache->num_available = nv_cache->num_data_blocks; 582 nv_cache->ready = false; 583 584 return 0; 585 } 586 587 void 588 spdk_ftl_conf_init_defaults(struct spdk_ftl_conf *conf) 589 { 590 *conf = g_default_conf; 591 } 592 593 static void 594 ftl_lba_map_request_ctor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx) 595 { 596 struct ftl_lba_map_request *request = obj; 597 struct spdk_ftl_dev *dev = opaque; 598 599 request->segments = spdk_bit_array_create(spdk_divide_round_up( 600 ftl_num_band_lbks(dev), FTL_NUM_LBA_IN_BLOCK)); 601 } 602 603 static int 604 ftl_init_lba_map_pools(struct spdk_ftl_dev *dev) 605 { 606 #define POOL_NAME_LEN 128 607 char pool_name[POOL_NAME_LEN]; 608 int rc; 609 610 rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lba-pool"); 611 if (rc < 0 || rc >= POOL_NAME_LEN) { 612 return -ENAMETOOLONG; 613 } 614 615 /* We need to reserve at least 2 buffers for band close / open sequence 616 * alone, plus additional (8) buffers for handling write errors. 617 * TODO: This memory pool is utilized only by core thread - it introduce 618 * unnecessary overhead and should be replaced by different data structure. 619 */ 620 dev->lba_pool = spdk_mempool_create(pool_name, 2 + 8, 621 ftl_lba_map_pool_elem_size(dev), 622 SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, 623 SPDK_ENV_SOCKET_ID_ANY); 624 if (!dev->lba_pool) { 625 return -ENOMEM; 626 } 627 628 rc = snprintf(pool_name, sizeof(pool_name), "%s-%s", dev->name, "ocssd-lbareq-pool"); 629 if (rc < 0 || rc >= POOL_NAME_LEN) { 630 return -ENAMETOOLONG; 631 } 632 633 dev->lba_request_pool = spdk_mempool_create_ctor(pool_name, 634 dev->conf.max_reloc_qdepth * dev->conf.max_active_relocs, 635 sizeof(struct ftl_lba_map_request), 636 SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, 637 SPDK_ENV_SOCKET_ID_ANY, 638 ftl_lba_map_request_ctor, 639 dev); 640 if (!dev->lba_request_pool) { 641 return -ENOMEM; 642 } 643 644 return 0; 645 } 646 647 static void 648 ftl_init_wptr_list(struct spdk_ftl_dev *dev) 649 { 650 LIST_INIT(&dev->wptr_list); 651 LIST_INIT(&dev->flush_list); 652 LIST_INIT(&dev->band_flush_list); 653 } 654 655 static size_t 656 ftl_dev_band_max_seq(struct spdk_ftl_dev *dev) 657 { 658 struct ftl_band *band; 659 size_t seq = 0; 660 661 LIST_FOREACH(band, &dev->shut_bands, list_entry) { 662 if (band->seq > seq) { 663 seq = band->seq; 664 } 665 } 666 667 return seq; 668 } 669 670 static void 671 _ftl_init_bands_state(void *ctx) 672 { 673 struct ftl_band *band, *temp_band; 674 struct spdk_ftl_dev *dev = ctx; 675 676 dev->seq = ftl_dev_band_max_seq(dev); 677 678 LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) { 679 if (!band->lba_map.num_vld) { 680 ftl_band_set_state(band, FTL_BAND_STATE_FREE); 681 } 682 } 683 684 ftl_reloc_resume(dev->reloc); 685 /* Clear the limit applications as they're incremented incorrectly by */ 686 /* the initialization code */ 687 memset(dev->stats.limits, 0, sizeof(dev->stats.limits)); 688 } 689 690 static int 691 ftl_init_num_free_bands(struct spdk_ftl_dev *dev) 692 { 693 struct ftl_band *band; 694 int cnt = 0; 695 696 LIST_FOREACH(band, &dev->shut_bands, list_entry) { 697 if (band->num_chunks && !band->lba_map.num_vld) { 698 cnt++; 699 } 700 } 701 return cnt; 702 } 703 704 static int 705 ftl_init_bands_state(struct spdk_ftl_dev *dev) 706 { 707 /* TODO: Should we abort initialization or expose read only device */ 708 /* if there is no free bands? */ 709 /* If we abort initialization should we depend on condition that */ 710 /* we have no free bands or should we have some minimal number of */ 711 /* free bands? */ 712 if (!ftl_init_num_free_bands(dev)) { 713 return -1; 714 } 715 716 spdk_thread_send_msg(ftl_get_core_thread(dev), _ftl_init_bands_state, dev); 717 return 0; 718 } 719 720 static void 721 _ftl_dev_init_thread(void *ctx) 722 { 723 struct ftl_thread *thread = ctx; 724 struct spdk_ftl_dev *dev = thread->dev; 725 726 thread->poller = spdk_poller_register(thread->poller_fn, thread, thread->period_us); 727 if (!thread->poller) { 728 SPDK_ERRLOG("Unable to register poller\n"); 729 assert(0); 730 } 731 732 if (spdk_get_thread() == ftl_get_core_thread(dev)) { 733 ftl_anm_register_device(dev, ftl_process_anm_event); 734 } 735 736 thread->ioch = spdk_get_io_channel(dev); 737 } 738 739 static int 740 ftl_dev_init_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread, 741 struct spdk_thread *spdk_thread, spdk_poller_fn fn, uint64_t period_us) 742 { 743 thread->dev = dev; 744 thread->poller_fn = fn; 745 thread->thread = spdk_thread; 746 thread->period_us = period_us; 747 748 thread->qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0); 749 if (!thread->qpair) { 750 SPDK_ERRLOG("Unable to initialize qpair\n"); 751 return -1; 752 } 753 754 spdk_thread_send_msg(spdk_thread, _ftl_dev_init_thread, thread); 755 return 0; 756 } 757 758 static int 759 ftl_dev_init_threads(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts) 760 { 761 if (!opts->core_thread || !opts->read_thread) { 762 return -1; 763 } 764 765 if (ftl_dev_init_thread(dev, &dev->core_thread, opts->core_thread, ftl_task_core, 0)) { 766 SPDK_ERRLOG("Unable to initialize core thread\n"); 767 return -1; 768 } 769 770 if (ftl_dev_init_thread(dev, &dev->read_thread, opts->read_thread, ftl_task_read, 0)) { 771 SPDK_ERRLOG("Unable to initialize read thread\n"); 772 return -1; 773 } 774 775 return 0; 776 } 777 778 static void 779 ftl_dev_free_thread(struct spdk_ftl_dev *dev, struct ftl_thread *thread) 780 { 781 assert(thread->poller == NULL); 782 783 spdk_put_io_channel(thread->ioch); 784 spdk_nvme_ctrlr_free_io_qpair(thread->qpair); 785 thread->thread = NULL; 786 thread->ioch = NULL; 787 thread->qpair = NULL; 788 } 789 790 static int 791 ftl_dev_l2p_alloc(struct spdk_ftl_dev *dev) 792 { 793 size_t addr_size; 794 uint64_t i; 795 796 if (dev->num_lbas == 0) { 797 SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Invalid l2p table size\n"); 798 return -1; 799 } 800 801 if (dev->l2p) { 802 SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "L2p table already allocated\n"); 803 return -1; 804 } 805 806 addr_size = dev->ppa_len >= 32 ? 8 : 4; 807 dev->l2p = malloc(dev->num_lbas * addr_size); 808 if (!dev->l2p) { 809 SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Failed to allocate l2p table\n"); 810 return -1; 811 } 812 813 for (i = 0; i < dev->num_lbas; ++i) { 814 ftl_l2p_set(dev, i, ftl_to_ppa(FTL_PPA_INVALID)); 815 } 816 817 return 0; 818 } 819 820 static void 821 ftl_call_init_complete_cb(void *_ctx) 822 { 823 struct ftl_init_context *ctx = _ctx; 824 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(ctx, struct spdk_ftl_dev, init_ctx); 825 826 if (ctx->cb_fn != NULL) { 827 ctx->cb_fn(dev, ctx->cb_arg, 0); 828 } 829 } 830 831 static void 832 ftl_init_complete(struct spdk_ftl_dev *dev) 833 { 834 pthread_mutex_lock(&g_ftl_queue_lock); 835 STAILQ_INSERT_HEAD(&g_ftl_queue, dev, stailq); 836 pthread_mutex_unlock(&g_ftl_queue_lock); 837 838 dev->initialized = 1; 839 840 spdk_thread_send_msg(dev->init_ctx.thread, ftl_call_init_complete_cb, &dev->init_ctx); 841 } 842 843 static void 844 ftl_init_fail_cb(struct spdk_ftl_dev *dev, void *_ctx, int status) 845 { 846 struct ftl_init_context *ctx = _ctx; 847 848 if (ctx->cb_fn != NULL) { 849 ctx->cb_fn(NULL, ctx->cb_arg, -ENODEV); 850 } 851 852 free(ctx); 853 } 854 855 static int _spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg, 856 struct spdk_thread *thread); 857 858 static void 859 ftl_init_fail(struct spdk_ftl_dev *dev) 860 { 861 struct ftl_init_context *ctx; 862 863 ctx = malloc(sizeof(*ctx)); 864 if (!ctx) { 865 SPDK_ERRLOG("Unable to allocate context to free the device\n"); 866 return; 867 } 868 869 *ctx = dev->init_ctx; 870 if (_spdk_ftl_dev_free(dev, ftl_init_fail_cb, ctx, ctx->thread)) { 871 SPDK_ERRLOG("Unable to free the device\n"); 872 assert(0); 873 } 874 } 875 876 static void 877 ftl_write_nv_cache_md_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 878 { 879 struct spdk_ftl_dev *dev = cb_arg; 880 881 spdk_bdev_free_io(bdev_io); 882 if (spdk_unlikely(!success)) { 883 SPDK_ERRLOG("Writing non-volatile cache's metadata header failed\n"); 884 ftl_init_fail(dev); 885 return; 886 } 887 888 dev->nv_cache.ready = true; 889 ftl_init_complete(dev); 890 } 891 892 static void 893 ftl_clear_nv_cache_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 894 { 895 struct spdk_ftl_dev *dev = cb_arg; 896 struct ftl_nv_cache *nv_cache = &dev->nv_cache; 897 898 spdk_bdev_free_io(bdev_io); 899 if (spdk_unlikely(!success)) { 900 SPDK_ERRLOG("Unable to clear the non-volatile cache bdev\n"); 901 ftl_init_fail(dev); 902 return; 903 } 904 905 nv_cache->phase = 1; 906 if (ftl_nv_cache_write_header(nv_cache, false, ftl_write_nv_cache_md_cb, dev)) { 907 SPDK_ERRLOG("Unable to write non-volatile cache metadata header\n"); 908 ftl_init_fail(dev); 909 } 910 } 911 912 static int 913 ftl_setup_initial_state(struct spdk_ftl_dev *dev) 914 { 915 struct spdk_ftl_conf *conf = &dev->conf; 916 size_t i; 917 int rc; 918 919 spdk_uuid_generate(&dev->uuid); 920 921 dev->num_lbas = 0; 922 for (i = 0; i < ftl_dev_num_bands(dev); ++i) { 923 dev->num_lbas += ftl_band_num_usable_lbks(&dev->bands[i]); 924 } 925 926 dev->num_lbas = (dev->num_lbas * (100 - conf->lba_rsvd)) / 100; 927 928 if (ftl_dev_l2p_alloc(dev)) { 929 SPDK_ERRLOG("Unable to init l2p table\n"); 930 return -1; 931 } 932 933 if (ftl_init_bands_state(dev)) { 934 SPDK_ERRLOG("Unable to finish the initialization\n"); 935 return -1; 936 } 937 938 if (!ftl_dev_has_nv_cache(dev)) { 939 ftl_init_complete(dev); 940 } else { 941 rc = ftl_nv_cache_scrub(&dev->nv_cache, ftl_clear_nv_cache_cb, dev); 942 if (spdk_unlikely(rc != 0)) { 943 SPDK_ERRLOG("Unable to clear the non-volatile cache bdev: %s\n", 944 spdk_strerror(-rc)); 945 return -1; 946 } 947 } 948 949 return 0; 950 } 951 952 static void 953 ftl_restore_nv_cache_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status) 954 { 955 if (spdk_unlikely(status != 0)) { 956 SPDK_ERRLOG("Failed to restore the non-volatile cache state\n"); 957 ftl_init_fail(dev); 958 return; 959 } 960 961 ftl_init_complete(dev); 962 } 963 964 static void 965 ftl_restore_device_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status) 966 { 967 if (status) { 968 SPDK_ERRLOG("Failed to restore the device from the SSD\n"); 969 ftl_init_fail(dev); 970 return; 971 } 972 973 if (ftl_init_bands_state(dev)) { 974 SPDK_ERRLOG("Unable to finish the initialization\n"); 975 ftl_init_fail(dev); 976 return; 977 } 978 979 if (!ftl_dev_has_nv_cache(dev)) { 980 ftl_init_complete(dev); 981 return; 982 } 983 984 ftl_restore_nv_cache(restore, ftl_restore_nv_cache_cb); 985 } 986 987 static void 988 ftl_restore_md_cb(struct spdk_ftl_dev *dev, struct ftl_restore *restore, int status) 989 { 990 if (status) { 991 SPDK_ERRLOG("Failed to restore the metadata from the SSD\n"); 992 goto error; 993 } 994 995 /* After the metadata is read it should be possible to allocate the L2P */ 996 if (ftl_dev_l2p_alloc(dev)) { 997 SPDK_ERRLOG("Failed to allocate the L2P\n"); 998 goto error; 999 } 1000 1001 if (ftl_restore_device(restore, ftl_restore_device_cb)) { 1002 SPDK_ERRLOG("Failed to start device restoration from the SSD\n"); 1003 goto error; 1004 } 1005 1006 return; 1007 error: 1008 ftl_init_fail(dev); 1009 } 1010 1011 static int 1012 ftl_restore_state(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_init_opts *opts) 1013 { 1014 dev->uuid = opts->uuid; 1015 1016 if (ftl_restore_md(dev, ftl_restore_md_cb)) { 1017 SPDK_ERRLOG("Failed to start metadata restoration from the SSD\n"); 1018 return -1; 1019 } 1020 1021 return 0; 1022 } 1023 1024 static int 1025 ftl_io_channel_create_cb(void *io_device, void *ctx) 1026 { 1027 struct spdk_ftl_dev *dev = io_device; 1028 struct ftl_io_channel *ioch = ctx; 1029 char mempool_name[32]; 1030 1031 snprintf(mempool_name, sizeof(mempool_name), "ftl_io_%p", ioch); 1032 ioch->cache_ioch = NULL; 1033 ioch->dev = dev; 1034 ioch->elem_size = sizeof(struct ftl_md_io); 1035 ioch->io_pool = spdk_mempool_create(mempool_name, 1036 dev->conf.user_io_pool_size, 1037 ioch->elem_size, 1038 0, 1039 SPDK_ENV_SOCKET_ID_ANY); 1040 if (!ioch->io_pool) { 1041 SPDK_ERRLOG("Failed to create IO channel's IO pool\n"); 1042 return -1; 1043 } 1044 1045 if (ftl_dev_has_nv_cache(dev)) { 1046 ioch->cache_ioch = spdk_bdev_get_io_channel(dev->nv_cache.bdev_desc); 1047 if (!ioch->cache_ioch) { 1048 SPDK_ERRLOG("Failed to create cache IO channel\n"); 1049 spdk_mempool_free(ioch->io_pool); 1050 return -1; 1051 } 1052 } 1053 1054 return 0; 1055 } 1056 1057 static void 1058 ftl_io_channel_destroy_cb(void *io_device, void *ctx) 1059 { 1060 struct ftl_io_channel *ioch = ctx; 1061 1062 spdk_mempool_free(ioch->io_pool); 1063 1064 if (ioch->cache_ioch) { 1065 spdk_put_io_channel(ioch->cache_ioch); 1066 } 1067 } 1068 1069 static int 1070 ftl_dev_init_io_channel(struct spdk_ftl_dev *dev) 1071 { 1072 spdk_io_device_register(dev, ftl_io_channel_create_cb, ftl_io_channel_destroy_cb, 1073 sizeof(struct ftl_io_channel), 1074 NULL); 1075 1076 return 0; 1077 } 1078 1079 int 1080 spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *_opts, spdk_ftl_init_fn cb_fn, void *cb_arg) 1081 { 1082 struct spdk_ftl_dev *dev; 1083 struct spdk_ftl_dev_init_opts opts = *_opts; 1084 1085 dev = calloc(1, sizeof(*dev)); 1086 if (!dev) { 1087 return -ENOMEM; 1088 } 1089 1090 if (!opts.conf) { 1091 opts.conf = &g_default_conf; 1092 } 1093 1094 TAILQ_INIT(&dev->retry_queue); 1095 dev->conf = *opts.conf; 1096 dev->init_ctx.cb_fn = cb_fn; 1097 dev->init_ctx.cb_arg = cb_arg; 1098 dev->init_ctx.thread = spdk_get_thread(); 1099 dev->range = opts.range; 1100 dev->limit = SPDK_FTL_LIMIT_MAX; 1101 1102 dev->name = strdup(opts.name); 1103 if (!dev->name) { 1104 SPDK_ERRLOG("Unable to set device name\n"); 1105 goto fail_sync; 1106 } 1107 1108 if (ftl_dev_nvme_init(dev, &opts)) { 1109 SPDK_ERRLOG("Unable to initialize NVMe structures\n"); 1110 goto fail_sync; 1111 } 1112 1113 /* In case of errors, we free all of the memory in ftl_dev_free_sync(), */ 1114 /* so we don't have to clean up in each of the init functions. */ 1115 if (ftl_dev_retrieve_geo(dev)) { 1116 SPDK_ERRLOG("Unable to retrieve geometry\n"); 1117 goto fail_sync; 1118 } 1119 1120 if (ftl_check_init_opts(&opts, &dev->geo)) { 1121 SPDK_ERRLOG("Invalid device configuration\n"); 1122 goto fail_sync; 1123 } 1124 1125 if (ftl_dev_init_punits(dev)) { 1126 SPDK_ERRLOG("Unable to initialize LUNs\n"); 1127 goto fail_sync; 1128 } 1129 1130 if (ftl_init_lba_map_pools(dev)) { 1131 SPDK_ERRLOG("Unable to init LBA map pools\n"); 1132 goto fail_sync; 1133 } 1134 1135 ftl_init_wptr_list(dev); 1136 1137 if (ftl_dev_init_bands(dev)) { 1138 SPDK_ERRLOG("Unable to initialize band array\n"); 1139 goto fail_sync; 1140 } 1141 1142 if (ftl_dev_init_nv_cache(dev, opts.cache_bdev_desc)) { 1143 SPDK_ERRLOG("Unable to initialize persistent cache\n"); 1144 goto fail_sync; 1145 } 1146 1147 dev->rwb = ftl_rwb_init(&dev->conf, dev->geo.ws_opt, dev->md_size, ftl_dev_num_punits(dev)); 1148 if (!dev->rwb) { 1149 SPDK_ERRLOG("Unable to initialize rwb structures\n"); 1150 goto fail_sync; 1151 } 1152 1153 dev->reloc = ftl_reloc_init(dev); 1154 if (!dev->reloc) { 1155 SPDK_ERRLOG("Unable to initialize reloc structures\n"); 1156 goto fail_sync; 1157 } 1158 1159 if (ftl_dev_init_io_channel(dev)) { 1160 SPDK_ERRLOG("Unable to initialize IO channels\n"); 1161 goto fail_sync; 1162 } 1163 1164 if (ftl_dev_init_threads(dev, &opts)) { 1165 SPDK_ERRLOG("Unable to initialize device threads\n"); 1166 goto fail_sync; 1167 } 1168 1169 if (opts.mode & SPDK_FTL_MODE_CREATE) { 1170 if (ftl_setup_initial_state(dev)) { 1171 SPDK_ERRLOG("Failed to setup initial state of the device\n"); 1172 goto fail_async; 1173 } 1174 } else { 1175 if (ftl_restore_state(dev, &opts)) { 1176 SPDK_ERRLOG("Unable to restore device's state from the SSD\n"); 1177 goto fail_async; 1178 } 1179 } 1180 1181 return 0; 1182 fail_sync: 1183 ftl_dev_free_sync(dev); 1184 return -ENOMEM; 1185 fail_async: 1186 ftl_init_fail(dev); 1187 return 0; 1188 } 1189 1190 static void 1191 _ftl_halt_defrag(void *arg) 1192 { 1193 ftl_reloc_halt(((struct spdk_ftl_dev *)arg)->reloc); 1194 } 1195 1196 static void 1197 ftl_lba_map_request_dtor(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx) 1198 { 1199 struct ftl_lba_map_request *request = obj; 1200 1201 spdk_bit_array_free(&request->segments); 1202 } 1203 1204 static void 1205 ftl_dev_free_sync(struct spdk_ftl_dev *dev) 1206 { 1207 struct spdk_ftl_dev *iter; 1208 size_t i; 1209 1210 if (!dev) { 1211 return; 1212 } 1213 1214 pthread_mutex_lock(&g_ftl_queue_lock); 1215 STAILQ_FOREACH(iter, &g_ftl_queue, stailq) { 1216 if (iter == dev) { 1217 STAILQ_REMOVE(&g_ftl_queue, dev, spdk_ftl_dev, stailq); 1218 break; 1219 } 1220 } 1221 pthread_mutex_unlock(&g_ftl_queue_lock); 1222 1223 assert(LIST_EMPTY(&dev->wptr_list)); 1224 assert(ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) == 0); 1225 assert(ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER) == 0); 1226 1227 ftl_dev_dump_bands(dev); 1228 ftl_dev_dump_stats(dev); 1229 1230 spdk_io_device_unregister(dev, NULL); 1231 1232 if (dev->bands) { 1233 for (i = 0; i < ftl_dev_num_bands(dev); ++i) { 1234 free(dev->bands[i].chunk_buf); 1235 spdk_bit_array_free(&dev->bands[i].lba_map.vld); 1236 spdk_bit_array_free(&dev->bands[i].reloc_bitmap); 1237 } 1238 } 1239 1240 spdk_dma_free(dev->nv_cache.dma_buf); 1241 1242 spdk_mempool_free(dev->lba_pool); 1243 spdk_mempool_free(dev->nv_cache.md_pool); 1244 if (dev->lba_request_pool) { 1245 spdk_mempool_obj_iter(dev->lba_request_pool, ftl_lba_map_request_dtor, NULL); 1246 } 1247 spdk_mempool_free(dev->lba_request_pool); 1248 1249 ftl_rwb_free(dev->rwb); 1250 ftl_reloc_free(dev->reloc); 1251 1252 free(dev->name); 1253 free(dev->punits); 1254 free(dev->bands); 1255 free(dev->l2p); 1256 free(dev); 1257 } 1258 1259 static void 1260 ftl_call_fini_complete(struct spdk_ftl_dev *dev, int status) 1261 { 1262 struct ftl_init_context ctx = dev->fini_ctx; 1263 1264 ftl_dev_free_sync(dev); 1265 if (ctx.cb_fn != NULL) { 1266 ctx.cb_fn(NULL, ctx.cb_arg, status); 1267 } 1268 } 1269 1270 static void 1271 ftl_nv_cache_header_fini_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 1272 { 1273 int status = 0; 1274 1275 spdk_bdev_free_io(bdev_io); 1276 if (spdk_unlikely(!success)) { 1277 SPDK_ERRLOG("Failed to write non-volatile cache metadata header\n"); 1278 status = -EIO; 1279 } 1280 1281 ftl_call_fini_complete((struct spdk_ftl_dev *)cb_arg, status); 1282 } 1283 1284 static void 1285 ftl_halt_complete_cb(void *ctx) 1286 { 1287 struct spdk_ftl_dev *dev = ctx; 1288 struct ftl_nv_cache *nv_cache = &dev->nv_cache; 1289 int rc = 0; 1290 1291 if (!ftl_dev_has_nv_cache(dev)) { 1292 ftl_call_fini_complete(dev, 0); 1293 } else { 1294 rc = ftl_nv_cache_write_header(nv_cache, true, ftl_nv_cache_header_fini_cb, dev); 1295 if (spdk_unlikely(rc != 0)) { 1296 SPDK_ERRLOG("Failed to write non-volatile cache metadata header: %s\n", 1297 spdk_strerror(-rc)); 1298 ftl_call_fini_complete(dev, rc); 1299 } 1300 } 1301 } 1302 1303 static int 1304 ftl_halt_poller(void *ctx) 1305 { 1306 struct spdk_ftl_dev *dev = ctx; 1307 1308 if (!dev->core_thread.poller && !dev->read_thread.poller) { 1309 spdk_poller_unregister(&dev->fini_ctx.poller); 1310 1311 ftl_dev_free_thread(dev, &dev->read_thread); 1312 ftl_dev_free_thread(dev, &dev->core_thread); 1313 1314 ftl_anm_unregister_device(dev); 1315 1316 spdk_thread_send_msg(dev->fini_ctx.thread, ftl_halt_complete_cb, dev); 1317 } 1318 1319 return 0; 1320 } 1321 1322 static void 1323 ftl_add_halt_poller(void *ctx) 1324 { 1325 struct spdk_ftl_dev *dev = ctx; 1326 dev->halt = 1; 1327 1328 _ftl_halt_defrag(dev); 1329 1330 assert(!dev->fini_ctx.poller); 1331 dev->fini_ctx.poller = spdk_poller_register(ftl_halt_poller, dev, 100); 1332 } 1333 1334 static int 1335 _spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg, 1336 struct spdk_thread *thread) 1337 { 1338 if (dev->fini_ctx.cb_fn != NULL) { 1339 return -EBUSY; 1340 } 1341 1342 dev->fini_ctx.cb_fn = cb_fn; 1343 dev->fini_ctx.cb_arg = cb_arg; 1344 dev->fini_ctx.thread = thread; 1345 1346 ftl_rwb_disable_interleaving(dev->rwb); 1347 1348 spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_add_halt_poller, dev); 1349 return 0; 1350 } 1351 1352 int 1353 spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_arg) 1354 { 1355 return _spdk_ftl_dev_free(dev, cb_fn, cb_arg, spdk_get_thread()); 1356 } 1357 1358 int 1359 spdk_ftl_module_init(const struct ftl_module_init_opts *opts, spdk_ftl_fn cb, void *cb_arg) 1360 { 1361 return ftl_anm_init(opts->anm_thread, cb, cb_arg); 1362 } 1363 1364 int 1365 spdk_ftl_module_fini(spdk_ftl_fn cb, void *cb_arg) 1366 { 1367 return ftl_anm_free(cb, cb_arg); 1368 } 1369 1370 SPDK_LOG_REGISTER_COMPONENT("ftl_init", SPDK_LOG_FTL_INIT) 1371