1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2022 Intel Corporation. 3 * Copyright 2023 Solidigm All Rights Reserved 4 * All rights reserved. 5 */ 6 7 8 #include "spdk/bdev.h" 9 #include "spdk/bdev_module.h" 10 #include "spdk/ftl.h" 11 #include "spdk/string.h" 12 13 #include "ftl_nv_cache.h" 14 #include "ftl_nv_cache_io.h" 15 #include "ftl_core.h" 16 #include "ftl_band.h" 17 #include "utils/ftl_addr_utils.h" 18 #include "mngt/ftl_mngt.h" 19 20 static inline uint64_t nvc_data_blocks(struct ftl_nv_cache *nv_cache) __attribute__((unused)); 21 static struct ftl_nv_cache_compactor *compactor_alloc(struct spdk_ftl_dev *dev); 22 static void compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor); 23 static void compaction_process_ftl_done(struct ftl_rq *rq); 24 static void compaction_process_read_entry(void *arg); 25 static void ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev, 26 const struct ftl_property *property, 27 struct spdk_json_write_ctx *w); 28 29 static inline void 30 nvc_validate_md(struct ftl_nv_cache *nv_cache, 31 struct ftl_nv_cache_chunk_md *chunk_md) 32 { 33 struct ftl_md *md = nv_cache->md; 34 void *buffer = ftl_md_get_buffer(md); 35 uint64_t size = ftl_md_get_buffer_size(md); 36 void *ptr = chunk_md; 37 38 if (ptr < buffer) { 39 ftl_abort(); 40 } 41 42 ptr += sizeof(*chunk_md); 43 if (ptr > buffer + size) { 44 ftl_abort(); 45 } 46 } 47 48 static inline uint64_t 49 nvc_data_offset(struct ftl_nv_cache *nv_cache) 50 { 51 return 0; 52 } 53 54 static inline uint64_t 55 nvc_data_blocks(struct ftl_nv_cache *nv_cache) 56 { 57 return nv_cache->chunk_blocks * nv_cache->chunk_count; 58 } 59 60 size_t 61 ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache) 62 { 63 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, 64 struct spdk_ftl_dev, nv_cache); 65 return spdk_divide_round_up(dev->layout.nvc.chunk_data_blocks * dev->layout.l2p.addr_size, 66 FTL_BLOCK_SIZE); 67 } 68 69 static size_t 70 nv_cache_p2l_map_pool_elem_size(const struct ftl_nv_cache *nv_cache) 71 { 72 /* Map pool element holds the whole tail md */ 73 return nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE; 74 } 75 76 static uint64_t 77 get_chunk_idx(struct ftl_nv_cache_chunk *chunk) 78 { 79 struct ftl_nv_cache_chunk *first_chunk = chunk->nv_cache->chunks; 80 81 return (chunk->offset - first_chunk->offset) / chunk->nv_cache->chunk_blocks; 82 } 83 84 static void 85 ftl_nv_cache_init_update_limits(struct spdk_ftl_dev *dev) 86 { 87 struct ftl_nv_cache *nvc = &dev->nv_cache; 88 uint64_t usable_chunks = nvc->chunk_count - nvc->chunk_inactive_count; 89 90 /* Start compaction when full chunks exceed given % of entire active chunks */ 91 nvc->chunk_compaction_threshold = usable_chunks * 92 dev->conf.nv_cache.chunk_compaction_threshold / 93 100; 94 95 nvc->throttle.interval_tsc = FTL_NV_CACHE_THROTTLE_INTERVAL_MS * 96 (spdk_get_ticks_hz() / 1000); 97 98 nvc->chunk_free_target = spdk_divide_round_up(usable_chunks * 99 dev->conf.nv_cache.chunk_free_target, 100 100); 101 } 102 103 struct nvc_scrub_ctx { 104 uint64_t chunk_no; 105 nvc_scrub_cb cb; 106 void *cb_ctx; 107 108 struct ftl_layout_region reg_chunk; 109 struct ftl_md *md_chunk; 110 }; 111 112 static int 113 nvc_scrub_find_next_chunk(struct spdk_ftl_dev *dev, struct nvc_scrub_ctx *scrub_ctx) 114 { 115 while (scrub_ctx->chunk_no < dev->layout.nvc.chunk_count) { 116 if (dev->nv_cache.nvc_type->ops.is_chunk_active(dev, scrub_ctx->reg_chunk.current.offset)) { 117 return 0; 118 } 119 120 /* Move the dummy region along 
with the active chunk */ 121 scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks; 122 scrub_ctx->chunk_no++; 123 } 124 return -ENOENT; 125 } 126 127 static void 128 nvc_scrub_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status) 129 { 130 struct nvc_scrub_ctx *scrub_ctx = md->owner.cb_ctx; 131 union ftl_md_vss vss; 132 133 /* Move to the next chunk */ 134 scrub_ctx->chunk_no++; 135 scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks; 136 137 FTL_DEBUGLOG(dev, "Scrub progress: %"PRIu64"/%"PRIu64" chunks\n", 138 scrub_ctx->chunk_no, dev->layout.nvc.chunk_count); 139 140 if (status || nvc_scrub_find_next_chunk(dev, scrub_ctx)) { 141 /* IO error or no more active chunks found. Scrubbing finished. */ 142 scrub_ctx->cb(dev, scrub_ctx->cb_ctx, status); 143 ftl_md_destroy(scrub_ctx->md_chunk, 0); 144 free(scrub_ctx); 145 return; 146 } 147 148 /* Scrub the next chunk */ 149 vss.version.md_version = 0; 150 vss.nv_cache.lba = FTL_ADDR_INVALID; 151 152 scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb; 153 scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx; 154 155 ftl_md_clear(scrub_ctx->md_chunk, 0, &vss); 156 } 157 158 void 159 ftl_nv_cache_scrub(struct spdk_ftl_dev *dev, nvc_scrub_cb cb, void *cb_ctx) 160 { 161 struct nvc_scrub_ctx *scrub_ctx = calloc(1, sizeof(*scrub_ctx)); 162 union ftl_md_vss vss; 163 164 if (!scrub_ctx) { 165 cb(dev, cb_ctx, -ENOMEM); 166 return; 167 } 168 169 scrub_ctx->cb = cb; 170 scrub_ctx->cb_ctx = cb_ctx; 171 172 /* Setup a dummy region for the first chunk */ 173 scrub_ctx->reg_chunk.name = ftl_md_region_name(FTL_LAYOUT_REGION_TYPE_DATA_NVC); 174 scrub_ctx->reg_chunk.type = FTL_LAYOUT_REGION_TYPE_DATA_NVC; 175 scrub_ctx->reg_chunk.mirror_type = FTL_LAYOUT_REGION_TYPE_INVALID; 176 scrub_ctx->reg_chunk.current.version = 0; 177 scrub_ctx->reg_chunk.current.offset = 0; 178 scrub_ctx->reg_chunk.current.blocks = dev->layout.nvc.chunk_data_blocks; 179 scrub_ctx->reg_chunk.entry_size = FTL_BLOCK_SIZE; 180 scrub_ctx->reg_chunk.num_entries = dev->layout.nvc.chunk_data_blocks; 181 scrub_ctx->reg_chunk.vss_blksz = dev->nv_cache.md_size; 182 scrub_ctx->reg_chunk.bdev_desc = dev->nv_cache.bdev_desc; 183 scrub_ctx->reg_chunk.ioch = dev->nv_cache.cache_ioch; 184 185 /* Setup an MD object for the region */ 186 scrub_ctx->md_chunk = ftl_md_create(dev, scrub_ctx->reg_chunk.current.blocks, 187 scrub_ctx->reg_chunk.vss_blksz, scrub_ctx->reg_chunk.name, FTL_MD_CREATE_NO_MEM, 188 &scrub_ctx->reg_chunk); 189 190 if (!scrub_ctx->md_chunk) { 191 free(scrub_ctx); 192 cb(dev, cb_ctx, -ENOMEM); 193 return; 194 } 195 196 if (nvc_scrub_find_next_chunk(dev, scrub_ctx)) { 197 /* No active chunks found */ 198 ftl_md_destroy(scrub_ctx->md_chunk, 0); 199 free(scrub_ctx); 200 cb(dev, cb_ctx, -ENOENT); 201 return; 202 } 203 204 /* Scrub the first chunk */ 205 vss.version.md_version = 0; 206 vss.nv_cache.lba = FTL_ADDR_INVALID; 207 208 scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb; 209 scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx; 210 211 ftl_md_clear(scrub_ctx->md_chunk, 0, &vss); 212 return; 213 } 214 215 int 216 ftl_nv_cache_init(struct spdk_ftl_dev *dev) 217 { 218 struct ftl_nv_cache *nv_cache = &dev->nv_cache; 219 struct ftl_nv_cache_chunk *chunk; 220 struct ftl_nv_cache_chunk_md *md; 221 struct ftl_nv_cache_compactor *compactor; 222 uint64_t i, offset; 223 224 nv_cache->halt = true; 225 226 nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD]; 227 if (!nv_cache->md) { 228 FTL_ERRLOG(dev, "No NV cache metadata object\n"); 229 return -1; 230 } 231 232 
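	/*
	 * Descriptive note (sketch of the sizing used by the ftl_mempool_create()
	 * call below): md_pool holds the per-block VSS metadata buffers for user
	 * writes. Each element covers one full transfer unit, i.e. md_size bytes
	 * of metadata for up to xfer_size blocks, and the pool holds
	 * user_io_pool_size elements so that each in-flight user IO can obtain
	 * a buffer.
	 */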
nv_cache->md_pool = ftl_mempool_create(dev->conf.user_io_pool_size, 233 nv_cache->md_size * dev->xfer_size, 234 FTL_BLOCK_SIZE, SPDK_ENV_SOCKET_ID_ANY); 235 if (!nv_cache->md_pool) { 236 FTL_ERRLOG(dev, "Failed to initialize NV cache metadata pool\n"); 237 return -1; 238 } 239 240 /* 241 * Initialize chunk info 242 */ 243 nv_cache->chunk_blocks = dev->layout.nvc.chunk_data_blocks; 244 nv_cache->chunk_count = dev->layout.nvc.chunk_count; 245 nv_cache->tail_md_chunk_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache); 246 247 /* Allocate chunks */ 248 nv_cache->chunks = calloc(nv_cache->chunk_count, 249 sizeof(nv_cache->chunks[0])); 250 if (!nv_cache->chunks) { 251 FTL_ERRLOG(dev, "Failed to initialize NV cache chunks\n"); 252 return -1; 253 } 254 255 TAILQ_INIT(&nv_cache->chunk_free_list); 256 TAILQ_INIT(&nv_cache->chunk_open_list); 257 TAILQ_INIT(&nv_cache->chunk_full_list); 258 TAILQ_INIT(&nv_cache->chunk_comp_list); 259 TAILQ_INIT(&nv_cache->chunk_inactive_list); 260 TAILQ_INIT(&nv_cache->needs_free_persist_list); 261 262 /* First chunk metadata */ 263 md = ftl_md_get_buffer(nv_cache->md); 264 if (!md) { 265 FTL_ERRLOG(dev, "No NV cache metadata\n"); 266 return -1; 267 } 268 269 chunk = nv_cache->chunks; 270 offset = nvc_data_offset(nv_cache); 271 for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) { 272 chunk->nv_cache = nv_cache; 273 chunk->md = md; 274 chunk->md->version = FTL_NVC_VERSION_CURRENT; 275 nvc_validate_md(nv_cache, md); 276 chunk->offset = offset; 277 offset += nv_cache->chunk_blocks; 278 279 if (nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset)) { 280 nv_cache->chunk_free_count++; 281 TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry); 282 } else { 283 chunk->md->state = FTL_CHUNK_STATE_INACTIVE; 284 nv_cache->chunk_inactive_count++; 285 TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry); 286 } 287 } 288 assert(nv_cache->chunk_free_count + nv_cache->chunk_inactive_count == nv_cache->chunk_count); 289 assert(offset <= nvc_data_offset(nv_cache) + nvc_data_blocks(nv_cache)); 290 291 TAILQ_INIT(&nv_cache->compactor_list); 292 for (i = 0; i < FTL_NV_CACHE_NUM_COMPACTORS; i++) { 293 compactor = compactor_alloc(dev); 294 295 if (!compactor) { 296 FTL_ERRLOG(dev, "Cannot allocate compaction process\n"); 297 return -1; 298 } 299 300 TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry); 301 } 302 303 #define FTL_MAX_OPEN_CHUNKS 2 304 nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS, 305 nv_cache_p2l_map_pool_elem_size(nv_cache), 306 FTL_BLOCK_SIZE, 307 SPDK_ENV_SOCKET_ID_ANY); 308 if (!nv_cache->p2l_pool) { 309 return -ENOMEM; 310 } 311 312 /* One entry per open chunk */ 313 nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS, 314 sizeof(struct ftl_nv_cache_chunk_md), 315 FTL_BLOCK_SIZE, 316 SPDK_ENV_SOCKET_ID_ANY); 317 if (!nv_cache->chunk_md_pool) { 318 return -ENOMEM; 319 } 320 321 /* Each compactor can be reading a different chunk which it needs to switch state to free to at the end, 322 * plus one backup each for high invalidity chunks processing (if there's a backlog of chunks with extremely 323 * small, even 0, validity then they can be processed by the compactors quickly and trigger a lot of updates 324 * to free state at once) */ 325 nv_cache->free_chunk_md_pool = ftl_mempool_create(2 * FTL_NV_CACHE_NUM_COMPACTORS, 326 sizeof(struct ftl_nv_cache_chunk_md), 327 FTL_BLOCK_SIZE, 328 SPDK_ENV_SOCKET_ID_ANY); 329 if (!nv_cache->free_chunk_md_pool) { 330 return -ENOMEM; 331 } 332 333 
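	/*
	 * Illustrative example of the limits computed below (numbers are
	 * hypothetical, not defaults): with 1000 usable (active) chunks and
	 * conf.nv_cache.chunk_compaction_threshold = 80, compaction starts once
	 * chunk_full_count reaches 1000 * 80 / 100 = 800 chunks; with
	 * conf.nv_cache.chunk_free_target = 5, the write throttle aims to keep
	 * at least ceil(1000 * 5 / 100) = 50 chunks free.
	 */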
ftl_nv_cache_init_update_limits(dev); 334 ftl_property_register(dev, "cache_device", NULL, 0, NULL, NULL, ftl_property_dump_cache_dev, NULL, 335 NULL, true); 336 return 0; 337 } 338 339 void 340 ftl_nv_cache_deinit(struct spdk_ftl_dev *dev) 341 { 342 struct ftl_nv_cache *nv_cache = &dev->nv_cache; 343 struct ftl_nv_cache_compactor *compactor; 344 345 while (!TAILQ_EMPTY(&nv_cache->compactor_list)) { 346 compactor = TAILQ_FIRST(&nv_cache->compactor_list); 347 TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry); 348 349 compactor_free(dev, compactor); 350 } 351 352 ftl_mempool_destroy(nv_cache->md_pool); 353 ftl_mempool_destroy(nv_cache->p2l_pool); 354 ftl_mempool_destroy(nv_cache->chunk_md_pool); 355 ftl_mempool_destroy(nv_cache->free_chunk_md_pool); 356 nv_cache->md_pool = NULL; 357 nv_cache->p2l_pool = NULL; 358 nv_cache->chunk_md_pool = NULL; 359 nv_cache->free_chunk_md_pool = NULL; 360 361 free(nv_cache->chunks); 362 nv_cache->chunks = NULL; 363 } 364 365 static uint64_t 366 chunk_get_free_space(struct ftl_nv_cache *nv_cache, 367 struct ftl_nv_cache_chunk *chunk) 368 { 369 assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <= 370 nv_cache->chunk_blocks); 371 return nv_cache->chunk_blocks - chunk->md->write_pointer - 372 nv_cache->tail_md_chunk_blocks; 373 } 374 375 static bool 376 chunk_is_closed(struct ftl_nv_cache_chunk *chunk) 377 { 378 return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks; 379 } 380 381 static void ftl_chunk_close(struct ftl_nv_cache_chunk *chunk); 382 383 static uint64_t 384 ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io) 385 { 386 uint64_t address = FTL_LBA_INVALID; 387 uint64_t num_blocks = io->num_blocks; 388 uint64_t free_space; 389 struct ftl_nv_cache_chunk *chunk; 390 391 do { 392 chunk = nv_cache->chunk_current; 393 /* Chunk has been closed so pick new one */ 394 if (chunk && chunk_is_closed(chunk)) { 395 chunk = NULL; 396 } 397 398 if (!chunk) { 399 chunk = TAILQ_FIRST(&nv_cache->chunk_open_list); 400 if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) { 401 TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry); 402 nv_cache->chunk_current = chunk; 403 } else { 404 break; 405 } 406 } 407 408 free_space = chunk_get_free_space(nv_cache, chunk); 409 410 if (free_space >= num_blocks) { 411 /* Enough space in chunk */ 412 413 /* Calculate address in NV cache */ 414 address = chunk->offset + chunk->md->write_pointer; 415 416 /* Set chunk in IO */ 417 io->nv_cache_chunk = chunk; 418 419 /* Move write pointer */ 420 chunk->md->write_pointer += num_blocks; 421 break; 422 } 423 424 /* Not enough space in nv_cache_chunk */ 425 nv_cache->chunk_current = NULL; 426 427 if (0 == free_space) { 428 continue; 429 } 430 431 chunk->md->blocks_skipped = free_space; 432 chunk->md->blocks_written += free_space; 433 chunk->md->write_pointer += free_space; 434 435 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) { 436 ftl_chunk_close(chunk); 437 } 438 } while (1); 439 440 return address; 441 } 442 443 void 444 ftl_nv_cache_fill_md(struct ftl_io *io) 445 { 446 struct ftl_nv_cache_chunk *chunk = io->nv_cache_chunk; 447 uint64_t i; 448 union ftl_md_vss *metadata = io->md; 449 uint64_t lba = ftl_io_get_lba(io, 0); 450 451 for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) { 452 metadata->nv_cache.lba = lba; 453 metadata->nv_cache.seq_id = chunk->md->seq_id; 454 } 455 } 456 457 uint64_t 458 chunk_tail_md_offset(struct ftl_nv_cache *nv_cache) 459 { 460 return nv_cache->chunk_blocks - 
nv_cache->tail_md_chunk_blocks; 461 } 462 463 static void 464 chunk_advance_blocks(struct ftl_nv_cache *nv_cache, struct ftl_nv_cache_chunk *chunk, 465 uint64_t advanced_blocks) 466 { 467 chunk->md->blocks_written += advanced_blocks; 468 469 assert(chunk->md->blocks_written <= nv_cache->chunk_blocks); 470 471 if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) { 472 ftl_chunk_close(chunk); 473 } 474 } 475 476 static uint64_t 477 chunk_user_blocks_written(struct ftl_nv_cache_chunk *chunk) 478 { 479 return chunk->md->blocks_written - chunk->md->blocks_skipped - 480 chunk->nv_cache->tail_md_chunk_blocks; 481 } 482 483 static bool 484 is_chunk_compacted(struct ftl_nv_cache_chunk *chunk) 485 { 486 assert(chunk->md->blocks_written != 0); 487 488 if (chunk_user_blocks_written(chunk) == chunk->md->blocks_compacted) { 489 return true; 490 } 491 492 return false; 493 } 494 495 static int 496 ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk) 497 { 498 struct ftl_nv_cache *nv_cache = chunk->nv_cache; 499 struct ftl_p2l_map *p2l_map = &chunk->p2l_map; 500 501 p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->chunk_md_pool); 502 503 if (!p2l_map->chunk_dma_md) { 504 return -ENOMEM; 505 } 506 507 ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md); 508 return 0; 509 } 510 511 static void 512 ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk) 513 { 514 struct ftl_p2l_map *p2l_map = &chunk->p2l_map; 515 516 ftl_mempool_put(chunk->nv_cache->chunk_md_pool, p2l_map->chunk_dma_md); 517 p2l_map->chunk_dma_md = NULL; 518 } 519 520 static void 521 ftl_chunk_free(struct ftl_nv_cache_chunk *chunk) 522 { 523 struct ftl_nv_cache *nv_cache = chunk->nv_cache; 524 525 /* Reset chunk */ 526 ftl_nv_cache_chunk_md_initialize(chunk->md); 527 528 TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry); 529 nv_cache->chunk_free_persist_count++; 530 } 531 532 static int 533 ftl_chunk_alloc_chunk_free_entry(struct ftl_nv_cache_chunk *chunk) 534 { 535 struct ftl_nv_cache *nv_cache = chunk->nv_cache; 536 struct ftl_p2l_map *p2l_map = &chunk->p2l_map; 537 538 p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->free_chunk_md_pool); 539 if (!p2l_map->chunk_dma_md) { 540 return -ENOMEM; 541 } 542 543 ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md); 544 return 0; 545 } 546 547 static void 548 ftl_chunk_free_chunk_free_entry(struct ftl_nv_cache_chunk *chunk) 549 { 550 struct ftl_p2l_map *p2l_map = &chunk->p2l_map; 551 552 ftl_mempool_put(chunk->nv_cache->free_chunk_md_pool, p2l_map->chunk_dma_md); 553 p2l_map->chunk_dma_md = NULL; 554 } 555 556 static void 557 chunk_free_cb(int status, void *ctx) 558 { 559 struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx; 560 561 if (spdk_likely(!status)) { 562 struct ftl_nv_cache *nv_cache = chunk->nv_cache; 563 564 nv_cache->chunk_free_persist_count--; 565 TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry); 566 nv_cache->chunk_free_count++; 567 nv_cache->chunk_full_count--; 568 chunk->md->state = FTL_CHUNK_STATE_FREE; 569 chunk->md->close_seq_id = 0; 570 ftl_chunk_free_chunk_free_entry(chunk); 571 } else { 572 #ifdef SPDK_FTL_RETRY_ON_ERROR 573 ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx); 574 #else 575 ftl_abort(); 576 #endif 577 } 578 } 579 580 static void 581 ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache) 582 { 583 int rc; 584 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache); 585 struct ftl_p2l_map *p2l_map; 586 struct ftl_md *md = 
dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD]; 587 struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD); 588 struct ftl_nv_cache_chunk *tchunk, *chunk = NULL; 589 590 TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) { 591 p2l_map = &chunk->p2l_map; 592 rc = ftl_chunk_alloc_chunk_free_entry(chunk); 593 if (rc) { 594 break; 595 } 596 597 TAILQ_REMOVE(&nv_cache->needs_free_persist_list, chunk, entry); 598 599 memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE); 600 p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_FREE; 601 p2l_map->chunk_dma_md->close_seq_id = 0; 602 p2l_map->chunk_dma_md->p2l_map_checksum = 0; 603 604 ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL, 605 chunk_free_cb, chunk, &chunk->md_persist_entry_ctx); 606 } 607 } 608 609 static void 610 compaction_stats_update(struct ftl_nv_cache_chunk *chunk) 611 { 612 struct ftl_nv_cache *nv_cache = chunk->nv_cache; 613 struct compaction_bw_stats *compaction_bw = &nv_cache->compaction_recent_bw; 614 double *ptr; 615 616 if (spdk_unlikely(chunk->compaction_length_tsc == 0)) { 617 return; 618 } 619 620 if (spdk_likely(compaction_bw->count == FTL_NV_CACHE_COMPACTION_SMA_N)) { 621 ptr = compaction_bw->buf + compaction_bw->first; 622 compaction_bw->first++; 623 if (compaction_bw->first == FTL_NV_CACHE_COMPACTION_SMA_N) { 624 compaction_bw->first = 0; 625 } 626 compaction_bw->sum -= *ptr; 627 } else { 628 ptr = compaction_bw->buf + compaction_bw->count; 629 compaction_bw->count++; 630 } 631 632 *ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc; 633 chunk->compaction_length_tsc = 0; 634 635 compaction_bw->sum += *ptr; 636 nv_cache->compaction_sma = compaction_bw->sum / compaction_bw->count; 637 } 638 639 static void 640 chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks) 641 { 642 struct ftl_nv_cache *nv_cache = chunk->nv_cache; 643 uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread()); 644 645 chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc; 646 chunk->compaction_start_tsc = tsc; 647 648 chunk->md->blocks_compacted += num_blocks; 649 assert(chunk->md->blocks_compacted <= chunk_user_blocks_written(chunk)); 650 if (!is_chunk_compacted(chunk)) { 651 return; 652 } 653 654 /* Remove chunk from compacted list */ 655 TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry); 656 nv_cache->chunk_comp_count--; 657 658 compaction_stats_update(chunk); 659 660 ftl_chunk_free(chunk); 661 } 662 663 static bool 664 is_compaction_required_for_upgrade(struct ftl_nv_cache *nv_cache) 665 { 666 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache); 667 668 if (dev->conf.prep_upgrade_on_shutdown) { 669 if (nv_cache->chunk_full_count || nv_cache->chunk_open_count) { 670 return true; 671 } 672 } 673 674 return false; 675 } 676 677 static bool 678 is_compaction_required(struct ftl_nv_cache *nv_cache) 679 { 680 if (spdk_unlikely(nv_cache->halt)) { 681 return is_compaction_required_for_upgrade(nv_cache); 682 } 683 684 if (nv_cache->chunk_full_count >= nv_cache->chunk_compaction_threshold) { 685 return true; 686 } 687 688 return false; 689 } 690 691 static void compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor); 692 static void compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp); 693 694 static void 695 _compaction_process_pin_lba(void *_comp) 696 { 697 struct ftl_nv_cache_compactor *comp = 
_comp;

	compaction_process_pin_lba(comp);
}

static void
compaction_process_pin_lba_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
{
	struct ftl_nv_cache_compactor *comp = pin_ctx->cb_ctx;
	struct ftl_rq *rq = comp->rq;

	if (status) {
		rq->iter.status = status;
		pin_ctx->lba = FTL_LBA_INVALID;
	}

	if (--rq->iter.remaining == 0) {
		if (rq->iter.status) {
			/* unpin and try again */
			ftl_rq_unpin(rq);
			spdk_thread_send_msg(spdk_get_thread(), _compaction_process_pin_lba, comp);
			return;
		}

		compaction_process_finish_read(comp);
	}
}

static void
compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp)
{
	struct ftl_rq *rq = comp->rq;
	struct spdk_ftl_dev *dev = rq->dev;
	struct ftl_rq_entry *entry;

	assert(rq->iter.count);
	rq->iter.remaining = rq->iter.count;
	rq->iter.status = 0;

	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
		struct ftl_l2p_pin_ctx *pin_ctx = &entry->l2p_pin_ctx;
		union ftl_md_vss *md = entry->io_md;

		if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
			ftl_l2p_pin_skip(dev, compaction_process_pin_lba_cb, comp, pin_ctx);
		} else {
			ftl_l2p_pin(dev, md->nv_cache.lba, 1, compaction_process_pin_lba_cb, comp, pin_ctx);
		}
	}
}

static void
compaction_process_read_entry_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq_entry *entry = arg;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);
	struct spdk_ftl_dev *dev = rq->dev;
	struct ftl_nv_cache_compactor *compactor = rq->owner.priv;

	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_CMP, bdev_io);

	spdk_bdev_free_io(bdev_io);

	if (!success) {
		/* retry */
		spdk_thread_send_msg(spdk_get_thread(), compaction_process_read_entry, entry);
		return;
	}

	assert(rq->iter.remaining >= entry->bdev_io.num_blocks);
	rq->iter.remaining -= entry->bdev_io.num_blocks;
	if (0 == rq->iter.remaining) {
		/* All IOs processed, go to the next phase - pinning */
		compaction_process_pin_lba(compactor);
	}
}

static void
compaction_process_read_entry(void *arg)
{
	struct ftl_rq_entry *entry = arg;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);
	struct spdk_ftl_dev *dev = rq->dev;

	int rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, dev->nv_cache.bdev_desc,
			dev->nv_cache.cache_ioch, entry->io_payload, entry->io_md,
			entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
			compaction_process_read_entry_cb, entry);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
			entry->bdev_io.wait_entry.bdev = bdev;
			entry->bdev_io.wait_entry.cb_fn = compaction_process_read_entry;
			entry->bdev_io.wait_entry.cb_arg = entry;
			spdk_bdev_queue_io_wait(bdev, dev->nv_cache.cache_ioch, &entry->bdev_io.wait_entry);
		} else {
			ftl_abort();
		}
	}

	dev->stats.io_activity_total += entry->bdev_io.num_blocks;
}

static bool
is_chunk_to_read(struct ftl_nv_cache_chunk *chunk)
{
	assert(chunk->md->blocks_written != 0);

	if (chunk_user_blocks_written(chunk) == chunk->md->read_pointer) {
		return false;
	}

	return true;
}

static struct ftl_nv_cache_chunk *
get_chunk_for_compaction(struct
ftl_nv_cache *nv_cache) 816 { 817 struct ftl_nv_cache_chunk *chunk = NULL; 818 819 if (!TAILQ_EMPTY(&nv_cache->chunk_comp_list)) { 820 chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list); 821 if (is_chunk_to_read(chunk)) { 822 return chunk; 823 } 824 } 825 826 if (!TAILQ_EMPTY(&nv_cache->chunk_full_list)) { 827 chunk = TAILQ_FIRST(&nv_cache->chunk_full_list); 828 TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry); 829 830 assert(chunk->md->write_pointer); 831 } else { 832 return NULL; 833 } 834 835 if (spdk_likely(chunk)) { 836 assert(chunk->md->write_pointer != 0); 837 TAILQ_INSERT_HEAD(&nv_cache->chunk_comp_list, chunk, entry); 838 nv_cache->chunk_comp_count++; 839 } 840 841 return chunk; 842 } 843 844 static uint64_t 845 chunk_blocks_to_read(struct ftl_nv_cache_chunk *chunk) 846 { 847 uint64_t blocks_written; 848 uint64_t blocks_to_read; 849 850 assert(chunk->md->blocks_written >= chunk->md->blocks_skipped); 851 blocks_written = chunk_user_blocks_written(chunk); 852 853 assert(blocks_written >= chunk->md->read_pointer); 854 blocks_to_read = blocks_written - chunk->md->read_pointer; 855 856 return blocks_to_read; 857 } 858 859 static void 860 compactor_deactivate(struct ftl_nv_cache_compactor *compactor) 861 { 862 struct ftl_nv_cache *nv_cache = compactor->nv_cache; 863 864 compactor->rq->iter.count = 0; 865 assert(nv_cache->compaction_active_count); 866 nv_cache->compaction_active_count--; 867 TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry); 868 } 869 870 static void 871 compaction_process_invalidate_entry(struct ftl_rq_entry *entry) 872 { 873 entry->addr = FTL_ADDR_INVALID; 874 entry->lba = FTL_LBA_INVALID; 875 entry->seq_id = 0; 876 entry->owner.priv = NULL; 877 } 878 879 static void 880 compaction_process_pad(struct ftl_nv_cache_compactor *compactor, uint64_t idx) 881 { 882 struct ftl_rq *rq = compactor->rq; 883 struct ftl_rq_entry *entry; 884 885 assert(idx < rq->num_blocks); 886 FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[idx], entry, rq->num_blocks) { 887 compaction_process_invalidate_entry(entry); 888 } 889 } 890 891 static void 892 compaction_process_read(struct ftl_nv_cache_compactor *compactor) 893 { 894 struct ftl_rq *rq = compactor->rq; 895 struct ftl_nv_cache *nv_cache = compactor->nv_cache; 896 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache); 897 struct ftl_rq_entry *entry, *io; 898 899 assert(rq->iter.count); 900 rq->iter.remaining = rq->iter.count; 901 902 io = rq->entries; 903 io->bdev_io.num_blocks = 1; 904 io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr); 905 FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[1], entry, rq->iter.count) { 906 if (entry->addr == io->addr + io->bdev_io.num_blocks) { 907 io->bdev_io.num_blocks++; 908 } else { 909 compaction_process_read_entry(io); 910 io = entry; 911 io->bdev_io.num_blocks = 1; 912 io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr); 913 } 914 } 915 compaction_process_read_entry(io); 916 } 917 918 static ftl_addr 919 compaction_chunk_read_pos(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk) 920 { 921 ftl_addr start, pos; 922 uint64_t skip, to_read = chunk_blocks_to_read(chunk); 923 924 if (0 == to_read) { 925 return FTL_ADDR_INVALID; 926 } 927 928 start = ftl_addr_from_nvc_offset(dev, chunk->offset + chunk->md->read_pointer); 929 pos = ftl_bitmap_find_first_set(dev->valid_map, start, start + to_read - 1); 930 931 if (pos == UINT64_MAX) { 932 chunk->md->read_pointer += to_read; 933 chunk_compaction_advance(chunk, to_read); 934 return 
FTL_ADDR_INVALID; 935 } 936 937 assert(pos >= start); 938 skip = pos - start; 939 if (skip) { 940 chunk->md->read_pointer += skip; 941 chunk_compaction_advance(chunk, skip); 942 } 943 944 return pos; 945 } 946 947 static bool 948 compaction_entry_read_pos(struct ftl_nv_cache *nv_cache, struct ftl_rq_entry *entry) 949 { 950 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache); 951 struct ftl_nv_cache_chunk *chunk = NULL; 952 ftl_addr addr = FTL_ADDR_INVALID; 953 954 while (!chunk) { 955 /* Get currently handled chunk */ 956 chunk = get_chunk_for_compaction(nv_cache); 957 if (!chunk) { 958 return false; 959 } 960 chunk->compaction_start_tsc = spdk_thread_get_last_tsc(spdk_get_thread()); 961 962 /* Get next read position in chunk */ 963 addr = compaction_chunk_read_pos(dev, chunk); 964 if (FTL_ADDR_INVALID == addr) { 965 chunk = NULL; 966 } 967 } 968 969 assert(FTL_ADDR_INVALID != addr); 970 971 /* Set entry address info and chunk */ 972 entry->addr = addr; 973 entry->owner.priv = chunk; 974 975 /* Move read pointer in the chunk */ 976 chunk->md->read_pointer++; 977 978 return true; 979 } 980 981 static void 982 compaction_process_start(struct ftl_nv_cache_compactor *compactor) 983 { 984 struct ftl_rq *rq = compactor->rq; 985 struct ftl_nv_cache *nv_cache = compactor->nv_cache; 986 struct ftl_rq_entry *entry; 987 988 assert(0 == compactor->rq->iter.count); 989 FTL_RQ_ENTRY_LOOP(rq, entry, rq->num_blocks) { 990 if (!compaction_entry_read_pos(nv_cache, entry)) { 991 compaction_process_pad(compactor, entry->index); 992 break; 993 } 994 rq->iter.count++; 995 } 996 997 if (rq->iter.count) { 998 /* Schedule Read IOs */ 999 compaction_process_read(compactor); 1000 } else { 1001 compactor_deactivate(compactor); 1002 } 1003 } 1004 1005 static void 1006 compaction_process(struct ftl_nv_cache *nv_cache) 1007 { 1008 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache); 1009 struct ftl_nv_cache_compactor *compactor; 1010 1011 if (!is_compaction_required(nv_cache)) { 1012 return; 1013 } 1014 1015 compactor = TAILQ_FIRST(&nv_cache->compactor_list); 1016 if (!compactor) { 1017 return; 1018 } 1019 1020 TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry); 1021 compactor->nv_cache->compaction_active_count++; 1022 compaction_process_start(compactor); 1023 ftl_add_io_activity(dev); 1024 } 1025 1026 static void 1027 compaction_process_ftl_done(struct ftl_rq *rq) 1028 { 1029 struct spdk_ftl_dev *dev = rq->dev; 1030 struct ftl_nv_cache_compactor *compactor = rq->owner.priv; 1031 struct ftl_band *band = rq->io.band; 1032 struct ftl_rq_entry *entry; 1033 ftl_addr addr; 1034 1035 if (spdk_unlikely(false == rq->success)) { 1036 /* IO error retry writing */ 1037 #ifdef SPDK_FTL_RETRY_ON_ERROR 1038 ftl_writer_queue_rq(&dev->writer_user, rq); 1039 return; 1040 #else 1041 ftl_abort(); 1042 #endif 1043 } 1044 1045 assert(rq->iter.count); 1046 1047 /* Update L2P table */ 1048 addr = rq->io.addr; 1049 FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) { 1050 struct ftl_nv_cache_chunk *chunk = entry->owner.priv; 1051 1052 if (entry->lba != FTL_LBA_INVALID) { 1053 ftl_l2p_update_base(dev, entry->lba, addr, entry->addr); 1054 ftl_l2p_unpin(dev, entry->lba, 1); 1055 chunk_compaction_advance(chunk, 1); 1056 } else { 1057 assert(entry->addr == FTL_ADDR_INVALID); 1058 } 1059 1060 addr = ftl_band_next_addr(band, addr, 1); 1061 compaction_process_invalidate_entry(entry); 1062 } 1063 1064 compactor_deactivate(compactor); 1065 } 1066 1067 static void 1068 
compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
{
	struct ftl_rq *rq = compactor->rq;
	struct spdk_ftl_dev *dev = rq->dev;
	struct ftl_rq_entry *entry;
	ftl_addr current_addr;
	uint64_t skip = 0;

	FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
		struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
		union ftl_md_vss *md = entry->io_md;

		if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
			skip++;
			compaction_process_invalidate_entry(entry);
			chunk_compaction_advance(chunk, 1);
			continue;
		}

		current_addr = ftl_l2p_get(dev, md->nv_cache.lba);
		if (current_addr == entry->addr) {
			entry->lba = md->nv_cache.lba;
			entry->seq_id = chunk->md->seq_id;
		} else {
			/* This address has already been invalidated, just skip this block */
			chunk_compaction_advance(chunk, 1);
			ftl_l2p_unpin(dev, md->nv_cache.lba, 1);
			compaction_process_invalidate_entry(entry);
			skip++;
		}
	}

	if (skip < rq->iter.count) {
		/*
		 * Request contains data to be placed on FTL, compact it
		 */
		ftl_writer_queue_rq(&dev->writer_user, rq);
	} else {
		compactor_deactivate(compactor);
	}
}

static void
compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor)
{
	if (!compactor) {
		return;
	}

	ftl_rq_del(compactor->rq);
	free(compactor);
}

static struct ftl_nv_cache_compactor *
compactor_alloc(struct spdk_ftl_dev *dev)
{
	struct ftl_nv_cache_compactor *compactor;
	struct ftl_rq_entry *entry;

	compactor = calloc(1, sizeof(*compactor));
	if (!compactor) {
		goto error;
	}

	/* Allocate a helper request for reading */
	compactor->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
	if (!compactor->rq) {
		goto error;
	}

	compactor->nv_cache = &dev->nv_cache;
	compactor->rq->owner.priv = compactor;
	compactor->rq->owner.cb = compaction_process_ftl_done;
	compactor->rq->owner.compaction = true;

	FTL_RQ_ENTRY_LOOP(compactor->rq, entry, compactor->rq->num_blocks) {
		compaction_process_invalidate_entry(entry);
	}

	return compactor;

error:
	compactor_free(dev, compactor);
	return NULL;
}

static void
ftl_nv_cache_submit_cb_done(struct ftl_io *io)
{
	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;

	chunk_advance_blocks(nv_cache, io->nv_cache_chunk, io->num_blocks);
	io->nv_cache_chunk = NULL;

	ftl_mempool_put(nv_cache->md_pool, io->md);
	ftl_io_complete(io);
}

static void
ftl_nv_cache_l2p_update(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;
	ftl_addr next_addr = io->addr;
	size_t i;

	for (i = 0; i < io->num_blocks; ++i, ++next_addr) {
		ftl_l2p_update_cache(dev, ftl_io_get_lba(io, i), next_addr, io->map[i]);
	}

	ftl_l2p_unpin(dev, io->lba, io->num_blocks);
	ftl_nv_cache_submit_cb_done(io);
}

static void
ftl_nv_cache_submit_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_io *io = cb_arg;

	ftl_stats_bdev_io_completed(io->dev, FTL_STATS_TYPE_USER, bdev_io);

	spdk_bdev_free_io(bdev_io);

	if (spdk_unlikely(!success)) {
		FTL_ERRLOG(io->dev, "Non-volatile cache write failed at %"PRIx64"\n",
			   io->addr);
		io->status =
-EIO; 1194 ftl_nv_cache_submit_cb_done(io); 1195 } else { 1196 ftl_nv_cache_l2p_update(io); 1197 } 1198 } 1199 1200 static void 1201 nv_cache_write(void *_io) 1202 { 1203 struct ftl_io *io = _io; 1204 struct spdk_ftl_dev *dev = io->dev; 1205 struct ftl_nv_cache *nv_cache = &dev->nv_cache; 1206 int rc; 1207 1208 rc = spdk_bdev_writev_blocks_with_md(nv_cache->bdev_desc, nv_cache->cache_ioch, 1209 io->iov, io->iov_cnt, io->md, 1210 ftl_addr_to_nvc_offset(dev, io->addr), io->num_blocks, 1211 ftl_nv_cache_submit_cb, io); 1212 if (spdk_unlikely(rc)) { 1213 if (rc == -ENOMEM) { 1214 struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc); 1215 io->bdev_io_wait.bdev = bdev; 1216 io->bdev_io_wait.cb_fn = nv_cache_write; 1217 io->bdev_io_wait.cb_arg = io; 1218 spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &io->bdev_io_wait); 1219 } else { 1220 ftl_abort(); 1221 } 1222 } 1223 } 1224 1225 static void 1226 ftl_nv_cache_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx) 1227 { 1228 struct ftl_io *io = pin_ctx->cb_ctx; 1229 size_t i; 1230 1231 if (spdk_unlikely(status != 0)) { 1232 /* Retry on the internal L2P fault */ 1233 FTL_ERRLOG(dev, "Cannot PIN LBA for NV cache write failed at %"PRIx64"\n", 1234 io->addr); 1235 io->status = -EAGAIN; 1236 ftl_nv_cache_submit_cb_done(io); 1237 return; 1238 } 1239 1240 /* Remember previous l2p mapping to resolve conflicts in case of outstanding write-after-write */ 1241 for (i = 0; i < io->num_blocks; ++i) { 1242 io->map[i] = ftl_l2p_get(dev, ftl_io_get_lba(io, i)); 1243 } 1244 1245 assert(io->iov_pos == 0); 1246 1247 ftl_trace_submission(io->dev, io, io->addr, io->num_blocks); 1248 1249 nv_cache_write(io); 1250 } 1251 1252 bool 1253 ftl_nv_cache_write(struct ftl_io *io) 1254 { 1255 struct spdk_ftl_dev *dev = io->dev; 1256 uint64_t cache_offset; 1257 1258 io->md = ftl_mempool_get(dev->nv_cache.md_pool); 1259 if (spdk_unlikely(!io->md)) { 1260 return false; 1261 } 1262 1263 /* Reserve area on the write buffer cache */ 1264 cache_offset = ftl_nv_cache_get_wr_buffer(&dev->nv_cache, io); 1265 if (cache_offset == FTL_LBA_INVALID) { 1266 /* No free space in NV cache, resubmit request */ 1267 ftl_mempool_put(dev->nv_cache.md_pool, io->md); 1268 return false; 1269 } 1270 io->addr = ftl_addr_from_nvc_offset(dev, cache_offset); 1271 io->nv_cache_chunk = dev->nv_cache.chunk_current; 1272 1273 ftl_nv_cache_fill_md(io); 1274 ftl_l2p_pin(io->dev, io->lba, io->num_blocks, 1275 ftl_nv_cache_pin_cb, io, 1276 &io->l2p_pin_ctx); 1277 1278 dev->nv_cache.throttle.blocks_submitted += io->num_blocks; 1279 1280 return true; 1281 } 1282 1283 int 1284 ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks, 1285 spdk_bdev_io_completion_cb cb, void *cb_arg) 1286 { 1287 int rc; 1288 struct ftl_nv_cache *nv_cache = &io->dev->nv_cache; 1289 1290 assert(ftl_addr_in_nvc(io->dev, addr)); 1291 1292 rc = ftl_nv_cache_bdev_read_blocks_with_md(io->dev, nv_cache->bdev_desc, nv_cache->cache_ioch, 1293 ftl_io_iovec_addr(io), NULL, ftl_addr_to_nvc_offset(io->dev, addr), 1294 num_blocks, cb, cb_arg); 1295 1296 return rc; 1297 } 1298 1299 bool 1300 ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache) 1301 { 1302 if (nv_cache->compaction_active_count) { 1303 return false; 1304 } 1305 1306 if (nv_cache->chunk_open_count > 0) { 1307 return false; 1308 } 1309 1310 if (is_compaction_required_for_upgrade(nv_cache)) { 1311 return false; 1312 } 1313 1314 return true; 1315 } 1316 1317 void 1318 ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk, 1319 
uint64_t offset, uint64_t lba) 1320 { 1321 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache); 1322 struct ftl_p2l_map *p2l_map = &chunk->p2l_map; 1323 1324 ftl_lba_store(dev, p2l_map->chunk_map, offset, lba); 1325 } 1326 1327 uint64_t 1328 ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset) 1329 { 1330 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache); 1331 struct ftl_p2l_map *p2l_map = &chunk->p2l_map; 1332 1333 return ftl_lba_load(dev, p2l_map->chunk_map, offset); 1334 } 1335 1336 static void 1337 ftl_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr) 1338 { 1339 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache); 1340 uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr); 1341 uint64_t offset; 1342 1343 offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks; 1344 ftl_chunk_map_set_lba(chunk, offset, lba); 1345 } 1346 1347 struct ftl_nv_cache_chunk * 1348 ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev, ftl_addr addr) 1349 { 1350 struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks; 1351 uint64_t chunk_idx; 1352 uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr); 1353 1354 assert(chunk != NULL); 1355 chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks; 1356 chunk += chunk_idx; 1357 1358 return chunk; 1359 } 1360 1361 void 1362 ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr) 1363 { 1364 struct ftl_nv_cache_chunk *chunk; 1365 1366 chunk = ftl_nv_cache_get_chunk_from_addr(dev, addr); 1367 1368 assert(lba != FTL_LBA_INVALID); 1369 1370 ftl_chunk_set_addr(chunk, lba, addr); 1371 ftl_bitmap_set(dev->valid_map, addr); 1372 } 1373 1374 static void 1375 ftl_nv_cache_throttle_update(struct ftl_nv_cache *nv_cache) 1376 { 1377 double err; 1378 double modifier; 1379 1380 err = ((double)nv_cache->chunk_free_count - nv_cache->chunk_free_target) / nv_cache->chunk_count; 1381 modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP * err; 1382 1383 if (modifier < FTL_NV_CACHE_THROTTLE_MODIFIER_MIN) { 1384 modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MIN; 1385 } else if (modifier > FTL_NV_CACHE_THROTTLE_MODIFIER_MAX) { 1386 modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MAX; 1387 } 1388 1389 if (spdk_unlikely(nv_cache->compaction_sma == 0 || nv_cache->compaction_active_count == 0)) { 1390 nv_cache->throttle.blocks_submitted_limit = UINT64_MAX; 1391 } else { 1392 double blocks_per_interval = nv_cache->compaction_sma * nv_cache->throttle.interval_tsc / 1393 FTL_BLOCK_SIZE; 1394 nv_cache->throttle.blocks_submitted_limit = blocks_per_interval * (1.0 + modifier); 1395 } 1396 } 1397 1398 static void 1399 ftl_nv_cache_process_throttle(struct ftl_nv_cache *nv_cache) 1400 { 1401 uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread()); 1402 1403 if (spdk_unlikely(!nv_cache->throttle.start_tsc)) { 1404 nv_cache->throttle.start_tsc = tsc; 1405 } else if (tsc - nv_cache->throttle.start_tsc >= nv_cache->throttle.interval_tsc) { 1406 ftl_nv_cache_throttle_update(nv_cache); 1407 nv_cache->throttle.start_tsc = tsc; 1408 nv_cache->throttle.blocks_submitted = 0; 1409 } 1410 } 1411 1412 static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk); 1413 1414 void 1415 ftl_nv_cache_process(struct spdk_ftl_dev *dev) 1416 { 1417 struct ftl_nv_cache *nv_cache = &dev->nv_cache; 1418 1419 assert(dev->nv_cache.bdev_desc); 1420 1421 if (nv_cache->chunk_open_count < FTL_MAX_OPEN_CHUNKS && 
spdk_likely(!nv_cache->halt) &&
	    !TAILQ_EMPTY(&nv_cache->chunk_free_list)) {
		struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);
		TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
		TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
		nv_cache->chunk_free_count--;
		chunk->md->seq_id = ftl_get_next_seq_id(dev);
		ftl_chunk_open(chunk);
		ftl_add_io_activity(dev);
	}

	compaction_process(nv_cache);
	ftl_chunk_persist_free_state(nv_cache);
	ftl_nv_cache_process_throttle(nv_cache);
}

static bool
ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
{
	if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
		return true;
	} else {
		return false;
	}
}

bool
ftl_nv_cache_throttle(struct spdk_ftl_dev *dev)
{
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;

	if (dev->nv_cache.throttle.blocks_submitted >= nv_cache->throttle.blocks_submitted_limit ||
	    ftl_nv_cache_full(nv_cache)) {
		return true;
	}

	return false;
}

static void
chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk)
{
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;

	ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
	p2l_map->chunk_map = NULL;

	ftl_chunk_free_md_entry(chunk);
}

int
ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache)
{
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_nv_cache_chunk *chunk;
	int status = 0;
	uint64_t i;

	assert(nv_cache->chunk_open_count == 0);

	if (nv_cache->compaction_active_count) {
		FTL_ERRLOG(dev, "Cannot save NV cache state, compaction in progress\n");
		return -EINVAL;
	}

	chunk = nv_cache->chunks;
	if (!chunk) {
		FTL_ERRLOG(dev, "Cannot save NV cache state, no NV cache metadata\n");
		return -ENOMEM;
	}

	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
		nvc_validate_md(nv_cache, chunk->md);

		if (chunk->md->read_pointer) {
			/* Only full chunks can be compacted */
			if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
				assert(0);
				status = -EINVAL;
				break;
			}

			/*
			 * Chunk is in the middle of compaction, start over after
			 * load
			 */
			chunk->md->read_pointer = chunk->md->blocks_compacted = 0;
		} else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
			/* Full chunk */
		} else if (0 == chunk->md->blocks_written) {
			/* Empty chunk */
		} else {
			assert(0);
			status = -EINVAL;
			break;
		}
	}

	if (status) {
		FTL_ERRLOG(dev, "Cannot save NV cache state, inconsistent NV cache "
			   "metadata\n");
	}

	return status;
}

static int
sort_chunks_cmp(const void *a, const void *b)
{
	struct ftl_nv_cache_chunk *a_chunk = *(struct ftl_nv_cache_chunk **)a;
	struct ftl_nv_cache_chunk *b_chunk = *(struct ftl_nv_cache_chunk **)b;

	return a_chunk->md->seq_id - b_chunk->md->seq_id;
}

static int
sort_chunks(struct ftl_nv_cache *nv_cache)
{
	struct ftl_nv_cache_chunk **chunks_list;
	struct ftl_nv_cache_chunk *chunk;
	uint32_t i;

	if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
		return 0;
	}

	chunks_list =
calloc(nv_cache->chunk_full_count, 1549 sizeof(chunks_list[0])); 1550 if (!chunks_list) { 1551 return -ENOMEM; 1552 } 1553 1554 i = 0; 1555 TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) { 1556 chunks_list[i] = chunk; 1557 i++; 1558 } 1559 assert(i == nv_cache->chunk_full_count); 1560 1561 qsort(chunks_list, nv_cache->chunk_full_count, sizeof(chunks_list[0]), 1562 sort_chunks_cmp); 1563 1564 TAILQ_INIT(&nv_cache->chunk_full_list); 1565 for (i = 0; i < nv_cache->chunk_full_count; i++) { 1566 chunk = chunks_list[i]; 1567 TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry); 1568 } 1569 1570 free(chunks_list); 1571 return 0; 1572 } 1573 1574 static int 1575 chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk) 1576 { 1577 struct ftl_nv_cache *nv_cache = chunk->nv_cache; 1578 struct ftl_p2l_map *p2l_map = &chunk->p2l_map; 1579 1580 assert(p2l_map->ref_cnt == 0); 1581 assert(p2l_map->chunk_map == NULL); 1582 1583 p2l_map->chunk_map = ftl_mempool_get(nv_cache->p2l_pool); 1584 1585 if (!p2l_map->chunk_map) { 1586 return -ENOMEM; 1587 } 1588 1589 if (ftl_chunk_alloc_md_entry(chunk)) { 1590 ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map); 1591 p2l_map->chunk_map = NULL; 1592 return -ENOMEM; 1593 } 1594 1595 /* Set the P2L to FTL_LBA_INVALID */ 1596 memset(p2l_map->chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks); 1597 1598 return 0; 1599 } 1600 1601 int 1602 ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache) 1603 { 1604 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache); 1605 struct ftl_nv_cache_chunk *chunk; 1606 uint64_t chunks_number, offset, i; 1607 int status = 0; 1608 bool active; 1609 1610 nv_cache->chunk_current = NULL; 1611 TAILQ_INIT(&nv_cache->chunk_free_list); 1612 TAILQ_INIT(&nv_cache->chunk_full_list); 1613 TAILQ_INIT(&nv_cache->chunk_inactive_list); 1614 nv_cache->chunk_full_count = 0; 1615 nv_cache->chunk_free_count = 0; 1616 nv_cache->chunk_inactive_count = 0; 1617 1618 assert(nv_cache->chunk_open_count == 0); 1619 offset = nvc_data_offset(nv_cache); 1620 if (!nv_cache->chunks) { 1621 FTL_ERRLOG(dev, "No NV cache metadata\n"); 1622 return -1; 1623 } 1624 1625 if (dev->sb->upgrade_ready) { 1626 /* 1627 * During upgrade some transitions are allowed: 1628 * 1629 * 1. FREE -> INACTIVE 1630 * 2. 
INACTIVE -> FREE
		 */
		chunk = nv_cache->chunks;
		for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
			active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);

			if (chunk->md->state == FTL_CHUNK_STATE_FREE) {
				if (!active) {
					chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
				}
			} else if (chunk->md->state == FTL_CHUNK_STATE_INACTIVE) {
				if (active) {
					chunk->md->state = FTL_CHUNK_STATE_FREE;
				}
			}
		}
	}

	chunk = nv_cache->chunks;
	for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
		chunk->nv_cache = nv_cache;
		nvc_validate_md(nv_cache, chunk->md);

		if (offset != chunk->offset) {
			status = -EINVAL;
			goto error;
		}

		if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
			status = -EINVAL;
			goto error;
		}

		active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
		if (false == active) {
			if (chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
				status = -EINVAL;
				goto error;
			}
		}

		switch (chunk->md->state) {
		case FTL_CHUNK_STATE_FREE:
			if (chunk->md->blocks_written || chunk->md->write_pointer) {
				status = -EINVAL;
				goto error;
			}
			/* Chunk is empty, move it to the free list */
			TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
			nv_cache->chunk_free_count++;
			break;
		case FTL_CHUNK_STATE_OPEN:
			/* All chunks need to be closed at this point */
			status = -EINVAL;
			goto error;
			break;
		case FTL_CHUNK_STATE_CLOSED:
			if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
				status = -EINVAL;
				goto error;
			}
			/* Chunk is full, move it to the full list */
			TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
			nv_cache->chunk_full_count++;
			break;
		case FTL_CHUNK_STATE_INACTIVE:
			TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
			nv_cache->chunk_inactive_count++;
			break;
		default:
			status = -EINVAL;
			FTL_ERRLOG(dev, "Invalid chunk state\n");
			goto error;
		}

		offset += nv_cache->chunk_blocks;
	}

	chunks_number = nv_cache->chunk_free_count + nv_cache->chunk_full_count +
			nv_cache->chunk_inactive_count;
	assert(nv_cache->chunk_current == NULL);

	if (chunks_number != nv_cache->chunk_count) {
		FTL_ERRLOG(dev, "Inconsistent NV cache metadata\n");
		status = -EINVAL;
		goto error;
	}

	status = sort_chunks(nv_cache);
	if (status) {
		FTL_ERRLOG(dev, "FTL NV Cache: sorting chunks ERROR\n");
	}

	FTL_NOTICELOG(dev, "FTL NV Cache: full chunks = %lu, empty chunks = %lu\n",
		      nv_cache->chunk_full_count, nv_cache->chunk_free_count);

	if (0 == status) {
		FTL_NOTICELOG(dev, "FTL NV Cache: state loaded successfully\n");
	} else {
		FTL_ERRLOG(dev, "FTL NV Cache: loading state ERROR\n");
	}

	/* The number of active/inactive chunks calculated at initialization can change at this point due to metadata
	 * upgrade. Recalculate the thresholds that depend on active chunk count.
1734 */ 1735 ftl_nv_cache_init_update_limits(dev); 1736 error: 1737 return status; 1738 } 1739 1740 void 1741 ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id, 1742 uint64_t *close_seq_id) 1743 { 1744 uint64_t i, o_seq_id = 0, c_seq_id = 0; 1745 struct ftl_nv_cache_chunk *chunk; 1746 1747 chunk = nv_cache->chunks; 1748 assert(chunk); 1749 1750 /* Iterate over chunks and get their max open and close seq id */ 1751 for (i = 0; i < nv_cache->chunk_count; i++, chunk++) { 1752 o_seq_id = spdk_max(o_seq_id, chunk->md->seq_id); 1753 c_seq_id = spdk_max(c_seq_id, chunk->md->close_seq_id); 1754 } 1755 1756 *open_seq_id = o_seq_id; 1757 *close_seq_id = c_seq_id; 1758 } 1759 1760 typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status); 1761 1762 static void 1763 write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg) 1764 { 1765 struct ftl_basic_rq *brq = arg; 1766 struct ftl_nv_cache_chunk *chunk = brq->io.chunk; 1767 1768 ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io); 1769 1770 brq->success = success; 1771 if (spdk_likely(success)) { 1772 chunk_advance_blocks(chunk->nv_cache, chunk, brq->num_blocks); 1773 } 1774 1775 spdk_bdev_free_io(bdev_io); 1776 brq->owner.cb(brq); 1777 } 1778 1779 static void 1780 _ftl_chunk_basic_rq_write(void *_brq) 1781 { 1782 struct ftl_basic_rq *brq = _brq; 1783 struct ftl_nv_cache *nv_cache = brq->io.chunk->nv_cache; 1784 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache); 1785 int rc; 1786 1787 rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch, 1788 brq->io_payload, NULL, brq->io.addr, 1789 brq->num_blocks, write_brq_end, brq); 1790 if (spdk_unlikely(rc)) { 1791 if (rc == -ENOMEM) { 1792 struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc); 1793 brq->io.bdev_io_wait.bdev = bdev; 1794 brq->io.bdev_io_wait.cb_fn = _ftl_chunk_basic_rq_write; 1795 brq->io.bdev_io_wait.cb_arg = brq; 1796 spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &brq->io.bdev_io_wait); 1797 } else { 1798 ftl_abort(); 1799 } 1800 } 1801 } 1802 1803 static void 1804 ftl_chunk_basic_rq_write(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq) 1805 { 1806 struct ftl_nv_cache *nv_cache = chunk->nv_cache; 1807 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache); 1808 1809 brq->io.chunk = chunk; 1810 brq->success = false; 1811 1812 _ftl_chunk_basic_rq_write(brq); 1813 1814 chunk->md->write_pointer += brq->num_blocks; 1815 dev->stats.io_activity_total += brq->num_blocks; 1816 } 1817 1818 static void 1819 read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg) 1820 { 1821 struct ftl_basic_rq *brq = arg; 1822 1823 ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io); 1824 1825 brq->success = success; 1826 1827 brq->owner.cb(brq); 1828 spdk_bdev_free_io(bdev_io); 1829 } 1830 1831 static int 1832 ftl_chunk_basic_rq_read(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq) 1833 { 1834 struct ftl_nv_cache *nv_cache = chunk->nv_cache; 1835 struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache); 1836 int rc; 1837 1838 brq->io.chunk = chunk; 1839 brq->success = false; 1840 1841 rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch, 1842 brq->io_payload, NULL, brq->io.addr, brq->num_blocks, read_brq_end, brq); 1843 1844 if (spdk_likely(!rc)) { 1845 
		dev->stats.io_activity_total += brq->num_blocks;
	}

	return rc;
}

static void
chunk_open_cb(int status, void *ctx)
{
	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
		return;
#else
		ftl_abort();
#endif
	}

	chunk->md->state = FTL_CHUNK_STATE_OPEN;
}

static void
ftl_chunk_open(struct ftl_nv_cache_chunk *chunk)
{
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];

	if (chunk_alloc_p2l_map(chunk)) {
		assert(0);
		/*
		 * We control the number of open chunks and it must be consistent with
		 * the size of the chunk P2L map pool
		 */
		ftl_abort();
		return;
	}

	chunk->nv_cache->chunk_open_count++;

	assert(chunk->md->write_pointer == 0);
	assert(chunk->md->blocks_written == 0);

	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_OPEN;
	p2l_map->chunk_dma_md->p2l_map_checksum = 0;

	ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md,
			       NULL, chunk_open_cb, chunk,
			       &chunk->md_persist_entry_ctx);
}

static void
chunk_close_cb(int status, void *ctx)
{
	struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;

	assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);

	if (spdk_likely(!status)) {
		chunk->md->p2l_map_checksum = chunk->p2l_map.chunk_dma_md->p2l_map_checksum;
		chunk_free_p2l_map(chunk);

		assert(chunk->nv_cache->chunk_open_count > 0);
		chunk->nv_cache->chunk_open_count--;

		/* Chunk is full, move it to the full list */
		TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
		chunk->nv_cache->chunk_full_count++;

		chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;

		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
	} else {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
#else
		ftl_abort();
#endif
	}
}

static void
chunk_map_write_cb(struct ftl_basic_rq *brq)
{
	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
	uint32_t chunk_map_crc;

	if (spdk_likely(brq->success)) {
		chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
						   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
		p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
		ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk->p2l_map.chunk_dma_md,
				       NULL, chunk_close_cb, chunk,
				       &chunk->md_persist_entry_ctx);
static void
chunk_map_write_cb(struct ftl_basic_rq *brq)
{
	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
	uint32_t chunk_map_crc;

	if (spdk_likely(brq->success)) {
		chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
						   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
		memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
		p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
		p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
		ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk->p2l_map.chunk_dma_md,
				       NULL, chunk_close_cb, chunk,
				       &chunk->md_persist_entry_ctx);
	} else {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* Retry the whole tail md write */
		chunk->md->write_pointer -= brq->num_blocks;
		ftl_chunk_basic_rq_write(chunk, brq);
#else
		ftl_abort();
#endif
	}
}

static void
ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
{
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_basic_rq *brq = &chunk->metadata_rq;
	void *metadata = chunk->p2l_map.chunk_map;

	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
	ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk);

	assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
	brq->io.addr = chunk->offset + chunk->md->write_pointer;

	ftl_chunk_basic_rq_write(chunk, brq);
}

static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
				  void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx);
static void read_tail_md_cb(struct ftl_basic_rq *brq);
static void recover_open_chunk_cb(struct ftl_basic_rq *brq);

static void
restore_chunk_close_cb(int status, void *ctx)
{
	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)ctx;
	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;

	if (spdk_unlikely(status)) {
		parent->success = false;
	} else {
		chunk->md->p2l_map_checksum = p2l_map->chunk_dma_md->p2l_map_checksum;
		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
	}

	read_tail_md_cb(parent);
}

static void
restore_fill_p2l_map_cb(struct ftl_basic_rq *parent)
{
	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
	uint32_t chunk_map_crc;

	/* Set the original callback back so read_tail_md_cb() reaches the right owner */
	ftl_basic_rq_set_owner(parent, recover_open_chunk_cb, parent->owner.priv);

	if (spdk_unlikely(!parent->success)) {
		read_tail_md_cb(parent);
		return;
	}

	chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
					   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
	p2l_map->chunk_dma_md->write_pointer = chunk->nv_cache->chunk_blocks;
	p2l_map->chunk_dma_md->blocks_written = chunk->nv_cache->chunk_blocks;
	p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;

	ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
			       restore_chunk_close_cb, parent, &chunk->md_persist_entry_ctx);
}
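
/*
 * Write the reconstructed P2L map as the chunk's tail metadata during open-chunk recovery;
 * on completion restore_fill_p2l_map_cb() persists the chunk metadata with the new checksum
 * and marks the chunk closed.
 */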
static void
restore_fill_tail_md(struct ftl_basic_rq *parent, struct ftl_nv_cache_chunk *chunk)
{
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	void *metadata;

	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);

	metadata = chunk->p2l_map.chunk_map;
	ftl_basic_rq_init(dev, parent, metadata, chunk->nv_cache->tail_md_chunk_blocks);
	ftl_basic_rq_set_owner(parent, restore_fill_p2l_map_cb, parent->owner.priv);

	parent->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
	parent->io.chunk = chunk;

	ftl_chunk_basic_rq_write(chunk, parent);
}

static void
read_open_chunk_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_rq *rq = (struct ftl_rq *)cb_arg;
	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)rq->owner.priv;
	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	union ftl_md_vss *md;
	uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
	uint64_t len = bdev_io->u.bdev.num_blocks;
	ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);
	int rc;

	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);

	spdk_bdev_free_io(bdev_io);

	if (!success) {
		parent->success = false;
		read_tail_md_cb(parent);
		return;
	}

	while (rq->iter.idx < rq->iter.count) {
		/* Get metadata */
		md = rq->entries[rq->iter.idx].io_md;
		if (md->nv_cache.seq_id != chunk->md->seq_id) {
			md->nv_cache.lba = FTL_LBA_INVALID;
		}
		/*
		 * The P2L map contains effectively random data at this point (it may hold
		 * arbitrary blocks of a possibly unfilled tail md), so even FTL_LBA_INVALID
		 * needs to be set explicitly
		 */

		ftl_chunk_set_addr(chunk, md->nv_cache.lba, addr + rq->iter.idx);
		rq->iter.idx++;
	}

	if (cache_offset + len < chunk->offset + chunk_tail_md_offset(nv_cache)) {
		cache_offset += len;
		len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - cache_offset);
		rq->iter.idx = 0;
		rq->iter.count = len;

		rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
				nv_cache->cache_ioch,
				rq->io_payload,
				rq->io_md,
				cache_offset, len,
				read_open_chunk_cb,
				rq);

		if (rc) {
			ftl_rq_del(rq);
			parent->success = false;
			read_tail_md_cb(parent);
			return;
		}
	} else {
		ftl_rq_del(rq);
		restore_fill_tail_md(parent, chunk);
	}
}
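
/*
 * Rebuild the P2L map of a chunk that was left open: read its user data back from the
 * cache device in xfer_size batches, letting read_open_chunk_cb() translate the per-block
 * VSS metadata into P2L entries and finally write the tail metadata.
 */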
static void
restore_open_chunk(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *parent)
{
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_rq *rq;
	uint64_t addr;
	uint64_t len = dev->xfer_size;
	int rc;

	/*
	 * Prefill the P2L map with invalid LBAs.
	 * TODO we need to do this because tail md blocks (the P2L map itself) are also
	 * represented in the P2L map, instead of just the user data region
	 */
	memset(chunk->p2l_map.chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);

	/* Need to read user data, recalculate chunk's P2L and write tail md with it */
	rq = ftl_rq_new(dev, dev->nv_cache.md_size);
	if (!rq) {
		parent->success = false;
		read_tail_md_cb(parent);
		return;
	}

	rq->owner.priv = parent;
	rq->iter.idx = 0;
	rq->iter.count = len;

	addr = chunk->offset;

	len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - addr);

	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
			nv_cache->cache_ioch,
			rq->io_payload,
			rq->io_md,
			addr, len,
			read_open_chunk_cb,
			rq);

	if (rc) {
		ftl_rq_del(rq);
		parent->success = false;
		read_tail_md_cb(parent);
	}
}

static void
read_tail_md_cb(struct ftl_basic_rq *brq)
{
	brq->owner.cb(brq);
}

static int
ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
		       void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx)
{
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	void *metadata;
	int rc;

	metadata = chunk->p2l_map.chunk_map;
	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
	ftl_basic_rq_set_owner(brq, cb, cb_ctx);

	brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
	rc = ftl_chunk_basic_rq_read(chunk, brq);

	return rc;
}

struct restore_chunk_md_ctx {
	ftl_chunk_md_cb cb;
	void *cb_ctx;
	int status;
	uint64_t qd;
	uint64_t id;
};

static inline bool
is_chunk_count_valid(struct ftl_nv_cache *nv_cache)
{
	uint64_t chunk_count = 0;

	chunk_count += nv_cache->chunk_open_count;
	chunk_count += nv_cache->chunk_free_count;
	chunk_count += nv_cache->chunk_full_count;
	chunk_count += nv_cache->chunk_comp_count;
	chunk_count += nv_cache->chunk_inactive_count;

	return chunk_count == nv_cache->chunk_count;
}

static void
walk_tail_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_mngt_process *mngt = brq->owner.priv;
	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
	struct restore_chunk_md_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
	int rc = 0;

	if (brq->success) {
		rc = ctx->cb(chunk, ctx->cb_ctx);
	} else {
		rc = -EIO;
	}

	if (rc) {
		ctx->status = rc;
	}
	ctx->qd--;
	chunk_free_p2l_map(chunk);
	ftl_mngt_continue_step(mngt);
}
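
/*
 * For every chunk marked for recovery (and newer than the optional seq_id cut-off), read
 * its tail P2L map and invoke the caller's ftl_chunk_md_cb on it. Reads are issued as long
 * as free P2L map pool entries are available.
 */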
static void
ftl_mngt_nv_cache_walk_tail_md(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
			       uint64_t seq_id, ftl_chunk_md_cb cb, void *cb_ctx)
{
	struct ftl_nv_cache *nvc = &dev->nv_cache;
	struct restore_chunk_md_ctx *ctx;

	ctx = ftl_mngt_get_step_ctx(mngt);
	if (!ctx) {
		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*ctx))) {
			ftl_mngt_fail_step(mngt);
			return;
		}
		ctx = ftl_mngt_get_step_ctx(mngt);
		assert(ctx);

		ctx->cb = cb;
		ctx->cb_ctx = cb_ctx;
	}

	/*
	 * This function generates a high queue depth and uses ftl_mngt_continue_step during
	 * completions to make sure all chunks are processed before returning an error (if any
	 * were found) or continuing on.
	 */
	if (0 == ctx->qd && ctx->id == nvc->chunk_count) {
		if (!is_chunk_count_valid(nvc)) {
			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
			assert(false);
			ctx->status = -EINVAL;
		}

		if (ctx->status) {
			ftl_mngt_fail_step(mngt);
		} else {
			ftl_mngt_next_step(mngt);
		}
		return;
	}

	while (ctx->id < nvc->chunk_count) {
		struct ftl_nv_cache_chunk *chunk = &nvc->chunks[ctx->id];
		int rc;

		if (!chunk->recovery) {
			/* This chunk is inactive or empty and not used in recovery */
			ctx->id++;
			continue;
		}

		if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
			ctx->id++;
			continue;
		}

		if (chunk_alloc_p2l_map(chunk)) {
			/* No more free P2L maps, break and continue later */
			break;
		}
		ctx->id++;

		rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, walk_tail_md_cb, mngt);

		if (0 == rc) {
			ctx->qd++;
		} else {
			chunk_free_p2l_map(chunk);
			ctx->status = rc;
		}
	}

	if (0 == ctx->qd) {
		/*
		 * Zero QD can happen when all leftover chunks are in the free state.
		 * Additionally ftl_chunk_read_tail_md could fail starting with the first IO in a
		 * given batch. For streamlining of all potential error handling (since many chunks
		 * are reading P2L at the same time), we're using ftl_mngt_continue_step to arrive
		 * at the same spot of checking for mngt step end (see beginning of function).
		 */
		ftl_mngt_continue_step(mngt);
	}
}

void
ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
			      ftl_chunk_md_cb cb, void *cb_ctx)
{
	ftl_mngt_nv_cache_walk_tail_md(dev, mngt, dev->sb->ckpt_seq_id, cb, cb_ctx);
}

static void
restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
	struct ftl_nv_cache *nvc = &dev->nv_cache;
	struct ftl_nv_cache_chunk *chunk;
	uint64_t i;

	if (status) {
		/* Restore error, end step */
		ftl_mngt_fail_step(mngt);
		return;
	}

	for (i = 0; i < nvc->chunk_count; i++) {
		chunk = &nvc->chunks[i];

		if (false == nvc->nvc_type->ops.is_chunk_active(dev, chunk->offset) &&
		    chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
			status = -EINVAL;
			break;
		}

		if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
			status = -EINVAL;
			break;
		}

		switch (chunk->md->state) {
		case FTL_CHUNK_STATE_FREE:
			break;
		case FTL_CHUNK_STATE_OPEN:
			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
			nvc->chunk_free_count--;

			TAILQ_INSERT_TAIL(&nvc->chunk_open_list, chunk, entry);
			nvc->chunk_open_count++;

			/* Chunk is not empty, mark it to be recovered */
			chunk->recovery = true;
			break;
		case FTL_CHUNK_STATE_CLOSED:
			TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
			nvc->chunk_free_count--;

			TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
			nvc->chunk_full_count++;

			/* Chunk is not empty, mark it to be recovered */
			chunk->recovery = true;
			break;
		case FTL_CHUNK_STATE_INACTIVE:
			break;
		default:
			status = -EINVAL;
		}
	}

	if (status) {
		ftl_mngt_fail_step(mngt);
	} else {
		ftl_mngt_next_step(mngt);
	}
}
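
/*
 * Management step: restore per-chunk metadata from the NVC_MD region;
 * restore_chunk_state_cb() then validates each entry and moves chunks from the free list
 * onto the open or full list according to their persisted state.
 */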
void
ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];

	md->owner.cb_ctx = mngt;
	md->cb = restore_chunk_state_cb;
	ftl_md_restore(md);
}

static void
recover_open_chunk_cb(struct ftl_basic_rq *brq)
{
	struct ftl_mngt_process *mngt = brq->owner.priv;
	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
	struct ftl_nv_cache *nvc = chunk->nv_cache;
	struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);

	chunk_free_p2l_map(chunk);

	if (!brq->success) {
		FTL_ERRLOG(dev, "Recovery chunk ERROR, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
			   chunk->md->seq_id);
		ftl_mngt_fail_step(mngt);
		return;
	}

	FTL_NOTICELOG(dev, "Recovered chunk, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
		      chunk->md->seq_id);

	TAILQ_REMOVE(&nvc->chunk_open_list, chunk, entry);
	nvc->chunk_open_count--;

	TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
	nvc->chunk_full_count++;

	/* The chunk is closed now */
	chunk->md->write_pointer = nvc->chunk_blocks;
	chunk->md->blocks_written = nvc->chunk_blocks;

	ftl_mngt_continue_step(mngt);
}

void
ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_nv_cache *nvc = &dev->nv_cache;
	struct ftl_nv_cache_chunk *chunk;
	struct ftl_basic_rq *brq = ftl_mngt_get_step_ctx(mngt);

	if (!brq) {
		if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
			FTL_NOTICELOG(dev, "No open chunks to recover P2L\n");
			ftl_mngt_next_step(mngt);
			return;
		}

		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*brq))) {
			ftl_mngt_fail_step(mngt);
			return;
		}
		brq = ftl_mngt_get_step_ctx(mngt);
		ftl_basic_rq_set_owner(brq, recover_open_chunk_cb, mngt);
	}

	if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
		if (!is_chunk_count_valid(nvc)) {
			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
			ftl_mngt_fail_step(mngt);
			return;
		}

		/*
		 * Now that all chunks are loaded and closed, do the final step of restoring
		 * the chunks' state
		 */
		if (ftl_nv_cache_load_state(nvc)) {
			ftl_mngt_fail_step(mngt);
		} else {
			ftl_mngt_next_step(mngt);
		}
	} else {
		chunk = TAILQ_FIRST(&nvc->chunk_open_list);
		if (chunk_alloc_p2l_map(chunk)) {
			ftl_mngt_fail_step(mngt);
			return;
		}

		brq->io.chunk = chunk;

		FTL_NOTICELOG(dev, "Starting recovery of open chunk, offset = %"PRIu64", seq id %"PRIu64"\n",
			      chunk->offset, chunk->md->seq_id);
		restore_open_chunk(chunk, brq);
	}
}

int
ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache)
{
	/* chunk_current migrates to the closed state when closing; any other chunks should
	 * already have been moved to the free chunk list. We also need to wait for outstanding
	 * free-md persist requests.
	 */
	return nv_cache->chunk_open_count == 0 && nv_cache->chunk_free_persist_count == 0;
}

void
ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
{
	struct ftl_nv_cache_chunk *chunk;
	uint64_t free_space;

	nv_cache->halt = true;

	/* Set chunks on the open list back to the free state since no user data has been
	 * written to them */
	while (!TAILQ_EMPTY(&nv_cache->chunk_open_list)) {
		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);

		/* Chunks are moved between lists on metadata update submission, but state is
		 * changed on completion. Breaking early in such a case to make sure all the
		 * necessary resources will be freed (during next pass(es) of ftl_nv_cache_halt).
		 */
		if (chunk->md->state != FTL_CHUNK_STATE_OPEN) {
			break;
		}

		TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
		chunk_free_p2l_map(chunk);
		ftl_nv_cache_chunk_md_initialize(chunk->md);
		assert(nv_cache->chunk_open_count > 0);
		nv_cache->chunk_open_count--;
	}

	/* Close the current chunk by skipping all unwritten blocks */
	chunk = nv_cache->chunk_current;
	if (chunk != NULL) {
		nv_cache->chunk_current = NULL;
		if (chunk_is_closed(chunk)) {
			return;
		}

		free_space = chunk_get_free_space(nv_cache, chunk);
		chunk->md->blocks_skipped = free_space;
		chunk->md->blocks_written += free_space;
		chunk->md->write_pointer += free_space;
		ftl_chunk_close(chunk);
	}
}

uint64_t
ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache)
{
	struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
	uint64_t seq_id, free_space;

	if (!chunk) {
		chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
		if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
			return chunk->md->seq_id;
		} else {
			return 0;
		}
	}

	if (chunk_is_closed(chunk)) {
		return 0;
	}

	seq_id = nv_cache->chunk_current->md->seq_id;
	free_space = chunk_get_free_space(nv_cache, chunk);

	chunk->md->blocks_skipped = free_space;
	chunk->md->blocks_written += free_space;
	chunk->md->write_pointer += free_space;
	if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
		ftl_chunk_close(chunk);
	}
	nv_cache->chunk_current = NULL;

	seq_id++;
	return seq_id;
}

static double
ftl_nv_cache_get_chunk_utilization(struct ftl_nv_cache *nv_cache,
				   struct ftl_nv_cache_chunk *chunk)
{
	double capacity = nv_cache->chunk_blocks;
	double used = chunk->md->blocks_written + chunk->md->blocks_skipped;

	return used / capacity;
}

static const char *
ftl_nv_cache_get_chunk_state_name(struct ftl_nv_cache_chunk *chunk)
{
	static const char *names[] = {
		"FREE", "OPEN", "CLOSED", "INACTIVE"
	};

	assert(chunk->md->state < SPDK_COUNTOF(names));
	if (chunk->md->state < SPDK_COUNTOF(names)) {
		return names[chunk->md->state];
	} else {
		assert(false);
		return "?";
	}
}
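
/*
 * Property dump callback: writes the nv cache type and a per-chunk array (id, state,
 * utilization) as JSON.
 */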
static void
ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev, const struct ftl_property *property,
			    struct spdk_json_write_ctx *w)
{
	uint64_t i;
	struct ftl_nv_cache_chunk *chunk;

	spdk_json_write_named_string(w, "type", dev->nv_cache.nvc_type->name);
	spdk_json_write_named_array_begin(w, "chunks");
	for (i = 0, chunk = dev->nv_cache.chunks; i < dev->nv_cache.chunk_count; i++, chunk++) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_uint64(w, "id", i);
		spdk_json_write_named_string(w, "state", ftl_nv_cache_get_chunk_state_name(chunk));
		spdk_json_write_named_double(w, "utilization",
					     ftl_nv_cache_get_chunk_utilization(&dev->nv_cache, chunk));
		spdk_json_write_object_end(w);
	}
	spdk_json_write_array_end(w);
}

void
ftl_nv_cache_chunk_md_initialize(struct ftl_nv_cache_chunk_md *md)
{
	memset(md, 0, sizeof(*md));
	md->version = FTL_NVC_VERSION_CURRENT;
}