/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/cpuset.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/ftl.h"
#include "spdk/conf.h"
#include "spdk/env.h"
#include "spdk/util.h"

#include "ftl_core.h"
#include "ftl_l2p_cache.h"
#include "ftl_layout.h"
#include "ftl_nv_cache_io.h"
#include "mngt/ftl_mngt_steps.h"
#include "utils/ftl_defs.h"
#include "utils/ftl_addr_utils.h"

struct ftl_l2p_cache_page_io_ctx {
	struct ftl_l2p_cache *cache;
	uint64_t updates;
	spdk_bdev_io_completion_cb cb;
	struct spdk_bdev_io_wait_entry bdev_io_wait;
};

enum ftl_l2p_page_state {
	L2P_CACHE_PAGE_INIT,		/* Page in memory not initialized from disk page */
	L2P_CACHE_PAGE_READY,		/* Page initialized from disk */
	L2P_CACHE_PAGE_FLUSHING,	/* Page is being flushed to disk and removed from memory */
	L2P_CACHE_PAGE_PERSISTING,	/* Page is being flushed to disk and not removed from memory */
	L2P_CACHE_PAGE_CLEARING,	/* Page is being initialized with INVALID addresses */
	L2P_CACHE_PAGE_CORRUPTED	/* Page corrupted */
};

struct ftl_l2p_page {
	uint64_t updates; /* Number of times an L2P entry was updated in the page since it was last persisted */
	TAILQ_HEAD(, ftl_l2p_page_wait_ctx) ppe_list; /* for deferred pins */
	TAILQ_ENTRY(ftl_l2p_page) list_entry;
	uint64_t page_no;
	enum ftl_l2p_page_state state;
	uint64_t pin_ref_cnt;
	struct ftl_l2p_cache_page_io_ctx ctx;
	bool on_lru_list;
	void *page_buffer;
	uint64_t ckpt_seq_id;
	ftl_df_obj_id obj_id;
};

struct ftl_l2p_page_set;

struct ftl_l2p_page_wait_ctx {
	uint16_t pg_pin_issued;
	uint16_t pg_pin_completed;
	struct ftl_l2p_page_set *parent;
	uint64_t pg_no;
	TAILQ_ENTRY(ftl_l2p_page_wait_ctx) list_entry;
};

/* An L2P page contains 1024 4B entries (or 512 8B ones for big drives).
 * Currently internal IO will only pin 1 LBA at a time, so only one entry should be needed.
 * User IO is split on internal xfer_size boundaries, which is currently set to 1MiB (256 blocks),
 * so one entry should also be enough.
 * TODO: We should probably revisit this, when/if the xfer_size is based on the IO requirements of the
 * bottom device (e.g. RAID5F), since then big IOs (especially unaligned ones) could potentially break this.
 */
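/*
 * For illustration (numbers derived from the comment above): a 4KiB page of
 * 1024 4B entries maps 1024 consecutive LBAs, and 1024 is a multiple of the
 * 256-block xfer_size, so a chunk split on xfer_size boundaries fits in one
 * page. An unaligned 256-block span, e.g. LBAs 900..1155, would straddle
 * pages 0 and 1; L2P_MAX_PAGES_TO_PIN below leaves headroom beyond that.
 */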
#define L2P_MAX_PAGES_TO_PIN 4
struct ftl_l2p_page_set {
	uint16_t to_pin_cnt;
	uint16_t pinned_cnt;
	uint16_t pin_fault_cnt;
	uint8_t locked;
	uint8_t deferred;
	struct ftl_l2p_pin_ctx *pin_ctx;
	TAILQ_ENTRY(ftl_l2p_page_set) list_entry;
	struct ftl_l2p_page_wait_ctx entry[L2P_MAX_PAGES_TO_PIN];
};

struct ftl_l2p_l1_map_entry {
	ftl_df_obj_id page_obj_id;
};

enum ftl_l2p_cache_state {
	L2P_CACHE_INIT,
	L2P_CACHE_RUNNING,
	L2P_CACHE_IN_SHUTDOWN,
	L2P_CACHE_SHUTDOWN_DONE,
};

struct ftl_l2p_cache_process_ctx {
	int status;
	ftl_l2p_cb cb;
	void *cb_ctx;
	uint64_t idx;
	uint64_t qd;
};

struct ftl_l2p_cache {
	struct spdk_ftl_dev *dev;
	struct ftl_l2p_l1_map_entry *l2_mapping;
	struct ftl_md *l2_md;
	struct ftl_md *l2_ctx_md;
	struct ftl_mempool *l2_ctx_pool;
	struct ftl_md *l1_md;

	TAILQ_HEAD(l2p_lru_list, ftl_l2p_page) lru_list;
	/* TODO: A lot of / and % operations are done on this value, consider adding a shift-based field and calculations instead */
	uint64_t lbas_in_page;
	uint64_t num_pages;		/* num pages to hold the entire L2P */

	uint64_t ios_in_flight;		/* Currently in-flight IOs, to determine L2P shutdown readiness */
	enum ftl_l2p_cache_state state;
	uint32_t l2_pgs_avail;
	uint32_t l2_pgs_evicting;
	uint32_t l2_pgs_resident_max;
	uint32_t evict_keep;
	struct ftl_mempool *page_sets_pool;
	TAILQ_HEAD(, ftl_l2p_page_set) deferred_page_set_list; /* for deferred page sets */

	/* Process unmap in background */
	struct {
#define FTL_L2P_MAX_LAZY_UNMAP_QD 1
		/* Unmap queue depth */
		uint32_t qd;
		/* Currently processed page */
		uint64_t page_no;
		/* Context for page pinning */
		struct ftl_l2p_pin_ctx pin_ctx;
	} lazy_unmap;

	/* This is a context for a management process */
	struct ftl_l2p_cache_process_ctx mctx;

	/* MD layout cache: Offset on a device in FTL_BLOCK_SIZE unit */
	uint64_t cache_layout_offset;

	/* MD layout cache: Device of region */
	struct spdk_bdev_desc *cache_layout_bdev_desc;

	/* MD layout cache: IO channel of region */
	struct spdk_io_channel *cache_layout_ioch;
};

typedef void (*ftl_l2p_cache_clear_cb)(struct ftl_l2p_cache *cache, int status, void *ctx_page);
typedef void (*ftl_l2p_cache_persist_cb)(struct ftl_l2p_cache *cache, int status, void *ctx_page);
typedef void (*ftl_l2p_cache_sync_cb)(struct spdk_ftl_dev *dev, int status, void *page,
				      void *user_ctx);

static bool page_set_is_done(struct ftl_l2p_page_set *page_set);
static void page_set_end(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
			 struct ftl_l2p_page_set *page_set);
static void page_out_io_retry(void *arg);
static void page_in_io_retry(void *arg);

static inline void
ftl_l2p_page_queue_wait_ctx(struct ftl_l2p_page *page,
			    struct ftl_l2p_page_wait_ctx *ppe)
{
	TAILQ_INSERT_TAIL(&page->ppe_list, ppe, list_entry);
}

static inline uint64_t
ftl_l2p_cache_get_l1_page_size(void)
{
	return 1UL << 12;
}

static inline uint64_t
ftl_l2p_cache_get_lbas_in_page(struct ftl_l2p_cache *cache)
{
	return cache->lbas_in_page;
}

static inline size_t
ftl_l2p_cache_get_page_all_size(void)
{
	return sizeof(struct ftl_l2p_page) + ftl_l2p_cache_get_l1_page_size();
}

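/*
 * LRU bookkeeping helpers. The list head holds the most recently used page
 * (ftl_l2p_cache_lru_add_page inserts at the head) and the tail holds the
 * coldest page, which is where eviction scans start - see
 * ftl_l2p_cache_get_coldest_page below.
 */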
static void
ftl_l2p_cache_lru_remove_page(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
{
	assert(page);
	assert(page->on_lru_list);

	TAILQ_REMOVE(&cache->lru_list, page, list_entry);
	page->on_lru_list = false;
}

static void
ftl_l2p_cache_lru_add_page(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
{
	assert(page);
	assert(!page->on_lru_list);

	TAILQ_INSERT_HEAD(&cache->lru_list, page, list_entry);

	page->on_lru_list = true;
}

static void
ftl_l2p_cache_lru_promote_page(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
{
	if (!page->on_lru_list) {
		return;
	}

	ftl_l2p_cache_lru_remove_page(cache, page);
	ftl_l2p_cache_lru_add_page(cache, page);
}

static inline void
ftl_l2p_cache_page_insert(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
{
	struct ftl_l2p_l1_map_entry *me = cache->l2_mapping;
	assert(me);

	assert(me[page->page_no].page_obj_id == FTL_DF_OBJ_ID_INVALID);
	me[page->page_no].page_obj_id = page->obj_id;
}

static void
ftl_l2p_cache_page_remove(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
{
	struct ftl_l2p_l1_map_entry *me = cache->l2_mapping;
	assert(me);
	assert(me[page->page_no].page_obj_id != FTL_DF_OBJ_ID_INVALID);
	assert(TAILQ_EMPTY(&page->ppe_list));

	me[page->page_no].page_obj_id = FTL_DF_OBJ_ID_INVALID;
	cache->l2_pgs_avail++;
	ftl_mempool_put(cache->l2_ctx_pool, page);
}

static inline struct ftl_l2p_page *
ftl_l2p_cache_get_coldest_page(struct ftl_l2p_cache *cache)
{
	return TAILQ_LAST(&cache->lru_list, l2p_lru_list);
}

static inline struct ftl_l2p_page *
ftl_l2p_cache_get_hotter_page(struct ftl_l2p_page *page)
{
	return TAILQ_PREV(page, l2p_lru_list, list_entry);
}

static inline uint64_t
ftl_l2p_cache_page_get_bdev_offset(struct ftl_l2p_cache *cache,
				   struct ftl_l2p_page *page)
{
	return cache->cache_layout_offset + page->page_no;
}

static inline struct spdk_bdev_desc *
ftl_l2p_cache_get_bdev_desc(struct ftl_l2p_cache *cache)
{
	return cache->cache_layout_bdev_desc;
}

static inline struct spdk_io_channel *
ftl_l2p_cache_get_bdev_iochannel(struct ftl_l2p_cache *cache)
{
	return cache->cache_layout_ioch;
}

static struct ftl_l2p_page *
ftl_l2p_cache_page_alloc(struct ftl_l2p_cache *cache, size_t page_no)
{
	struct ftl_l2p_page *page = ftl_mempool_get(cache->l2_ctx_pool);
	ftl_bug(!page);

	cache->l2_pgs_avail--;

	memset(page, 0, sizeof(*page));

	page->obj_id = ftl_mempool_get_df_obj_id(cache->l2_ctx_pool, page);

	page->page_buffer = (char *)ftl_md_get_buffer(cache->l1_md) + ftl_mempool_get_df_obj_index(
				    cache->l2_ctx_pool, page) * FTL_BLOCK_SIZE;

	TAILQ_INIT(&page->ppe_list);

	page->page_no = page_no;
	page->state = L2P_CACHE_PAGE_INIT;

	return page;
}

static inline bool
ftl_l2p_cache_page_can_remove(struct ftl_l2p_page *page)
{
	return (!page->updates &&
		page->state != L2P_CACHE_PAGE_INIT &&
		!page->pin_ref_cnt);
}

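/*
 * Note: lba % lbas_in_page is an entry index, not a byte offset; the
 * ftl_addr_load()/ftl_addr_store() helpers translate it according to the
 * device's entry width (4B or 8B, per the page-size comment above
 * L2P_MAX_PAGES_TO_PIN).
 */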
static inline ftl_addr
ftl_l2p_cache_get_addr(struct spdk_ftl_dev *dev,
		       struct ftl_l2p_cache *cache, struct ftl_l2p_page *page, uint64_t lba)
{
	return ftl_addr_load(dev, page->page_buffer, lba % cache->lbas_in_page);
}

static inline void
ftl_l2p_cache_set_addr(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
		       struct ftl_l2p_page *page, uint64_t lba, ftl_addr addr)
{
	ftl_addr_store(dev, page->page_buffer, lba % cache->lbas_in_page, addr);
}

static void
ftl_l2p_page_set_invalid(struct spdk_ftl_dev *dev, struct ftl_l2p_page *page)
{
	ftl_addr addr;
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	uint64_t naddr;

	page->updates++;

	naddr = ftl_l2p_cache_get_lbas_in_page(cache);
	for (uint64_t i = 0; i < naddr; i++) {
		addr = ftl_addr_load(dev, page->page_buffer, i);
		if (addr == FTL_ADDR_INVALID) {
			continue;
		}

		ftl_invalidate_addr(dev, addr);
		ftl_l2p_cache_set_addr(dev, cache, page, i, FTL_ADDR_INVALID);
	}
}

static inline void
ftl_l2p_cache_page_pin(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
{
	page->pin_ref_cnt++;
	/* Pinned pages can't be evicted (since L2P sets/gets will be executed on them), so remove them from the LRU */
	if (page->on_lru_list) {
		ftl_l2p_cache_lru_remove_page(cache, page);
	}
}

static inline void
ftl_l2p_cache_page_unpin(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
{
	page->pin_ref_cnt--;
	if (!page->pin_ref_cnt && !page->on_lru_list && page->state != L2P_CACHE_PAGE_FLUSHING) {
		/* L2P_CACHE_PAGE_FLUSHING: the page is currently being evicted.
		 * In such a case, the page can't be returned to the rank list, because
		 * the ongoing eviction will remove it if no page updates have happened.
		 * Moreover, the page could make it to the top of the rank list and be
		 * selected for another eviction, while the ongoing one has not finished yet.
		 *
		 * Depending on the page updates tracker, the page will be evicted
		 * or returned to the rank list in the context of the eviction completion
		 * cb - see page_out_io_complete().
		 */
		ftl_l2p_cache_lru_add_page(cache, page);
	}
}

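/*
 * A page is evictable only once it is READY and idle: INIT pages have no valid
 * content yet, FLUSHING/PERSISTING pages already have a write-back in flight,
 * and pinned pages are in active use by L2P gets/sets.
 */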
static inline bool
ftl_l2p_cache_page_can_evict(struct ftl_l2p_page *page)
{
	return (page->state == L2P_CACHE_PAGE_FLUSHING ||
		page->state == L2P_CACHE_PAGE_PERSISTING ||
		page->state == L2P_CACHE_PAGE_INIT ||
		page->pin_ref_cnt) ? false : true;
}

static bool
ftl_l2p_cache_evict_continue(struct ftl_l2p_cache *cache)
{
	return cache->l2_pgs_avail + cache->l2_pgs_evicting < cache->evict_keep;
}

static void *
_ftl_l2p_cache_init(struct spdk_ftl_dev *dev, size_t addr_size, uint64_t l2p_size)
{
	struct ftl_l2p_cache *cache;
	uint64_t l2_pages = spdk_divide_round_up(l2p_size, ftl_l2p_cache_get_l1_page_size());
	size_t l2_size = l2_pages * sizeof(struct ftl_l2p_l1_map_entry);

	cache = calloc(1, sizeof(struct ftl_l2p_cache));
	if (cache == NULL) {
		return NULL;
	}
	cache->dev = dev;

	cache->l2_md = ftl_md_create(dev,
				     spdk_divide_round_up(l2_size, FTL_BLOCK_SIZE), 0,
				     FTL_L2P_CACHE_MD_NAME_L2,
				     ftl_md_create_shm_flags(dev), NULL);

	if (cache->l2_md == NULL) {
		goto fail_l2_md;
	}
	cache->l2_mapping = ftl_md_get_buffer(cache->l2_md);

	cache->lbas_in_page = dev->layout.l2p.lbas_in_page;
	cache->num_pages = l2_pages;

	return cache;
fail_l2_md:
	free(cache);
	return NULL;
}

static struct ftl_l2p_page *
get_l2p_page_by_df_id(struct ftl_l2p_cache *cache, size_t page_no)
{
	struct ftl_l2p_l1_map_entry *me = cache->l2_mapping;
	ftl_df_obj_id obj_id = me[page_no].page_obj_id;

	if (obj_id != FTL_DF_OBJ_ID_INVALID) {
		return ftl_mempool_get_df_ptr(cache->l2_ctx_pool, obj_id);
	}

	return NULL;
}

int
ftl_l2p_cache_init(struct spdk_ftl_dev *dev)
{
	uint64_t l2p_size = dev->num_lbas * dev->layout.l2p.addr_size;
	struct ftl_l2p_cache *cache;
	const struct ftl_layout_region *reg;
	void *l2p = _ftl_l2p_cache_init(dev, dev->layout.l2p.addr_size, l2p_size);
	size_t page_sets_pool_size = 1 << 15;
	size_t max_resident_size, max_resident_pgs;

	if (!l2p) {
		return -1;
	}
	dev->l2p = l2p;

	cache = (struct ftl_l2p_cache *)dev->l2p;
	cache->page_sets_pool = ftl_mempool_create(page_sets_pool_size,
				sizeof(struct ftl_l2p_page_set),
				64, SPDK_ENV_SOCKET_ID_ANY);
	if (!cache->page_sets_pool) {
		return -1;
	}

	max_resident_size = dev->conf.l2p_dram_limit << 20;
	max_resident_pgs = max_resident_size / ftl_l2p_cache_get_page_all_size();

	if (max_resident_pgs > cache->num_pages) {
		SPDK_NOTICELOG("l2p memory limit higher than entire L2P size\n");
		max_resident_pgs = cache->num_pages;
	}

	/* Round down max res pgs to the nearest # of l2/l1 pgs */
	max_resident_size = max_resident_pgs * ftl_l2p_cache_get_page_all_size();
	SPDK_NOTICELOG("l2p maximum resident size is: %"PRIu64" (of %"PRIu64") MiB\n",
		       max_resident_size >> 20, dev->conf.l2p_dram_limit);

	TAILQ_INIT(&cache->deferred_page_set_list);
	TAILQ_INIT(&cache->lru_list);

	cache->l2_ctx_md = ftl_md_create(dev,
					 spdk_divide_round_up(max_resident_pgs * SPDK_ALIGN_CEIL(sizeof(struct ftl_l2p_page), 64),
							 FTL_BLOCK_SIZE), 0, FTL_L2P_CACHE_MD_NAME_L2_CTX, ftl_md_create_shm_flags(dev), NULL);

	if (cache->l2_ctx_md == NULL) {
		return -1;
	}

	cache->l2_pgs_resident_max = max_resident_pgs;
	cache->l2_pgs_avail = max_resident_pgs;
	cache->l2_pgs_evicting = 0;
	cache->l2_ctx_pool = ftl_mempool_create_ext(ftl_md_get_buffer(cache->l2_ctx_md),
			     max_resident_pgs, sizeof(struct ftl_l2p_page), 64);

	if (cache->l2_ctx_pool == NULL) {
		return -1;
	}

#define FTL_L2P_CACHE_PAGE_AVAIL_MAX	(16UL << 10)
#define FTL_L2P_CACHE_PAGE_AVAIL_RATIO	5UL
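	/*
	 * Eviction watermark: keep roughly FTL_L2P_CACHE_PAGE_AVAIL_RATIO
	 * percent of all L2P pages worth of slots free (capped at
	 * FTL_L2P_CACHE_PAGE_AVAIL_MAX pages), so pin requests are less likely
	 * to wait for a write-back - see ftl_l2p_cache_evict_continue().
	 */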
	cache->evict_keep = spdk_divide_round_up(cache->num_pages * FTL_L2P_CACHE_PAGE_AVAIL_RATIO, 100);
	cache->evict_keep = spdk_min(FTL_L2P_CACHE_PAGE_AVAIL_MAX, cache->evict_keep);

	if (!ftl_fast_startup(dev) && !ftl_fast_recovery(dev)) {
		memset(cache->l2_mapping, (int)FTL_DF_OBJ_ID_INVALID, ftl_md_get_buffer_size(cache->l2_md));
		ftl_mempool_initialize_ext(cache->l2_ctx_pool);
	}

	cache->l1_md = ftl_md_create(dev,
				     max_resident_pgs, 0,
				     FTL_L2P_CACHE_MD_NAME_L1,
				     ftl_md_create_shm_flags(dev), NULL);

	if (cache->l1_md == NULL) {
		return -1;
	}

	/* Cache MD layout */
	reg = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_L2P];
	cache->cache_layout_offset = reg->current.offset;
	cache->cache_layout_bdev_desc = reg->bdev_desc;
	cache->cache_layout_ioch = reg->ioch;

	cache->state = L2P_CACHE_RUNNING;
	return 0;
}

static void
ftl_l2p_cache_deinit_l2(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache)
{
	ftl_md_destroy(cache->l2_ctx_md, ftl_md_destroy_shm_flags(dev));
	cache->l2_ctx_md = NULL;

	ftl_mempool_destroy_ext(cache->l2_ctx_pool);
	cache->l2_ctx_pool = NULL;

	ftl_md_destroy(cache->l1_md, ftl_md_destroy_shm_flags(dev));
	cache->l1_md = NULL;

	ftl_mempool_destroy(cache->page_sets_pool);
	cache->page_sets_pool = NULL;
}

static void
_ftl_l2p_cache_deinit(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;

	ftl_l2p_cache_deinit_l2(dev, cache);
	ftl_md_destroy(cache->l2_md, ftl_md_destroy_shm_flags(dev));
	free(cache);
}

void
ftl_l2p_cache_deinit(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;

	if (!cache) {
		return;
	}
	assert(cache->state == L2P_CACHE_SHUTDOWN_DONE || cache->state == L2P_CACHE_INIT);

	_ftl_l2p_cache_deinit(dev);
	dev->l2p = NULL;
}

static void
process_init_ctx(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
		 ftl_l2p_cb cb, void *cb_ctx)
{
	struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;

	assert(NULL == ctx->cb_ctx);
	assert(0 == cache->l2_pgs_evicting);

	memset(ctx, 0, sizeof(*ctx));

	ctx->cb = cb;
	ctx->cb_ctx = cb_ctx;
}

static void
process_finish(struct ftl_l2p_cache *cache)
{
	struct ftl_l2p_cache_process_ctx ctx = cache->mctx;

	assert(cache->l2_pgs_avail == cache->l2_pgs_resident_max);
	assert(0 == ctx.qd);

	memset(&cache->mctx, 0, sizeof(cache->mctx));
	ctx.cb(cache->dev, ctx.status, ctx.cb_ctx);
}

static void process_page_out_retry(void *_page);
static void process_persist(struct ftl_l2p_cache *cache);

static void
process_page_in(struct ftl_l2p_page *page, spdk_bdev_io_completion_cb cb)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)page->ctx.cache;
	int rc;

	assert(page->page_buffer);

	rc = ftl_nv_cache_bdev_read_blocks_with_md(cache->dev, ftl_l2p_cache_get_bdev_desc(cache),
			ftl_l2p_cache_get_bdev_iochannel(cache),
			page->page_buffer, NULL, ftl_l2p_cache_page_get_bdev_offset(cache, page),
			1, cb, page);

	if (rc) {
		cb(NULL, false, page);
	}
}

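/*
 * Write-back completion for the persist (shutdown) path: the page content is
 * on disk at this point, so the unmap flag can be cleared and the page dropped
 * from the cache before the next persist step is kicked off.
 */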
static void
process_persist_page_out_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_l2p_page *page = arg;
	struct ftl_l2p_cache *cache = page->ctx.cache;
	struct spdk_ftl_dev *dev = cache->dev;
	struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;

	assert(bdev_io);
	spdk_bdev_free_io(bdev_io);

	if (!success) {
		ctx->status = -EIO;
	}

	if (ftl_bitmap_get(dev->unmap_map, page->page_no)) {
		/*
		 * The page had been unmapped; in the persist path it was invalidated
		 * entirely before the IO, so the unmap flag can be cleared now.
		 */
		ftl_bitmap_clear(dev->unmap_map, page->page_no);
	}
	ftl_l2p_cache_page_remove(cache, page);

	ctx->qd--;
	process_persist(cache);
}

static void
process_page_out(struct ftl_l2p_page *page, spdk_bdev_io_completion_cb cb)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_wait_entry *bdev_io_wait;
	struct ftl_l2p_cache *cache = page->ctx.cache;
	struct spdk_ftl_dev *dev = cache->dev;
	int rc;

	assert(page->page_buffer);

	rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, ftl_l2p_cache_get_bdev_desc(cache),
			ftl_l2p_cache_get_bdev_iochannel(cache),
			page->page_buffer, NULL, ftl_l2p_cache_page_get_bdev_offset(cache, page),
			1, cb, page);

	if (spdk_likely(0 == rc)) {
		return;
	}

	if (rc == -ENOMEM) {
		bdev = spdk_bdev_desc_get_bdev(ftl_l2p_cache_get_bdev_desc(cache));
		bdev_io_wait = &page->ctx.bdev_io_wait;
		bdev_io_wait->bdev = bdev;
		bdev_io_wait->cb_fn = process_page_out_retry;
		bdev_io_wait->cb_arg = page;
		page->ctx.cb = cb;

		rc = spdk_bdev_queue_io_wait(bdev, ftl_l2p_cache_get_bdev_iochannel(cache), bdev_io_wait);
		ftl_bug(rc);
	} else {
		ftl_abort();
	}
}

static void
process_page_out_retry(void *_page)
{
	struct ftl_l2p_page *page = _page;

	process_page_out(page, page->ctx.cb);
}

static void process_unmap(struct ftl_l2p_cache *cache);

static void
process_unmap_page_out_cb(struct spdk_bdev_io *bdev_io, bool success, void *ctx_page)
{
	struct ftl_l2p_page *page = (struct ftl_l2p_page *)ctx_page;
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)page->ctx.cache;
	struct spdk_ftl_dev *dev = cache->dev;
	struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;

	assert(bdev_io);
	spdk_bdev_free_io(bdev_io);

	if (!success) {
		ctx->status = -EIO;
	}

	assert(!page->on_lru_list);
	assert(ftl_bitmap_get(dev->unmap_map, page->page_no));
	ftl_bitmap_clear(dev->unmap_map, page->page_no);
	ftl_l2p_cache_page_remove(cache, page);

	ctx->qd--;
	process_unmap(cache);
}

static void
process_unmap_page_in_cb(struct spdk_bdev_io *bdev_io, bool success, void *ctx_page)
{
	struct ftl_l2p_page *page = (struct ftl_l2p_page *)ctx_page;
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)page->ctx.cache;
	struct spdk_ftl_dev *dev = cache->dev;
	struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;

	if (bdev_io) {
		spdk_bdev_free_io(bdev_io);
	}
	if (success) {
		assert(ftl_bitmap_get(dev->unmap_map, page->page_no));
		ftl_l2p_page_set_invalid(dev, page);
		process_page_out(page, process_unmap_page_out_cb);
	} else {
		ctx->status = -EIO;
		ctx->qd--;
		process_unmap(cache);
	}
}

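/*
 * Walk the unmap bitmap and, for every marked page, read it in, invalidate all
 * of its entries, and write it back (see the page-in/page-out callbacks above),
 * keeping at most 64 page IOs in flight.
 */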
static void
process_unmap(struct ftl_l2p_cache *cache)
{
	struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;

	while (ctx->idx < cache->num_pages && ctx->qd < 64) {
		struct ftl_l2p_page *page;

		if (!ftl_bitmap_get(cache->dev->unmap_map, ctx->idx)) {
			/* Page has not been unmapped, continue */
			ctx->idx++;
			continue;
		}

		/* All pages were removed in the persist phase */
		assert(get_l2p_page_by_df_id(cache, ctx->idx) == NULL);

		/* Allocate the page to invalidate it */
		page = ftl_l2p_cache_page_alloc(cache, ctx->idx);
		if (!page) {
			/* All pages are in use; continue when some become available again */
			assert(ctx->qd);
			break;
		}

		page->state = L2P_CACHE_PAGE_CLEARING;
		page->ctx.cache = cache;

		ftl_l2p_cache_page_insert(cache, page);
		process_page_in(page, process_unmap_page_in_cb);

		ctx->qd++;
		ctx->idx++;
	}

	if (0 == ctx->qd) {
		process_finish(cache);
	}
}

void
ftl_l2p_cache_unmap(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;

	process_init_ctx(dev, cache, cb, cb_ctx);
	process_unmap(cache);
}

static void
clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
	ftl_l2p_cb cb = md->owner.private;
	void *cb_ctx = md->owner.cb_ctx;

	cb(dev, status, cb_ctx);
}

void
ftl_l2p_cache_clear(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_L2P];
	ftl_addr invalid_addr = FTL_ADDR_INVALID;

	md->cb = clear_cb;
	md->owner.cb_ctx = cb_ctx;
	md->owner.private = cb;

	ftl_md_clear(md, invalid_addr, NULL);
}

static void
l2p_shm_restore_clean(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	struct ftl_l2p_l1_map_entry *me = cache->l2_mapping;
	struct ftl_l2p_page *page;
	ftl_df_obj_id obj_id;
	uint64_t page_no;

	for (page_no = 0; page_no < cache->num_pages; ++page_no) {
		obj_id = me[page_no].page_obj_id;
		if (obj_id == FTL_DF_OBJ_ID_INVALID) {
			continue;
		}

		page = ftl_mempool_claim_df(cache->l2_ctx_pool, obj_id);
		assert(page);
		assert(page->obj_id == ftl_mempool_get_df_obj_id(cache->l2_ctx_pool, page));
		assert(page->page_no == page_no);
		assert(page->state != L2P_CACHE_PAGE_INIT);
		assert(page->state != L2P_CACHE_PAGE_CLEARING);
		assert(cache->l2_pgs_avail > 0);
		cache->l2_pgs_avail--;

		page->page_buffer = (char *)ftl_md_get_buffer(cache->l1_md) + ftl_mempool_get_df_obj_index(
					    cache->l2_ctx_pool, page) * FTL_BLOCK_SIZE;

		TAILQ_INIT(&page->ppe_list);

		page->pin_ref_cnt = 0;
		page->on_lru_list = false;
		memset(&page->ctx, 0, sizeof(page->ctx));

		ftl_l2p_cache_lru_add_page(cache, page);
	}

	ftl_mempool_initialize_ext(cache->l2_ctx_pool);
}

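/*
 * Dirty variant of the SHM restore above: after a crash the write-back state
 * of a resident page is unknown, so every claimed page is marked dirty
 * (updates = 1), and half-initialized (INIT) pages are released instead of
 * being reattached.
 */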
static void
l2p_shm_restore_dirty(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	struct ftl_l2p_l1_map_entry *me = cache->l2_mapping;
	struct ftl_l2p_page *page;
	ftl_df_obj_id obj_id;
	uint64_t page_no;

	for (page_no = 0; page_no < cache->num_pages; ++page_no) {
		obj_id = me[page_no].page_obj_id;
		if (obj_id == FTL_DF_OBJ_ID_INVALID) {
			continue;
		}

		page = ftl_mempool_claim_df(cache->l2_ctx_pool, obj_id);
		assert(page);
		assert(page->obj_id == ftl_mempool_get_df_obj_id(cache->l2_ctx_pool, page));
		assert(page->page_no == page_no);
		assert(page->state != L2P_CACHE_PAGE_CLEARING);
		assert(cache->l2_pgs_avail > 0);
		cache->l2_pgs_avail--;

		if (page->state == L2P_CACHE_PAGE_INIT) {
			me[page_no].page_obj_id = FTL_DF_OBJ_ID_INVALID;
			cache->l2_pgs_avail++;
			ftl_mempool_release_df(cache->l2_ctx_pool, obj_id);
			continue;
		}

		page->state = L2P_CACHE_PAGE_READY;
		/* Assume the page is dirty after a crash */
		page->updates = 1;
		page->page_buffer = (char *)ftl_md_get_buffer(cache->l1_md) + ftl_mempool_get_df_obj_index(
					    cache->l2_ctx_pool, page) * FTL_BLOCK_SIZE;

		TAILQ_INIT(&page->ppe_list);

		page->pin_ref_cnt = 0;
		page->on_lru_list = false;
		memset(&page->ctx, 0, sizeof(page->ctx));

		ftl_l2p_cache_lru_add_page(cache, page);
	}

	ftl_mempool_initialize_ext(cache->l2_ctx_pool);
}

void
ftl_l2p_cache_restore(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	if (ftl_fast_startup(dev)) {
		l2p_shm_restore_clean(dev);
	}

	if (ftl_fast_recovery(dev)) {
		l2p_shm_restore_dirty(dev);
	}

	cb(dev, 0, cb_ctx);
}

static void
process_persist(struct ftl_l2p_cache *cache)
{
	struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;
	struct spdk_ftl_dev *dev = cache->dev;

	while (ctx->idx < cache->num_pages && ctx->qd < 64) {
		struct ftl_l2p_page *page = get_l2p_page_by_df_id(cache, ctx->idx);
		ctx->idx++;

		if (!page) {
			continue;
		}

		/* Finish the unmap if the page was marked */
		if (ftl_bitmap_get(dev->unmap_map, page->page_no)) {
			ftl_l2p_page_set_invalid(dev, page);
		}

		if (page->on_lru_list) {
			ftl_l2p_cache_lru_remove_page(cache, page);
		}

		if (page->updates) {
			/* Need to persist the page */
			page->state = L2P_CACHE_PAGE_PERSISTING;
			page->ctx.cache = cache;
			ctx->qd++;
			process_page_out(page, process_persist_page_out_cb);
		} else {
			ftl_l2p_cache_page_remove(cache, page);
		}
	}

	if (0 == ctx->qd) {
		process_finish(cache);
	}
}

void
ftl_l2p_cache_persist(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;

	process_init_ctx(dev, cache, cb, cb_ctx);
	process_persist(cache);
}

bool
ftl_l2p_cache_is_halted(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;

	return cache->state == L2P_CACHE_SHUTDOWN_DONE;
}

void
ftl_l2p_cache_halt(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;

	if (cache->state != L2P_CACHE_SHUTDOWN_DONE) {
		cache->state = L2P_CACHE_IN_SHUTDOWN;
		if (!cache->ios_in_flight && !cache->l2_pgs_evicting) {
			cache->state = L2P_CACHE_SHUTDOWN_DONE;
		}
	}
}

static inline struct ftl_l2p_page *
get_page(struct ftl_l2p_cache *cache, uint64_t lba)
{
	return get_l2p_page_by_df_id(cache, lba / cache->lbas_in_page);
}

static inline void
ftl_l2p_cache_init_page_set(struct ftl_l2p_page_set *page_set, struct ftl_l2p_pin_ctx *pin_ctx)
{
	page_set->to_pin_cnt = 0;
	page_set->pinned_cnt = 0;
	page_set->pin_fault_cnt = 0;
	page_set->locked = 0;
	page_set->deferred = 0;
	page_set->pin_ctx = pin_ctx;
}

static inline bool
ftl_l2p_cache_running(struct ftl_l2p_cache *cache)
{
	return cache->state == L2P_CACHE_RUNNING;
}

static inline bool
ftl_l2p_cache_page_is_pinnable(struct ftl_l2p_page *page)
{
	return page->state != L2P_CACHE_PAGE_INIT;
}

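/*
 * Pinning contract (hypothetical caller sketch, not part of this file): a
 * caller fills an ftl_l2p_pin_ctx and only touches the L2P from its completion
 * callback, e.g.:
 *
 *	static void pin_cb(struct spdk_ftl_dev *dev, int status,
 *			   struct ftl_l2p_pin_ctx *pin_ctx)
 *	{
 *		if (!status) {
 *			ftl_addr addr = ftl_l2p_cache_get(dev, pin_ctx->lba);
 *			... use or update the mapping ...
 *			ftl_l2p_cache_unpin(dev, pin_ctx->lba, pin_ctx->count);
 *		}
 *	}
 *
 * ftl_l2p_cache_get()/set() assert pin_ref_cnt, so they are only legal between
 * a successful pin completion and the matching unpin.
 */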
void
ftl_l2p_cache_pin(struct spdk_ftl_dev *dev, struct ftl_l2p_pin_ctx *pin_ctx)
{
	assert(dev->num_lbas >= pin_ctx->lba + pin_ctx->count);
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	struct ftl_l2p_page_set *page_set;
	bool defer_pin = false;

	/* Calculate the first and last page to pin, and the count of them */
	uint64_t start = pin_ctx->lba / cache->lbas_in_page;
	uint64_t end = (pin_ctx->lba + pin_ctx->count - 1) / cache->lbas_in_page;
	uint64_t count = end - start + 1;
	uint64_t i;

	if (spdk_unlikely(count > L2P_MAX_PAGES_TO_PIN)) {
		ftl_l2p_pin_complete(dev, -E2BIG, pin_ctx);
		return;
	}

	/* Get and initialize the page set */
	assert(ftl_l2p_cache_running(cache));
	page_set = ftl_mempool_get(cache->page_sets_pool);
	if (!page_set) {
		ftl_l2p_pin_complete(dev, -EAGAIN, pin_ctx);
		return;
	}
	ftl_l2p_cache_init_page_set(page_set, pin_ctx);

	struct ftl_l2p_page_wait_ctx *entry = page_set->entry;
	for (i = start; i <= end; i++, entry++) {
		struct ftl_l2p_page *page;
		entry->parent = page_set;
		entry->pg_no = i;
		entry->pg_pin_completed = false;
		entry->pg_pin_issued = false;

		page_set->to_pin_cnt++;

		/* Try to get the page and pin it */
		page = get_l2p_page_by_df_id(cache, i);
		if (page) {
			if (ftl_l2p_cache_page_is_pinnable(page)) {
				/* Page available and we can pin it */
				page_set->pinned_cnt++;
				entry->pg_pin_issued = true;
				entry->pg_pin_completed = true;
				ftl_l2p_cache_page_pin(cache, page);
			} else {
				/* The page is being loaded */
				/* Queue the page pin entry to be executed on page-in */
				ftl_l2p_page_queue_wait_ctx(page, entry);
				entry->pg_pin_issued = true;
			}
		} else {
			/* The page is not in the cache, queue the page_set to page it in */
			defer_pin = true;
		}
	}

	/* Check if the page set is done */
	if (page_set_is_done(page_set)) {
		page_set_end(dev, cache, page_set);
	} else if (defer_pin) {
		TAILQ_INSERT_TAIL(&cache->deferred_page_set_list, page_set, list_entry);
		page_set->deferred = 1;
	}
}

void
ftl_l2p_cache_unpin(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count)
{
	assert(dev->num_lbas >= lba + count);
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	struct ftl_l2p_page *page;
	uint64_t start = lba / cache->lbas_in_page;
	uint64_t end = (lba + count - 1) / cache->lbas_in_page;
	uint64_t i;

	assert(count);
	assert(start < cache->num_pages);
	assert(end < cache->num_pages);

	for (i = start; i <= end; i++) {
		page = get_l2p_page_by_df_id(cache, i);
		ftl_bug(!page);
		ftl_l2p_cache_page_unpin(cache, page);
	}
}

ftl_addr
ftl_l2p_cache_get(struct spdk_ftl_dev *dev, uint64_t lba)
{
	assert(dev->num_lbas > lba);
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	struct ftl_l2p_page *page = get_page(cache, lba);
	ftl_addr addr;

	ftl_bug(!page);
	assert(ftl_l2p_cache_running(cache));
	assert(page->pin_ref_cnt);

	if (ftl_bitmap_get(dev->unmap_map, page->page_no)) {
		ftl_l2p_page_set_invalid(dev, page);
		ftl_bitmap_clear(dev->unmap_map, page->page_no);
	}

	ftl_l2p_cache_lru_promote_page(cache, page);
	addr = ftl_l2p_cache_get_addr(dev, cache, page, lba);

	return addr;
}

void
ftl_l2p_cache_set(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
{
	assert(dev->num_lbas > lba);
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
	struct ftl_l2p_page *page = get_page(cache, lba);

	ftl_bug(!page);
	assert(ftl_l2p_cache_running(cache));
	assert(page->pin_ref_cnt);

	if (ftl_bitmap_get(dev->unmap_map, page->page_no)) {
		ftl_l2p_page_set_invalid(dev, page);
		ftl_bitmap_clear(dev->unmap_map, page->page_no);
	}

	page->updates++;
	ftl_l2p_cache_lru_promote_page(cache, page);
	ftl_l2p_cache_set_addr(dev, cache, page, lba, addr);
}

static struct ftl_l2p_page *
page_allocate(struct ftl_l2p_cache *cache, uint64_t page_no)
{
	struct ftl_l2p_page *page = ftl_l2p_cache_page_alloc(cache, page_no);
	ftl_l2p_cache_page_insert(cache, page);

	return page;
}

static bool
page_set_is_done(struct ftl_l2p_page_set *page_set)
{
	if (page_set->locked) {
		return false;
	}

	assert(page_set->pinned_cnt + page_set->pin_fault_cnt <= page_set->to_pin_cnt);
	return (page_set->pinned_cnt + page_set->pin_fault_cnt == page_set->to_pin_cnt);
}

static void
page_set_unpin(struct ftl_l2p_cache *cache, struct ftl_l2p_page_set *page_set)
{
	uint64_t i;
	struct ftl_l2p_page_wait_ctx *pentry = page_set->entry;

	for (i = 0; i < page_set->to_pin_cnt; i++, pentry++) {
		struct ftl_l2p_page *pinned_page;

		if (false == pentry->pg_pin_completed) {
			continue;
		}

		pinned_page = get_l2p_page_by_df_id(cache, pentry->pg_no);
		ftl_bug(!pinned_page);

		ftl_l2p_cache_page_unpin(cache, pinned_page);
	}
}

static void
page_set_end(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
	     struct ftl_l2p_page_set *page_set)
{
	if (spdk_likely(0 == page_set->pin_fault_cnt)) {
		ftl_l2p_pin_complete(dev, 0, page_set->pin_ctx);
	} else {
		page_set_unpin(cache, page_set);
		ftl_l2p_pin_complete(dev, -EIO, page_set->pin_ctx);
	}

	if (page_set->deferred) {
		TAILQ_REMOVE(&cache->deferred_page_set_list, page_set, list_entry);
	}

	assert(0 == page_set->locked);
	ftl_mempool_put(cache->page_sets_pool, page_set);
}

static void
page_in_io_complete(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
		    struct ftl_l2p_page *page, bool success)
{
	struct ftl_l2p_page_set *page_set;
	struct ftl_l2p_page_wait_ctx *pentry;

	cache->ios_in_flight--;

	assert(0 == page->pin_ref_cnt);
	assert(L2P_CACHE_PAGE_INIT == page->state);
	assert(false == page->on_lru_list);

	if (spdk_likely(success)) {
		page->state = L2P_CACHE_PAGE_READY;
	}

	while ((pentry = TAILQ_FIRST(&page->ppe_list))) {
		TAILQ_REMOVE(&page->ppe_list, pentry, list_entry);

		page_set = pentry->parent;

		assert(false == pentry->pg_pin_completed);

		if (success) {
			ftl_l2p_cache_page_pin(cache, page);
			page_set->pinned_cnt++;
			pentry->pg_pin_completed = true;
		} else {
			page_set->pin_fault_cnt++;
		}

		/* Check if the page_set is done */
		if (page_set_is_done(page_set)) {
			page_set_end(dev, cache, page_set);
		}
	}

	if (spdk_unlikely(!success)) {
		ftl_bug(page->on_lru_list);
		ftl_l2p_cache_page_remove(cache, page);
	}
}

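/*
 * The IO helpers below share a common back-pressure pattern: on -ENOMEM from
 * the bdev layer the request is parked on page->ctx.bdev_io_wait and
 * resubmitted from the *_io_retry() callback once spdk_bdev_queue_io_wait()
 * signals that a bdev_io is available again; any other error is fatal.
 */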
static void
page_in_io_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_l2p_page *page = cb_arg;
	struct ftl_l2p_cache *cache = page->ctx.cache;
	struct spdk_ftl_dev *dev = cache->dev;

	spdk_bdev_free_io(bdev_io);
	page_in_io_complete(dev, cache, page, success);
}

static void
page_in_io(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
{
	struct spdk_io_channel *ioch;
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_wait_entry *bdev_io_wait;
	int rc;
	page->ctx.cache = cache;

	rc = ftl_nv_cache_bdev_read_blocks_with_md(cache->dev, ftl_l2p_cache_get_bdev_desc(cache),
			ftl_l2p_cache_get_bdev_iochannel(cache),
			page->page_buffer, NULL, ftl_l2p_cache_page_get_bdev_offset(cache, page),
			1, page_in_io_cb, page);
	cache->ios_in_flight++;
	if (spdk_likely(0 == rc)) {
		return;
	}

	if (rc == -ENOMEM) {
		ioch = ftl_l2p_cache_get_bdev_iochannel(cache);
		bdev = spdk_bdev_desc_get_bdev(ftl_l2p_cache_get_bdev_desc(cache));
		bdev_io_wait = &page->ctx.bdev_io_wait;
		bdev_io_wait->bdev = bdev;
		bdev_io_wait->cb_fn = page_in_io_retry;
		bdev_io_wait->cb_arg = page;

		rc = spdk_bdev_queue_io_wait(bdev, ioch, bdev_io_wait);
		ftl_bug(rc);
	} else {
		ftl_abort();
	}
}

static void
page_in_io_retry(void *arg)
{
	struct ftl_l2p_page *page = arg;
	struct ftl_l2p_cache *cache = page->ctx.cache;
	struct spdk_ftl_dev *dev = cache->dev;

	cache->ios_in_flight--;
	page_in_io(dev, cache, page);
}

static void
page_in(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
	struct ftl_l2p_page_set *page_set, struct ftl_l2p_page_wait_ctx *pentry)
{
	struct ftl_l2p_page *page;
	bool page_in = false;

	/* Get the page */
	page = get_l2p_page_by_df_id(cache, pentry->pg_no);
	if (!page) {
		/* Page not allocated yet, do it */
		page = page_allocate(cache, pentry->pg_no);
		page_in = true;
	}

	if (ftl_l2p_cache_page_is_pinnable(page)) {
		ftl_l2p_cache_page_pin(cache, page);
		page_set->pinned_cnt++;
		pentry->pg_pin_issued = true;
		pentry->pg_pin_completed = true;
	} else {
		pentry->pg_pin_issued = true;
		ftl_l2p_page_queue_wait_ctx(page, pentry);
	}

	if (page_in) {
		page_in_io(dev, cache, page);
	}
}

static int
ftl_l2p_cache_process_page_sets(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache)
{
	struct ftl_l2p_page_set *page_set;
	struct ftl_l2p_page_wait_ctx *pentry;
	uint64_t i;

	page_set = TAILQ_FIRST(&cache->deferred_page_set_list);
	if (!page_set) {
		/* No page_set */
		return -ECHILD;
	}

	if (page_set->to_pin_cnt > cache->l2_pgs_avail) {
		/* Not enough pages to pin, wait */
		return -EBUSY;
	}
	if (cache->ios_in_flight > 512) {
		/* QD is too big */
		return -EBUSY;
	}

	TAILQ_REMOVE(&cache->deferred_page_set_list, page_set, list_entry);
	page_set->deferred = 0;
	page_set->locked = 1;

	/* Now we can start pinning */
	pentry = page_set->entry;
	for (i = 0; i < page_set->to_pin_cnt; i++, pentry++) {
		if (!pentry->pg_pin_issued) {
			page_in(dev, cache, page_set, pentry);
		}
	}

	page_set->locked = 0;

	/* Check if the page_set is done */
	if (page_set_is_done(page_set)) {
		page_set_end(dev, cache, page_set);
	}

	return 0;
}

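/*
 * Pick the eviction victim by scanning from the coldest end of the LRU list
 * toward hotter pages; since only READY, unpinned pages are kept on the list,
 * the first candidate is normally taken.
 */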
static struct ftl_l2p_page *
eviction_get_page(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache)
{
	uint64_t i = 0;
	struct ftl_l2p_page *page = ftl_l2p_cache_get_coldest_page(cache);

	while (page) {
		ftl_bug(L2P_CACHE_PAGE_READY != page->state);
		ftl_bug(page->pin_ref_cnt);

		if (ftl_l2p_cache_page_can_evict(page)) {
			ftl_l2p_cache_lru_remove_page(cache, page);
			return page;
		}

		/*
		 * Practically only one iteration is needed to find a page. It is because
		 * the rank of pages contains only ready and unpinned pages.
		 */
		ftl_bug(++i > 1024);

		page = ftl_l2p_cache_get_hotter_page(page);
	}

	return NULL;
}

static void
page_out_io_complete(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
		     struct ftl_l2p_page *page, bool success)
{
	cache->l2_pgs_evicting--;

	ftl_bug(page->ctx.updates > page->updates);
	ftl_bug(!TAILQ_EMPTY(&page->ppe_list));
	ftl_bug(page->on_lru_list);

	if (spdk_likely(success)) {
		page->updates -= page->ctx.updates;
	}

	if (success && ftl_l2p_cache_page_can_remove(page)) {
		ftl_l2p_cache_page_remove(cache, page);
	} else {
		if (!page->pin_ref_cnt) {
			ftl_l2p_cache_lru_add_page(cache, page);
		}
		page->state = L2P_CACHE_PAGE_READY;
	}
}

static void
page_out_io_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_l2p_page *page = cb_arg;
	struct ftl_l2p_cache *cache = page->ctx.cache;
	struct spdk_ftl_dev *dev = cache->dev;

	spdk_bdev_free_io(bdev_io);
	page_out_io_complete(dev, cache, page, success);
}

static void
page_out_io(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
	    struct ftl_l2p_page *page)
{
	struct spdk_io_channel *ioch;
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_wait_entry *bdev_io_wait;
	int rc;

	page->ctx.cache = cache;

	rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, ftl_l2p_cache_get_bdev_desc(cache),
			ftl_l2p_cache_get_bdev_iochannel(cache),
			page->page_buffer, NULL, ftl_l2p_cache_page_get_bdev_offset(cache, page),
			1, page_out_io_cb, page);

	cache->l2_pgs_evicting++;
	if (spdk_likely(0 == rc)) {
		return;
	}

	if (rc == -ENOMEM) {
		ioch = ftl_l2p_cache_get_bdev_iochannel(cache);
		bdev = spdk_bdev_desc_get_bdev(ftl_l2p_cache_get_bdev_desc(cache));
		bdev_io_wait = &page->ctx.bdev_io_wait;
		bdev_io_wait->bdev = bdev;
		bdev_io_wait->cb_fn = page_out_io_retry;
		bdev_io_wait->cb_arg = page;

		rc = spdk_bdev_queue_io_wait(bdev, ioch, bdev_io_wait);
		ftl_bug(rc);
	} else {
		ftl_abort();
	}
}

static void
page_out_io_retry(void *arg)
{
	struct ftl_l2p_page *page = arg;
	struct ftl_l2p_cache *cache = page->ctx.cache;
	struct spdk_ftl_dev *dev = cache->dev;

	cache->l2_pgs_evicting--;
	page_out_io(dev, cache, page);
}

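/*
 * Eviction is demand-driven and throttled: it only runs while the free-page
 * watermark (evict_keep) is not met, and it caps concurrent page write-backs
 * at 512.
 */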
static void
ftl_l2p_cache_process_eviction(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache)
{
	struct ftl_l2p_page *page;

	if (!ftl_l2p_cache_evict_continue(cache)) {
		return;
	}

	if (cache->l2_pgs_evicting > 512) {
		return;
	}

	page = eviction_get_page(dev, cache);
	if (spdk_unlikely(!page)) {
		return;
	}

	if (page->updates) {
		page->state = L2P_CACHE_PAGE_FLUSHING;
		page->ctx.updates = page->updates;
		page_out_io(dev, cache, page);
	} else {
		/* The page is clean and we can remove it */
		ftl_l2p_cache_page_remove(cache, page);
	}
}

static void
ftl_l2p_lazy_unmap_process_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
{
	struct ftl_l2p_cache *cache = dev->l2p;

	cache->lazy_unmap.qd--;

	/* We will retry on the next ftl_l2p_lazy_unmap_process */
	if (spdk_unlikely(status != 0)) {
		return;
	}

	if (ftl_l2p_cache_running(cache)) {
		ftl_l2p_cache_get(dev, pin_ctx->lba);
	}

	ftl_l2p_cache_unpin(dev, pin_ctx->lba, pin_ctx->count);
}

static void
ftl_l2p_lazy_unmap_process(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_cache *cache = dev->l2p;
	struct ftl_l2p_pin_ctx *pin_ctx;
	uint64_t page_no;

	if (spdk_likely(!dev->unmap_in_progress)) {
		return;
	}

	if (cache->lazy_unmap.qd == FTL_L2P_MAX_LAZY_UNMAP_QD) {
		return;
	}

	page_no = ftl_bitmap_find_first_set(dev->unmap_map, cache->lazy_unmap.page_no, UINT64_MAX);
	if (page_no == UINT64_MAX) {
		cache->lazy_unmap.page_no = 0;

		/* Check the unmap map from the beginning to detect unprocessed unmaps */
		page_no = ftl_bitmap_find_first_set(dev->unmap_map, cache->lazy_unmap.page_no, UINT64_MAX);
		if (page_no == UINT64_MAX) {
			dev->unmap_in_progress = false;
			return;
		}
	}

	cache->lazy_unmap.page_no = page_no;

	pin_ctx = &cache->lazy_unmap.pin_ctx;

	cache->lazy_unmap.qd++;
	assert(cache->lazy_unmap.qd <= FTL_L2P_MAX_LAZY_UNMAP_QD);
	assert(page_no < cache->num_pages);

	pin_ctx->lba = page_no * cache->lbas_in_page;
	pin_ctx->count = 1;
	pin_ctx->cb = ftl_l2p_lazy_unmap_process_cb;
	pin_ctx->cb_ctx = pin_ctx;

	ftl_l2p_cache_pin(dev, pin_ctx);
}

void
ftl_l2p_cache_process(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_cache *cache = dev->l2p;
	int i;

	if (spdk_unlikely(cache->state != L2P_CACHE_RUNNING)) {
		return;
	}

	for (i = 0; i < 256; i++) {
		if (ftl_l2p_cache_process_page_sets(dev, cache)) {
			break;
		}
	}

	ftl_l2p_cache_process_eviction(dev, cache);
	ftl_l2p_lazy_unmap_process(dev);
}