/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "ftl_l2p.h"
#include "ftl_band.h"
#include "ftl_nv_cache.h"
#include "ftl_l2p_cache.h"
#include "ftl_l2p_flat.h"


/* TODO: Verify why function pointers had worse performance than compile-time constants */
#ifdef SPDK_FTL_L2P_FLAT
#define FTL_L2P_OP(name)	ftl_l2p_flat_ ## name
#else
#define FTL_L2P_OP(name)	ftl_l2p_cache_ ## name
#endif


int
ftl_l2p_init(struct spdk_ftl_dev *dev)
{
	TAILQ_INIT(&dev->l2p_deferred_pins);
	return FTL_L2P_OP(init)(dev);
}

void
ftl_l2p_deinit(struct spdk_ftl_dev *dev)
{
	FTL_L2P_OP(deinit)(dev);
}

static inline void
ftl_l2p_pin_ctx_init(struct ftl_l2p_pin_ctx *pin_ctx, uint64_t lba, uint64_t count,
		     ftl_l2p_pin_cb cb, void *cb_ctx)
{
	pin_ctx->lba = lba;
	pin_ctx->count = count;
	pin_ctx->cb = cb;
	pin_ctx->cb_ctx = cb_ctx;
}

void
ftl_l2p_pin(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count, ftl_l2p_pin_cb cb, void *cb_ctx,
	    struct ftl_l2p_pin_ctx *pin_ctx)
{
	ftl_l2p_pin_ctx_init(pin_ctx, lba, count, cb, cb_ctx);
	FTL_L2P_OP(pin)(dev, pin_ctx);
}

void
ftl_l2p_unpin(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count)
{
	FTL_L2P_OP(unpin)(dev, lba, count);
}

void
ftl_l2p_pin_skip(struct spdk_ftl_dev *dev, ftl_l2p_pin_cb cb, void *cb_ctx,
		 struct ftl_l2p_pin_ctx *pin_ctx)
{
	ftl_l2p_pin_ctx_init(pin_ctx, FTL_LBA_INVALID, 0, cb, cb_ctx);
	cb(dev, 0, pin_ctx);
}

void
ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
{
	FTL_L2P_OP(set)(dev, lba, addr);
}

ftl_addr
ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba)
{
	return FTL_L2P_OP(get)(dev, lba);
}

void
ftl_l2p_clear(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(clear)(dev, cb, cb_ctx);
}

void
ftl_l2p_restore(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(restore)(dev, cb, cb_ctx);
}

void
ftl_l2p_persist(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(persist)(dev, cb, cb_ctx);
}

void
ftl_l2p_unmap(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(unmap)(dev, cb, cb_ctx);
}

void
ftl_l2p_process(struct spdk_ftl_dev *dev)
{
	struct ftl_l2p_pin_ctx *pin_ctx;

	/* Retry one deferred pin request per poll before letting the
	 * implementation make progress */
	pin_ctx = TAILQ_FIRST(&dev->l2p_deferred_pins);
	if (pin_ctx) {
		TAILQ_REMOVE(&dev->l2p_deferred_pins, pin_ctx, link);
		FTL_L2P_OP(pin)(dev, pin_ctx);
	}

	FTL_L2P_OP(process)(dev);
}

bool
ftl_l2p_is_halted(struct spdk_ftl_dev *dev)
{
	if (!TAILQ_EMPTY(&dev->l2p_deferred_pins)) {
		return false;
	}

	return FTL_L2P_OP(is_halted)(dev);
}

void
ftl_l2p_resume(struct spdk_ftl_dev *dev)
{
	FTL_L2P_OP(resume)(dev);
}

void
ftl_l2p_halt(struct spdk_ftl_dev *dev)
{
	FTL_L2P_OP(halt)(dev);
}

/* Return the seq_id of the most recent trim (unmap) covering the L2P page
 * that holds the given LBA */
static uint64_t
get_trim_seq_id(struct spdk_ftl_dev *dev, uint64_t lba)
{
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
	uint64_t *page = ftl_md_get_buffer(md);
	uint64_t page_no = lba / dev->layout.l2p.lbas_in_page;

	return page[page_no];
}
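/* Illustrative pin/unpin usage (a sketch, not compiled as part of this file):
 * callers pin an LBA range so the L2P pages backing it stay resident, access
 * the mapping from the pin callback, and unpin once the IO completes. The
 * names my_io and my_pin_cb below are hypothetical.
 *
 *	struct my_io {
 *		uint64_t lba;
 *		uint64_t count;
 *		ftl_addr addr;
 *		struct ftl_l2p_pin_ctx pin_ctx;
 *	};
 *
 *	static void
 *	my_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
 *	{
 *		struct my_io *io = pin_ctx->cb_ctx;
 *
 *		if (status) {
 *			return; // pin failed, fail the IO
 *		}
 *
 *		// Pages for [lba, lba + count) are pinned; L2P access is safe now
 *		io->addr = ftl_l2p_get(dev, io->lba);
 *
 *		// ... issue the read/write, then on completion:
 *		ftl_l2p_unpin(dev, io->lba, io->count);
 *	}
 *
 *	// On the core thread:
 *	// ftl_l2p_pin(dev, io->lba, io->count, my_pin_cb, io, &io->pin_ctx);
 */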
void
ftl_l2p_update_cache(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr, ftl_addr old_addr)
{
	struct ftl_nv_cache_chunk *current_chunk, *new_chunk;
	ftl_addr current_addr;

	/* Updates the L2P for data placed on the cache device - used by user writes.
	 * Split off from the base-device update due to extra edge cases when handling dirty
	 * shutdown, namely keeping two simultaneous writes to the same LBA consistent
	 * before/after shutdown - on the base device we can simply ignore the L2P update,
	 * here we need to keep the address with the more advanced write pointer
	 */
	assert(ftl_check_core_thread(dev));
	assert(new_addr != FTL_ADDR_INVALID);
	assert(ftl_addr_in_nvc(dev, new_addr));

	current_addr = ftl_l2p_get(dev, lba);

	if (current_addr != FTL_ADDR_INVALID) {

		/* Check if write-after-write happened (two simultaneous user writes to the same LBA) */
		if (spdk_unlikely(current_addr != old_addr
				  && ftl_addr_in_nvc(dev, current_addr))) {

			current_chunk = ftl_nv_cache_get_chunk_from_addr(dev, current_addr);
			new_chunk = ftl_nv_cache_get_chunk_from_addr(dev, new_addr);

			/* To keep the data consistent after dirty shutdown recovery, skip the older block.
			 * If both user writes went to the same chunk, the higher address 'wins'; if they're
			 * on different chunks, then the chunk with the higher seq_id 'wins' */
			if (current_chunk == new_chunk) {
				if (new_addr < current_addr) {
					return;
				}
			} else {
				if (new_chunk->md->seq_id < current_chunk->md->seq_id) {
					return;
				}
			}
		}

		/* For recovery from the SHM case, the valid map needs to be set before the L2P
		 * update and the old address invalidated after it */

		/* DO NOT CHANGE ORDER - START */
		ftl_nv_cache_set_addr(dev, lba, new_addr);
		ftl_l2p_set(dev, lba, new_addr);
		ftl_invalidate_addr(dev, current_addr);
		/* DO NOT CHANGE ORDER - END */
		return;
	} else {
		uint64_t trim_seq_id = get_trim_seq_id(dev, lba);
		uint64_t new_seq_id = ftl_nv_cache_get_chunk_from_addr(dev, new_addr)->md->seq_id;

		/* Check if the region was unmapped while the IO was in flight */
		if (new_seq_id < trim_seq_id) {
			return;
		}
	}

	/* If the current address doesn't hold any value (i.e. it was never set, or it was trimmed),
	 * then we can just set the L2P */
	/* DO NOT CHANGE ORDER - START (need to set P2L maps/valid map first) */
	ftl_nv_cache_set_addr(dev, lba, new_addr);
	ftl_l2p_set(dev, lba, new_addr);
	/* DO NOT CHANGE ORDER - END */
}
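/* Worked example of the write-after-write tie-break above (hypothetical
 * addresses): two user writes to LBA 10 are in flight, write A lands at cache
 * address 0x100 and write B at 0x180 in the same chunk; both sampled
 * old_addr == FTL_ADDR_INVALID at submission. If B completes first, the L2P
 * already points at 0x180 when A's update runs, so A sees
 * current_addr != old_addr with current_addr in the NV cache, and since
 * new_addr (0x100) < current_addr (0x180) within one chunk it returns without
 * touching the L2P. B's data - written with the more advanced write pointer -
 * matches what recovery would reconstruct from the chunk's P2L map after a
 * dirty shutdown. Across different chunks, the chunk seq_id decides instead.
 */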
void
ftl_l2p_update_base(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr, ftl_addr old_addr)
{
	ftl_addr current_addr;

	/* Updates the L2P for data placed on the base device - used by compaction and GC;
	 * may be invalidated by a user write. Split off from the cache-device update due to
	 * the extra dirty shutdown edge cases in the cache case. Some assumptions also
	 * differ (an INVALID address can't be assigned on the base device - trim cases are
	 * handled on the cache)
	 */
	assert(ftl_check_core_thread(dev));
	assert(new_addr != FTL_ADDR_INVALID);
	assert(old_addr != FTL_ADDR_INVALID);
	assert(!ftl_addr_in_nvc(dev, new_addr));

	current_addr = ftl_l2p_get(dev, lba);

	if (current_addr == old_addr) {
		/* DO NOT CHANGE ORDER - START (the L2P (and valid bits) need to be set before
		 * invalidating the old ones, due to dirty shutdown recovery from SHM - it's ok
		 * to have too many bits set, but not ok to have too many cleared) */
		ftl_band_set_addr(ftl_band_from_addr(dev, new_addr), lba, new_addr);
		ftl_l2p_set(dev, lba, new_addr);
		/* DO NOT CHANGE ORDER - END */
	} else {
		/* The new address could have been set by a running P2L checkpoint, but in the
		 * time window between P2L checkpoint completion and the L2P set operation new
		 * data could have been written to the open chunk, so this address needs to be
		 * invalidated */
		ftl_invalidate_addr(dev, new_addr);
	}

	ftl_invalidate_addr(dev, old_addr);
}

void
ftl_l2p_pin_complete(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
{
	/* -EAGAIN means the pin couldn't be satisfied immediately; defer it and
	 * retry from ftl_l2p_process() */
	if (spdk_unlikely(status == -EAGAIN)) {
		TAILQ_INSERT_TAIL(&dev->l2p_deferred_pins, pin_ctx, link);
	} else {
		pin_ctx->cb(dev, status, pin_ctx);
	}
}
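/* Sketch of the deferred-pin flow (illustrative, not compiled): a pin request
 * that the backing implementation cannot satisfy immediately is parked on
 * dev->l2p_deferred_pins instead of failing, and retried one per poll:
 *
 *	ftl_l2p_pin(dev, lba, count, cb, cb_ctx, pin_ctx)
 *	  -> FTL_L2P_OP(pin)() hits a transient resource shortage
 *	  -> ftl_l2p_pin_complete(dev, -EAGAIN, pin_ctx)
 *	  -> pin_ctx queued on dev->l2p_deferred_pins
 *	  -> ftl_l2p_process() (core-thread poller) re-issues FTL_L2P_OP(pin)()
 *	  -> on success, ftl_l2p_pin_complete(dev, 0, pin_ctx) invokes pin_ctx->cb
 *
 * ftl_l2p_is_halted() reports false until this queue drains, so shutdown
 * waits for deferred pins to finish.
 */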