/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "ftl_l2p.h"
#include "ftl_band.h"
#include "ftl_nv_cache.h"
#include "ftl_l2p_flat.h"


/* TODO: Verify why function pointers had worse performance than compile time constants */
/* Dispatch to the flat L2P implementation at compile time, e.g. FTL_L2P_OP(set) expands to ftl_l2p_flat_set */
#define FTL_L2P_OP(name)	ftl_l2p_flat_ ## name


int
ftl_l2p_init(struct spdk_ftl_dev *dev)
{
	return FTL_L2P_OP(init)(dev);
}

void
ftl_l2p_deinit(struct spdk_ftl_dev *dev)
{
	FTL_L2P_OP(deinit)(dev);
}

static inline void
ftl_l2p_pin_ctx_init(struct ftl_l2p_pin_ctx *pin_ctx, uint64_t lba, uint64_t count,
		     ftl_l2p_pin_cb cb, void *cb_ctx)
{
	pin_ctx->lba = lba;
	pin_ctx->count = count;
	pin_ctx->cb = cb;
	pin_ctx->cb_ctx = cb_ctx;
}

void
ftl_l2p_pin(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count, ftl_l2p_pin_cb cb, void *cb_ctx,
	    struct ftl_l2p_pin_ctx *pin_ctx)
{
	ftl_l2p_pin_ctx_init(pin_ctx, lba, count, cb, cb_ctx);
	FTL_L2P_OP(pin)(dev, pin_ctx);
}

void
ftl_l2p_unpin(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count)
{
	FTL_L2P_OP(unpin)(dev, lba, count);
}

/* Complete the pin callback immediately without pinning any L2P pages */
void
ftl_l2p_pin_skip(struct spdk_ftl_dev *dev, ftl_l2p_pin_cb cb, void *cb_ctx,
		 struct ftl_l2p_pin_ctx *pin_ctx)
{
	ftl_l2p_pin_ctx_init(pin_ctx, FTL_LBA_INVALID, 0, cb, cb_ctx);
	cb(dev, 0, pin_ctx);
}

void
ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
{
	FTL_L2P_OP(set)(dev, lba, addr);
}

ftl_addr
ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba)
{
	return FTL_L2P_OP(get)(dev, lba);
}

void
ftl_l2p_clear(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	FTL_L2P_OP(clear)(dev, cb, cb_ctx);
}

void
ftl_l2p_process(struct spdk_ftl_dev *dev)
{
	FTL_L2P_OP(process)(dev);
}

bool
ftl_l2p_is_halted(struct spdk_ftl_dev *dev)
{
	return FTL_L2P_OP(is_halted)(dev);
}

void
ftl_l2p_halt(struct spdk_ftl_dev *dev)
{
	FTL_L2P_OP(halt)(dev);
}

void
ftl_l2p_update_cache(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr, ftl_addr old_addr)
{
	struct ftl_nv_cache_chunk *current_chunk, *new_chunk;
	ftl_addr current_addr;

	/* Updating L2P for data on the cache device - used by user writes.
	 * Split off from updating L2P on the base device due to extra edge cases for handling dirty shutdown
	 * in the cache case, namely keeping two simultaneous writes to the same LBA consistent before/after
	 * shutdown - on the base device we can simply ignore the L2P update, here we need to keep the address
	 * with the more advanced write pointer
	 */
	assert(ftl_check_core_thread(dev));
	assert(new_addr != FTL_ADDR_INVALID);
	assert(ftl_addr_in_nvc(dev, new_addr));

	current_addr = ftl_l2p_get(dev, lba);

	if (current_addr != FTL_ADDR_INVALID) {

		/* Check if write-after-write happened (two simultaneous user writes to the same LBA) */
		if (spdk_unlikely(current_addr != old_addr
				  && ftl_addr_in_nvc(dev, current_addr))) {

			current_chunk = ftl_nv_cache_get_chunk_from_addr(dev, current_addr);
			new_chunk = ftl_nv_cache_get_chunk_from_addr(dev, new_addr);

			/* To keep data consistent after recovery, skip the older block */
			/* If both user writes are to the same chunk, the highest address should 'win', to keep
			 * data after dirty shutdown recovery consistent. If they're on different chunks, then
			 * the chunk with the higher seq_id 'wins' */
			if (current_chunk == new_chunk) {
				if (new_addr < current_addr) {
					return;
				}
			}
		}

		/* For recovery from the SHM case, the valid maps need to be set before the L2P set and
		 * invalidated after it */

		/* DO NOT CHANGE ORDER - START */
		ftl_nv_cache_set_addr(dev, lba, new_addr);
		ftl_l2p_set(dev, lba, new_addr);
		ftl_invalidate_addr(dev, current_addr);
		/* DO NOT CHANGE ORDER - END */
		return;
	}

	/* If the current address doesn't have any value (i.e. it was never set, or it was trimmed),
	 * then we can just set the L2P */
	/* DO NOT CHANGE ORDER - START (need to set P2L maps/valid map first) */
	ftl_nv_cache_set_addr(dev, lba, new_addr);
	ftl_l2p_set(dev, lba, new_addr);
	/* DO NOT CHANGE ORDER - END */
}

void
ftl_l2p_update_base(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr, ftl_addr old_addr)
{
	ftl_addr current_addr;

	/* Updating L2P for data on the base device - used by compaction and GC, may be invalidated by a user write.
	 * Split off from updating L2P on the cache device due to extra edge cases for handling dirty shutdown in the
	 * cache case. Also some assumptions differ (an INVALID address can't be assigned for the base device - trim
	 * cases are handled on the cache)
	 */
	assert(ftl_check_core_thread(dev));
	assert(new_addr != FTL_ADDR_INVALID);
	assert(old_addr != FTL_ADDR_INVALID);
	assert(!ftl_addr_in_nvc(dev, new_addr));

	current_addr = ftl_l2p_get(dev, lba);

	if (current_addr == old_addr) {
		/* DO NOT CHANGE ORDER - START (need to set the L2P (and valid bits) before invalidating the old
		 * ones, due to dirty shutdown recovery from SHM - it's ok to have too many bits set, but not ok
		 * to have too many cleared) */
		ftl_band_set_addr(ftl_band_from_addr(dev, new_addr), lba, new_addr);
		ftl_l2p_set(dev, lba, new_addr);
		/* DO NOT CHANGE ORDER - END */
	} else {
		/* The new address could have been set by a running P2L checkpoint, but in the time window between
		 * P2L checkpoint completion and the L2P set operation new data could have been written to an open
		 * chunk, so this address needs to be invalidated */
		ftl_invalidate_addr(dev, new_addr);
	}

	ftl_invalidate_addr(dev, old_addr);
}

void
ftl_l2p_pin_complete(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
{
	if (spdk_unlikely(status == -EAGAIN)) {
		/* Path updated in later patch */
		assert(false);
	} else {
		pin_ctx->cb(dev, status, pin_ctx);
	}
}
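
/*
 * Minimal usage sketch (illustrative only, not part of the original file): shows the
 * pin -> access -> unpin flow expected around the L2P API above. The L2P pages covering
 * an LBA range are pinned before ftl_l2p_get()/ftl_l2p_set() is called for that range
 * and unpinned once the access is done. The struct and function names below
 * (example_lba_access, example_pin_cb, example_lookup) are hypothetical, and the
 * ftl_l2p_pin_cb signature is assumed from the invocations earlier in this file.
 */
struct example_lba_access {
	uint64_t lba;
	uint64_t count;
	struct ftl_l2p_pin_ctx pin_ctx;
};

static void
example_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
{
	struct example_lba_access *io = pin_ctx->cb_ctx;
	ftl_addr addr;

	if (status) {
		/* Pinning failed - the caller would abort or retry the operation here */
		return;
	}

	/* The L2P pages for [lba, lba + count) are resident now, so lookups are safe */
	addr = ftl_l2p_get(dev, io->lba);
	(void)addr;

	/* Drop the pin once the mapping is no longer needed */
	ftl_l2p_unpin(dev, io->lba, io->count);
}

static void
example_lookup(struct spdk_ftl_dev *dev, struct example_lba_access *io)
{
	/* The pin context must stay valid until the callback fires */
	ftl_l2p_pin(dev, io->lba, io->count, example_pin_cb, io, &io->pin_ctx);
}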