/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#ifndef FTL_CORE_H
#define FTL_CORE_H

#include "spdk/stdinc.h"
#include "spdk/uuid.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk/likely.h"
#include "spdk/queue.h"
#include "spdk/ftl.h"
#include "spdk/bdev.h"

#include "ftl_internal.h"
#include "ftl_io.h"
#include "ftl_nv_cache.h"
#include "ftl_writer.h"
#include "ftl_layout.h"
#include "ftl_sb.h"
#include "ftl_l2p.h"
#include "utils/ftl_bitmap.h"
#include "utils/ftl_log.h"

/*
 * We need to reserve at least 2 buffers for band close / open sequence
 * alone, plus additional (8) buffers for handling relocations.
 */
#define P2L_MEMPOOL_SIZE (2 + 8)

/* When using VSS on nvcache, FTL sometimes doesn't require the contents of metadata.
 * Some devices have bugs when sending a NULL pointer as part of metadata when namespace
 * is formatted with VSS. This buffer is passed to such calls to avoid the bug.
 */
#define FTL_ZERO_BUFFER_SIZE 0x100000
extern void *g_ftl_write_buf;
extern void *g_ftl_read_buf;

/*
 * Main FTL device state. One instance exists per FTL bdev; it ties together
 * the base device, the non-volatile write cache, the L2P table, the band
 * pool and all runtime queues/pollers.
 */
struct spdk_ftl_dev {
	/* Configuration */
	struct spdk_ftl_conf conf;

	/* FTL device layout */
	struct ftl_layout layout;

	/* FTL superblock */
	struct ftl_superblock *sb;

	/* FTL shm superblock */
	struct ftl_superblock_shm *sb_shm;
	/* Metadata region backing the shm superblock */
	struct ftl_md *sb_shm_md;

	/* Queue of registered IO channels */
	TAILQ_HEAD(, ftl_io_channel) ioch_queue;

	/* Underlying device */
	struct spdk_bdev_desc *base_bdev_desc;

	/* Cached properties of the underlying device */
	uint64_t num_blocks_in_band;
	bool is_zoned;

	/* Indicates the device is fully initialized */
	bool initialized;

	/* Indicates the device is about to be stopped */
	bool halt;

	/* Indicates if the device is registered as an IO device */
	bool io_device_registered;

	/* Management process to be continued after IO device unregistration completes */
	struct ftl_mngt_process *unregister_process;

	/* Non-volatile write buffer cache */
	struct ftl_nv_cache nv_cache;

	/* P2L map memory pool */
	struct ftl_mempool *p2l_pool;

	/* Underlying SHM buf for P2L map mempool */
	struct ftl_md *p2l_pool_md;

	/* Band md memory pool */
	struct ftl_mempool *band_md_pool;

	/* Counter for poller busy detection; incremented on:
	 * 1. nv cache read/write
	 * 2. metadata read/write
	 * 3. base bdev read/write
	 */
	uint64_t io_activity_total;

	/* Array of bands */
	struct ftl_band *bands;

	/* Number of operational bands */
	uint64_t num_bands;

	/* Next write band */
	struct ftl_band *next_band;

	/* Free band list */
	TAILQ_HEAD(, ftl_band) free_bands;

	/* Closed bands list */
	TAILQ_HEAD(, ftl_band) shut_bands;

	/* Number of free bands */
	uint64_t num_free;

	/* Logical -> physical table */
	void *l2p;

	/* l2p deferred pins list */
	TAILQ_HEAD(, ftl_l2p_pin_ctx) l2p_deferred_pins;

	/* Size of the l2p table */
	uint64_t num_lbas;

	/* P2L valid map */
	struct ftl_bitmap *valid_map;

	/* Metadata size */
	uint64_t md_size;

	/* Transfer unit size */
	uint64_t xfer_size;

	/* Current user write limit */
	int limit;

	/* Inflight IO operations */
	uint32_t num_inflight;

	/* Manages data relocation */
	struct ftl_reloc *reloc;

	/* Thread on which the poller is running */
	struct spdk_thread *core_thread;

	/* IO channel to the FTL device, used for internal management operations
	 * consuming FTL's external API
	 */
	struct spdk_io_channel *ioch;

	/* Underlying device IO channel */
	struct spdk_io_channel *base_ioch;

	/* Poller */
	struct spdk_poller *core_poller;

	/* Read submission queue */
	TAILQ_HEAD(, ftl_io) rd_sq;

	/* Write submission queue */
	TAILQ_HEAD(, ftl_io) wr_sq;

	/* Writer for user IOs */
	struct ftl_writer writer_user;

	/* Writer for GC IOs */
	struct ftl_writer writer_gc;

	/* NOTE(review): presumably the number of logical bands mapped onto one
	 * physical band of the base device — confirm against band layout code.
	 */
	uint32_t num_logical_bands_in_physical;

	/* Retry init sequence */
	bool init_retry;

	/* P2L checkpointing */
	struct {
		/* Free regions */
		TAILQ_HEAD(, ftl_p2l_ckpt) free;
		/* In use regions */
		TAILQ_HEAD(, ftl_p2l_ckpt) inuse;
	} p2l_ckpt;
};

void ftl_apply_limits(struct spdk_ftl_dev *dev);
181 182 void ftl_invalidate_addr(struct spdk_ftl_dev *dev, ftl_addr addr); 183 184 int ftl_core_poller(void *ctx); 185 186 int ftl_io_channel_poll(void *arg); 187 188 struct ftl_io_channel *ftl_io_channel_get_ctx(struct spdk_io_channel *ioch); 189 190 bool ftl_needs_reloc(struct spdk_ftl_dev *dev); 191 192 struct ftl_band *ftl_band_get_next_free(struct spdk_ftl_dev *dev); 193 194 void ftl_recover_max_seq(struct spdk_ftl_dev *dev); 195 196 static inline uint64_t 197 ftl_get_num_blocks_in_band(const struct spdk_ftl_dev *dev) 198 { 199 return dev->num_blocks_in_band; 200 } 201 202 static inline uint64_t 203 ftl_addr_get_band(const struct spdk_ftl_dev *dev, ftl_addr addr) 204 { 205 return addr / ftl_get_num_blocks_in_band(dev); 206 } 207 208 static inline uint32_t 209 ftl_get_write_unit_size(struct spdk_bdev *bdev) 210 { 211 /* Full block of P2L map worth of xfer_sz is needed for P2L checkpointing */ 212 return FTL_NUM_LBA_IN_BLOCK; 213 } 214 215 static inline struct spdk_thread * 216 ftl_get_core_thread(const struct spdk_ftl_dev *dev) 217 { 218 return dev->core_thread; 219 } 220 221 static inline uint64_t 222 ftl_get_num_bands(const struct spdk_ftl_dev *dev) 223 { 224 return dev->num_bands; 225 } 226 227 static inline bool 228 ftl_check_core_thread(const struct spdk_ftl_dev *dev) 229 { 230 return dev->core_thread == spdk_get_thread(); 231 } 232 233 static inline int 234 ftl_addr_packed(const struct spdk_ftl_dev *dev) 235 { 236 return dev->layout.l2p.addr_size < sizeof(ftl_addr); 237 } 238 239 static inline int 240 ftl_addr_in_nvc(const struct spdk_ftl_dev *dev, ftl_addr addr) 241 { 242 assert(addr != FTL_ADDR_INVALID); 243 return addr >= dev->layout.base.total_blocks; 244 } 245 246 static inline uint64_t 247 ftl_addr_to_nvc_offset(const struct spdk_ftl_dev *dev, ftl_addr addr) 248 { 249 assert(ftl_addr_in_nvc(dev, addr)); 250 return addr - dev->layout.base.total_blocks; 251 } 252 253 static inline ftl_addr 254 ftl_addr_from_nvc_offset(const struct spdk_ftl_dev *dev, 
uint64_t cache_offset) 255 { 256 return cache_offset + dev->layout.base.total_blocks; 257 } 258 259 static inline uint64_t 260 ftl_get_next_seq_id(struct spdk_ftl_dev *dev) 261 { 262 return ++dev->sb->seq_id; 263 } 264 265 static inline size_t 266 ftl_p2l_map_num_blocks(const struct spdk_ftl_dev *dev) 267 { 268 return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) * 269 sizeof(struct ftl_p2l_map_entry), FTL_BLOCK_SIZE); 270 } 271 272 static inline size_t 273 ftl_tail_md_num_blocks(const struct spdk_ftl_dev *dev) 274 { 275 return spdk_divide_round_up( 276 ftl_p2l_map_num_blocks(dev), 277 dev->xfer_size) * dev->xfer_size; 278 } 279 280 /* 281 * shm_ready being set is a necessary part of the validity of the shm superblock 282 * If it's not set, then the recovery or startup must proceed from disk 283 * 284 * - If both sb and shm_sb are clean, then shm memory can be relied on for startup 285 * - If shm_sb wasn't set to clean, then disk startup/recovery needs to be done (which depends on the sb->clean flag) 286 * - sb->clean clear and sb_shm->clean is technically not possible (due to the order of these operations), but it should 287 * probably do a full recovery from disk to be on the safe side (which the ftl_fast_recovery will guarantee) 288 */ 289 290 static inline bool 291 ftl_fast_startup(const struct spdk_ftl_dev *dev) 292 { 293 return dev->sb->clean && dev->sb_shm->shm_clean && dev->sb_shm->shm_ready; 294 } 295 296 static inline bool 297 ftl_fast_recovery(const struct spdk_ftl_dev *dev) 298 { 299 return !dev->sb->clean && !dev->sb_shm->shm_clean && dev->sb_shm->shm_ready; 300 } 301 302 #endif /* FTL_CORE_H */ 303