/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#ifndef FTL_CORE_H
#define FTL_CORE_H

#include "spdk/stdinc.h"
#include "spdk/uuid.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk/likely.h"
#include "spdk/queue.h"
#include "spdk/ftl.h"
#include "spdk/bdev.h"

#include "ftl_internal.h"
#include "ftl_io.h"
#include "ftl_nv_cache.h"
#include "ftl_writer.h"
#include "ftl_layout.h"
#include "ftl_sb.h"
#include "ftl_l2p.h"
#include "utils/ftl_bitmap.h"
#include "utils/ftl_log.h"

/*
 * We need to reserve at least 2 buffers for the band close/open sequence
 * alone, plus an additional 8 buffers for handling relocations.
 */
#define P2L_MEMPOOL_SIZE (2 + 8)

/* When using VSS on nvcache, FTL sometimes doesn't require the contents of metadata.
 * Some devices have bugs when a NULL metadata pointer is sent while the namespace is
 * formatted with VSS. This buffer is passed to such calls to avoid the bug. */
#define FTL_ZERO_BUFFER_SIZE 0x100000
extern void *g_ftl_write_buf;
extern void *g_ftl_read_buf;

struct spdk_ftl_dev {
	/* Configuration */
	struct spdk_ftl_conf conf;

	/* FTL device layout */
	struct ftl_layout layout;

	/* FTL superblock */
	struct ftl_superblock *sb;

	/* FTL shm superblock */
	struct ftl_superblock_shm *sb_shm;
	struct ftl_md *sb_shm_md;

	/* Queue of registered IO channels */
	TAILQ_HEAD(, ftl_io_channel) ioch_queue;

	/* Underlying device */
	struct spdk_bdev_desc *base_bdev_desc;

	/* Cached properties of the underlying device */
	uint64_t num_blocks_in_band;
	bool is_zoned;

	/* Indicates the device is fully initialized */
	bool initialized;

	/* Indicates the device is about to be stopped */
	bool halt;

	/* Indicates if the device is registered as an IO device */
	bool io_device_registered;

	/* Management process to be continued after IO device unregistration completes */
	struct ftl_mngt_process *unregister_process;

	/* Non-volatile write buffer cache */
	struct ftl_nv_cache nv_cache;

	/* P2L map memory pool */
	struct ftl_mempool *p2l_pool;

	/* Underlying SHM buf for P2L map mempool */
	struct ftl_md *p2l_pool_md;

	/* Band md memory pool */
	struct ftl_mempool *band_md_pool;

	/* Counter of IO activity that keeps the poller busy; includes:
	 * 1. NV cache read/write
	 * 2. metadata read/write
	 * 3. base bdev read/write
	 */
	uint64_t io_activity_total;

	/* Array of bands */
	struct ftl_band *bands;

	/* Number of operational bands */
	uint64_t num_bands;

	/* Next write band */
	struct ftl_band *next_band;

	/* Free band list */
	TAILQ_HEAD(, ftl_band) free_bands;

	/* Closed bands list */
	TAILQ_HEAD(, ftl_band) shut_bands;

	/* Number of free bands */
	uint64_t num_free;

	/* Logical -> physical table */
	void *l2p;

	/* L2P deferred pins list */
	TAILQ_HEAD(, ftl_l2p_pin_ctx) l2p_deferred_pins;

	/* Size of the l2p table */
	uint64_t num_lbas;

	/* P2L valid map */
	struct ftl_bitmap *valid_map;

	/* Metadata size */
	uint64_t md_size;

	/* Transfer unit size */
	uint64_t xfer_size;

	/* Current user write limit */
	int limit;

	/* Inflight IO operations */
	uint32_t num_inflight;

	/* Manages data relocation */
	struct ftl_reloc *reloc;

	/* Thread on which the poller is running */
	struct spdk_thread *core_thread;

	/* IO channel to the FTL device, used for internal management operations
	 * consuming FTL's external API
	 */
	struct spdk_io_channel *ioch;

	/* Underlying device IO channel */
	struct spdk_io_channel *base_ioch;

	/* Poller */
	struct spdk_poller *core_poller;

	/* Read submission queue */
	TAILQ_HEAD(, ftl_io) rd_sq;

	/* Write submission queue */
	TAILQ_HEAD(, ftl_io) wr_sq;

	/* Writer for user IOs */
	struct ftl_writer writer_user;

	/* Writer for GC IOs */
	struct ftl_writer writer_gc;

	uint32_t num_logical_bands_in_physical;

	/* Retry init sequence */
	bool init_retry;
};

void ftl_apply_limits(struct spdk_ftl_dev *dev);

void ftl_invalidate_addr(struct spdk_ftl_dev *dev, ftl_addr addr);

int ftl_core_poller(void *ctx);

int ftl_io_channel_poll(void *arg);

struct ftl_io_channel *ftl_io_channel_get_ctx(struct spdk_io_channel *ioch);

bool ftl_needs_reloc(struct spdk_ftl_dev *dev);

struct ftl_band *ftl_band_get_next_free(struct spdk_ftl_dev *dev);

static inline uint64_t
ftl_get_num_blocks_in_band(const struct spdk_ftl_dev *dev)
{
	return dev->num_blocks_in_band;
}

static inline uint64_t
ftl_addr_get_band(const struct spdk_ftl_dev *dev, ftl_addr addr)
{
	return addr / ftl_get_num_blocks_in_band(dev);
}
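
/*
 * Illustrative sketch only (hypothetical helper, not part of the FTL API): a
 * physical address is a flat block index, so the band index is the quotient
 * returned by ftl_addr_get_band() and the position inside that band is the
 * remainder. The name below is an assumption used purely to show how the
 * accessors above compose.
 */
static inline uint64_t
ftl_example_addr_offset_in_band(const struct spdk_ftl_dev *dev, ftl_addr addr)
{
	/* Strip whole bands off the address; what remains is the block offset inside the band */
	return addr % ftl_get_num_blocks_in_band(dev);
}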

static inline uint32_t
ftl_get_write_unit_size(struct spdk_bdev *bdev)
{
	if (spdk_bdev_is_zoned(bdev)) {
		return spdk_bdev_get_write_unit_size(bdev);
	}

	/* TODO: this should be passed via input parameter */
	return 32;
}

static inline struct spdk_thread *
ftl_get_core_thread(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread;
}

static inline uint64_t
ftl_get_num_bands(const struct spdk_ftl_dev *dev)
{
	return dev->num_bands;
}

static inline bool
ftl_check_core_thread(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread == spdk_get_thread();
}

static inline int
ftl_addr_packed(const struct spdk_ftl_dev *dev)
{
	return dev->layout.l2p.addr_size < sizeof(ftl_addr);
}

static inline int
ftl_addr_in_nvc(const struct spdk_ftl_dev *dev, ftl_addr addr)
{
	assert(addr != FTL_ADDR_INVALID);
	return addr >= dev->layout.base.total_blocks;
}

static inline uint64_t
ftl_addr_to_nvc_offset(const struct spdk_ftl_dev *dev, ftl_addr addr)
{
	assert(ftl_addr_in_nvc(dev, addr));
	return addr - dev->layout.base.total_blocks;
}

static inline ftl_addr
ftl_addr_from_nvc_offset(const struct spdk_ftl_dev *dev, uint64_t cache_offset)
{
	return cache_offset + dev->layout.base.total_blocks;
}
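
/*
 * Illustrative sketch only (hypothetical helper, not part of the FTL API): the
 * physical address space is flat, with [0, layout.base.total_blocks) covering
 * the base device and everything above it covering the non-volatile cache.
 * ftl_addr_to_nvc_offset() and ftl_addr_from_nvc_offset() are exact inverses
 * within the cache range; the sketch below reduces an address to a
 * device-local offset using only the accessors defined above.
 */
static inline uint64_t
ftl_example_local_offset(const struct spdk_ftl_dev *dev, ftl_addr addr)
{
	if (ftl_addr_in_nvc(dev, addr)) {
		/* Cache-resident address: strip off the base device block range */
		return ftl_addr_to_nvc_offset(dev, addr);
	}

	/* Base-device address: the value is already a base-relative block offset */
	return addr;
}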

/* Number of blocks needed to hold a band's P2L map (one uint64_t entry per block in the band) */
static inline size_t
ftl_p2l_map_num_blocks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) * sizeof(uint64_t),
				    FTL_BLOCK_SIZE);
}

/* Size of a band's tail metadata (its P2L map), rounded up to a multiple of the transfer unit size */
static inline size_t
ftl_tail_md_num_blocks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(
		       ftl_p2l_map_num_blocks(dev),
		       dev->xfer_size) * dev->xfer_size;
}

/*
 * shm_ready being set is a necessary part of the validity of the shm superblock.
 * If it's not set, then recovery or startup must proceed from disk.
 *
 * - If both sb and sb_shm are clean, then shm memory can be relied on for startup
 * - If sb_shm wasn't set to clean, then disk startup/recovery needs to be done (which depends on the sb->clean flag)
 * - sb->clean cleared while sb_shm->clean set is technically not possible (due to the order of these operations), but
 *   such a state should still result in a full recovery from disk to be on the safe side (which the ftl_fast_recovery
 *   check guarantees)
 */

static inline bool
ftl_fast_startup(const struct spdk_ftl_dev *dev)
{
	return dev->sb->clean && dev->sb_shm->shm_clean && dev->sb_shm->shm_ready;
}

static inline bool
ftl_fast_recovery(const struct spdk_ftl_dev *dev)
{
	return !dev->sb->clean && !dev->sb_shm->shm_clean && dev->sb_shm->shm_ready;
}

#endif /* FTL_CORE_H */
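
/*
 * Illustrative sketch only: based on the superblock flags described above, the
 * startup/management code could roughly choose its path as follows (the exact
 * flow lives outside this header and is not spelled out here):
 *
 *	if (ftl_fast_startup(dev)) {
 *		... clean shutdown with valid shm state: start up from shared memory ...
 *	} else if (ftl_fast_recovery(dev)) {
 *		... dirty shutdown with valid shm state: fast recovery using shared memory ...
 *	} else {
 *		... fall back to full startup/recovery from on-disk metadata ...
 *	}
 */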