/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 */

#ifndef FTL_CORE_H
#define FTL_CORE_H

#include "spdk/stdinc.h"
#include "spdk/uuid.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk/likely.h"
#include "spdk/queue.h"
#include "spdk/ftl.h"
#include "spdk/bdev.h"

#include "ftl_internal.h"
#include "ftl_io.h"
#include "ftl_trace.h"
#include "ftl_nv_cache.h"
#include "ftl_writer.h"
#include "ftl_layout.h"
#include "ftl_sb.h"
#include "ftl_l2p.h"
#include "base/ftl_base_dev.h"
#include "utils/ftl_bitmap.h"
#include "utils/ftl_log.h"
#include "utils/ftl_property.h"

/*
 * We need to reserve at least 2 buffers for the band open/close sequence
 * alone, plus an additional 8 buffers for handling relocations.
 */
#define P2L_MEMPOOL_SIZE (2 + 8)

/* When using VSS on the nvcache, FTL sometimes doesn't require the contents of the metadata.
 * Some devices have bugs when a NULL pointer is sent as the metadata buffer while the namespace
 * is formatted with VSS. These buffers are passed to such calls instead to avoid the bug. */
#define FTL_ZERO_BUFFER_SIZE 0x100000
extern void *g_ftl_write_buf;
extern void *g_ftl_read_buf;

struct spdk_ftl_dev {
	/* Configuration */
	struct spdk_ftl_conf conf;

	/* FTL device layout */
	struct ftl_layout layout;

	/* FTL superblock */
	struct ftl_superblock *sb;

	/* FTL shm superblock */
	struct ftl_superblock_shm *sb_shm;
	struct ftl_md *sb_shm_md;

	/* Queue of registered IO channels */
	TAILQ_HEAD(, ftl_io_channel) ioch_queue;

	/* Underlying device */
	struct spdk_bdev_desc *base_bdev_desc;

	/* Base device type */
	const struct ftl_base_device_type *base_type;

	/* Cached properties of the underlying device */
	uint64_t num_blocks_in_band;
	bool is_zoned;

	/* Indicates the device is fully initialized */
	bool initialized;

	/* Indicates the device is about to be stopped */
	bool halt;

	/* Indicates if the device is registered as an IO device */
	bool io_device_registered;

	/* Management process to be continued after IO device unregistration completes */
	struct ftl_mngt_process *unregister_process;

	/* Non-volatile write buffer cache */
	struct ftl_nv_cache nv_cache;

	/* P2L map memory pool */
	struct ftl_mempool *p2l_pool;

	/* Underlying SHM buffer for the P2L map mempool */
	struct ftl_md *p2l_pool_md;

	/* Band metadata memory pool */
	struct ftl_mempool *band_md_pool;

	/* Traces */
	struct ftl_trace trace;

	/* Statistics */
	struct ftl_stats stats;

	/* Array of bands */
	struct ftl_band *bands;

	/* Number of operational bands */
	uint64_t num_bands;

	/* Next write band */
	struct ftl_band *next_band;

	/* Free band list */
	TAILQ_HEAD(, ftl_band) free_bands;

	/* Closed bands list */
	TAILQ_HEAD(, ftl_band) shut_bands;

	/* Number of free bands */
	uint64_t num_free;

	/* Logical -> physical table */
	void *l2p;

	/* L2P deferred pins list */
	TAILQ_HEAD(, ftl_l2p_pin_ctx) l2p_deferred_pins;

	/* Size of the L2P table */
	uint64_t num_lbas;

	/* P2L valid map */
	struct ftl_bitmap *valid_map;

	/* Metadata size */
	uint64_t md_size;

	/* Transfer unit size */
	uint64_t xfer_size;

	/* Current user write limit */
	int limit;

	/* Inflight IO operations */
	uint32_t num_inflight;

	/* Manages data relocation */
	struct ftl_reloc *reloc;

	/* Thread on which the poller is running */
	struct spdk_thread *core_thread;

	/* IO channel to the FTL device, used for internal management operations
	 * consuming FTL's external API
	 */
	struct spdk_io_channel *ioch;

	/* Underlying device IO channel */
	struct spdk_io_channel *base_ioch;

	/* Poller */
	struct spdk_poller *core_poller;

	/* Read submission queue */
	TAILQ_HEAD(, ftl_io) rd_sq;

	/* Write submission queue */
	TAILQ_HEAD(, ftl_io) wr_sq;

	/* Trim submission queue */
	TAILQ_HEAD(, ftl_io) unmap_sq;

	/* Trim valid map */
	struct ftl_bitmap *unmap_map;
	struct ftl_md *unmap_map_md;
	size_t unmap_qd;
	bool unmap_in_progress;

	/* Writer for user IOs */
	struct ftl_writer writer_user;

	/* Writer for GC IOs */
	struct ftl_writer writer_gc;

	uint32_t num_logical_bands_in_physical;

	/* Retry init sequence */
	bool init_retry;

	/* P2L checkpointing */
	struct {
		/* Free regions */
		TAILQ_HEAD(, ftl_p2l_ckpt) free;
		/* In use regions */
		TAILQ_HEAD(, ftl_p2l_ckpt) inuse;
	} p2l_ckpt;

	/* FTL properties which can be configured by the user */
	struct ftl_properties *properties;
};

void ftl_apply_limits(struct spdk_ftl_dev *dev);

void ftl_invalidate_addr(struct spdk_ftl_dev *dev, ftl_addr addr);

int ftl_core_poller(void *ctx);

int ftl_io_channel_poll(void *arg);

struct ftl_io_channel *ftl_io_channel_get_ctx(struct spdk_io_channel *ioch);

bool ftl_needs_reloc(struct spdk_ftl_dev *dev);

struct ftl_band *ftl_band_get_next_free(struct spdk_ftl_dev *dev);

void ftl_set_unmap_map(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t num_blocks,
		       uint64_t seq_id);

void ftl_recover_max_seq(struct spdk_ftl_dev *dev);

void ftl_stats_bdev_io_completed(struct spdk_ftl_dev *dev, enum ftl_stats_type type,
				 struct spdk_bdev_io *bdev_io);

void ftl_stats_crc_error(struct spdk_ftl_dev *dev, enum ftl_stats_type type);

int ftl_unmap(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
	      uint64_t lba, size_t lba_cnt, spdk_ftl_fn cb_fn, void *cb_arg);

static inline uint64_t
ftl_get_num_blocks_in_band(const struct spdk_ftl_dev *dev)
{
	return dev->num_blocks_in_band;
}

static inline uint32_t
ftl_get_write_unit_size(struct spdk_bdev *bdev)
{
	/* Full block of P2L map worth of xfer_sz is needed for P2L checkpointing */
	return FTL_NUM_LBA_IN_BLOCK;
}

static inline struct spdk_thread *
ftl_get_core_thread(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread;
}

static inline void
ftl_add_io_activity(struct spdk_ftl_dev *dev)
{
	dev->stats.io_activity_total++;
}

static inline uint64_t
ftl_get_num_bands(const struct spdk_ftl_dev *dev)
{
	return dev->num_bands;
}

static inline bool
ftl_check_core_thread(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread == spdk_get_thread();
}

static inline int
ftl_addr_packed(const struct spdk_ftl_dev *dev)
{
	return dev->layout.l2p.addr_size < sizeof(ftl_addr);
}

static inline int
ftl_addr_in_nvc(const struct spdk_ftl_dev *dev, ftl_addr addr)
{
	assert(addr != FTL_ADDR_INVALID);
	return addr >= dev->layout.base.total_blocks;
}

static inline uint64_t
ftl_addr_to_nvc_offset(const struct spdk_ftl_dev *dev, ftl_addr addr)
{
	assert(ftl_addr_in_nvc(dev, addr));
	return addr - dev->layout.base.total_blocks;
}

static inline ftl_addr
ftl_addr_from_nvc_offset(const struct spdk_ftl_dev *dev, uint64_t cache_offset)
{
	return cache_offset + dev->layout.base.total_blocks;
}
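
/*
 * Illustrative sketch (not part of this header's API): the flat ftl_addr space
 * covers the base device first and the NV cache directly after it, so a cache
 * offset round-trips through the helpers above. Assuming "cache_offset" is a
 * valid block offset within the NV cache:
 *
 *	ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);
 *
 *	assert(ftl_addr_in_nvc(dev, addr));
 *	assert(ftl_addr_to_nvc_offset(dev, addr) == cache_offset);
 *
 * Addresses below dev->layout.base.total_blocks refer to blocks on the base
 * device instead.
 */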

static inline uint64_t
ftl_get_next_seq_id(struct spdk_ftl_dev *dev)
{
	return ++dev->sb->seq_id;
}

static inline size_t
ftl_p2l_map_num_blocks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) *
				    sizeof(struct ftl_p2l_map_entry), FTL_BLOCK_SIZE);
}

static inline size_t
ftl_tail_md_num_blocks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(
		       ftl_p2l_map_num_blocks(dev),
		       dev->xfer_size) * dev->xfer_size;
}
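
/*
 * Worked example with purely illustrative numbers (real values come from the device layout):
 * if ftl_get_num_blocks_in_band() == 1024, sizeof(struct ftl_p2l_map_entry) == 16 and
 * FTL_BLOCK_SIZE == 4096, the P2L map occupies 1024 * 16 / 4096 = 4 blocks; with
 * dev->xfer_size == 16 that is rounded up to the next transfer-unit multiple, so
 * ftl_tail_md_num_blocks() returns 16.
 */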

/*
 * shm_ready being set is a necessary condition for the shm superblock to be valid.
 * If it's not set, then recovery or startup must proceed from disk.
 *
 * - If both sb and sb_shm are clean, then shm memory can be relied on for startup.
 * - If sb_shm wasn't set to clean, then disk startup/recovery needs to be done (which one depends
 *   on the sb->clean flag).
 * - sb->clean cleared while sb_shm->shm_clean is set is technically not possible (due to the order
 *   in which these flags are updated), but to be on the safe side such a state should result in a
 *   full recovery from disk; since it matches neither ftl_fast_startup() nor ftl_fast_recovery(),
 *   that is exactly what happens.
 */

static inline bool
ftl_fast_startup(const struct spdk_ftl_dev *dev)
{
	return dev->sb->clean && dev->sb_shm->shm_clean && dev->sb_shm->shm_ready;
}

static inline bool
ftl_fast_recovery(const struct spdk_ftl_dev *dev)
{
	return !dev->sb->clean && !dev->sb_shm->shm_clean && dev->sb_shm->shm_ready;
}

#endif /* FTL_CORE_H */