/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef FTL_CORE_H
#define FTL_CORE_H

#include "spdk/stdinc.h"
#include "spdk/nvme.h"
#include "spdk/nvme_ocssd.h"
#include "spdk/uuid.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk_internal/log.h"
#include "spdk/queue.h"
#include "spdk/ftl.h"
#include "spdk/bdev.h"

#include "ftl_ppa.h"
#include "ftl_io.h"
#include "ftl_trace.h"

struct spdk_ftl_dev;
struct ftl_band;
struct ftl_chunk;
struct ftl_io;
struct ftl_restore;
struct ftl_wptr;
struct ftl_flush;
struct ftl_reloc;
struct ftl_anm_event;

struct ftl_stats {
	/* Number of writes scheduled directly by the user */
	uint64_t write_user;

	/* Total number of writes */
	uint64_t write_total;

	/* Traces */
	struct ftl_trace trace;

	/* Number of limits applied */
	uint64_t limits[SPDK_FTL_LIMIT_MAX];
};

struct ftl_punit {
	struct spdk_ftl_dev *dev;

	struct ftl_ppa start_ppa;
};

struct ftl_thread {
	/* Owner */
	struct spdk_ftl_dev *dev;
	/* I/O queue pair */
	struct spdk_nvme_qpair *qpair;

	/* Thread on which the poller is running */
	struct spdk_thread *thread;

	/* Poller */
	struct spdk_poller *poller;
	/* Poller's function */
	spdk_poller_fn poller_fn;
	/* Poller's period (in microseconds) */
	uint64_t period_us;
};
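/*
 * Illustrative sketch (not part of the original header): a descriptor like
 * struct ftl_thread above would typically be armed by registering its poller
 * function on the owning SPDK thread, e.g.:
 *
 *	thread->poller = spdk_poller_register(thread->poller_fn, dev,
 *					      thread->period_us);
 *
 * spdk_poller_register() takes the period in microseconds; a period of 0
 * makes the poller run on every iteration of the reactor loop. Passing the
 * device pointer as the poller context is an assumption for the example.
 */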
struct ftl_global_md {
	/* Device instance */
	struct spdk_uuid uuid;
	/* Size of the l2p table */
	uint64_t num_lbas;
};

struct spdk_ftl_dev {
	/* Device instance */
	struct spdk_uuid uuid;
	/* Device name */
	char *name;
	/* Configuration */
	struct spdk_ftl_conf conf;

	/* Indicates the device is fully initialized */
	int initialized;
	/* Indicates the device is about to be stopped */
	int halt;

	/* Init callback */
	spdk_ftl_init_fn init_cb;
	/* Init callback's context */
	void *init_arg;

	/* Halt callback */
	spdk_ftl_fn halt_cb;
	/* Halt callback's context */
	void *halt_arg;
	/* Halt poller, checks if the device has been halted */
	struct spdk_poller *halt_poller;

	/* IO channel */
	struct spdk_io_channel *ioch;

	/* NVMe controller */
	struct spdk_nvme_ctrlr *ctrlr;
	/* NVMe namespace */
	struct spdk_nvme_ns *ns;
	/* NVMe transport ID */
	struct spdk_nvme_transport_id trid;
	/* Write buffer cache */
	struct spdk_bdev_desc *cache_bdev_desc;

	/* LBA map memory pool */
	struct spdk_mempool *lba_pool;

	/* Statistics */
	struct ftl_stats stats;

	/* Parallel unit range */
	struct spdk_ftl_punit_range range;
	/* Array of parallel units */
	struct ftl_punit *punits;

	/* Current sequence number */
	uint64_t seq;

	/* Array of bands */
	struct ftl_band *bands;
	/* Band currently being defragmented */
	struct ftl_band *df_band;
	/* Number of operational bands */
	size_t num_bands;
	/* Next write band */
	struct ftl_band *next_band;
	/* Free band list */
	LIST_HEAD(, ftl_band) free_bands;
	/* Closed bands list */
	LIST_HEAD(, ftl_band) shut_bands;
	/* Number of free bands */
	size_t num_free;

	/* List of write pointers */
	LIST_HEAD(, ftl_wptr) wptr_list;

	/* Logical -> physical table */
	void *l2p;
	/* Size of the l2p table */
	uint64_t num_lbas;

	/* PPA format */
	struct ftl_ppa_fmt ppaf;
	/* PPA address size */
	size_t ppa_len;
	/* Device's geometry */
	struct spdk_ocssd_geometry_data geo;

	/* Flush list */
	LIST_HEAD(, ftl_flush) flush_list;

	/* Device specific md buffer */
	struct ftl_global_md global_md;

	/* Metadata size */
	size_t md_size;

	/* Transfer unit size */
	size_t xfer_size;
	/* Ring write buffer */
	struct ftl_rwb *rwb;

	/* Current user write limit */
	int limit;

	/* Inflight IO operations */
	uint32_t num_inflight;
	/* Queue of IO awaiting retry */
	TAILQ_HEAD(, ftl_io) retry_queue;

	/* Manages data relocation */
	struct ftl_reloc *reloc;

	/* Threads */
	struct ftl_thread core_thread;
	struct ftl_thread read_thread;

	/* Devices' list */
	STAILQ_ENTRY(spdk_ftl_dev) stailq;
};

typedef void (*ftl_restore_fn)(struct spdk_ftl_dev *, struct ftl_restore *, int);

void	ftl_apply_limits(struct spdk_ftl_dev *dev);
int	ftl_io_read(struct ftl_io *io);
int	ftl_io_write(struct ftl_io *io);
int	ftl_io_erase(struct ftl_io *io);
int	ftl_io_flush(struct ftl_io *io);
int	ftl_current_limit(const struct spdk_ftl_dev *dev);
int	ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_ppa ppa);
int	ftl_task_core(void *ctx);
int	ftl_task_read(void *ctx);
void	ftl_process_anm_event(struct ftl_anm_event *event);
size_t	ftl_tail_md_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_tail_md_hdr_num_lbks(void);
size_t	ftl_vld_map_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_head_md_num_lbks(const struct spdk_ftl_dev *dev);
int	ftl_restore_md(struct spdk_ftl_dev *dev, ftl_restore_fn cb);
int	ftl_restore_device(struct ftl_restore *restore, ftl_restore_fn cb);

#define ftl_to_ppa(addr) \
	(struct ftl_ppa) { .ppa = (uint64_t)(addr) }

#define ftl_to_ppa_packed(addr) \
	(struct ftl_ppa) { .pack.ppa = (uint32_t)(addr) }

static inline struct spdk_thread *
ftl_get_core_thread(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread.thread;
}

static inline struct spdk_nvme_qpair *
ftl_get_write_qpair(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread.qpair;
}

static inline struct spdk_thread *
ftl_get_read_thread(const struct spdk_ftl_dev *dev)
{
	return dev->read_thread.thread;
}

static inline struct spdk_nvme_qpair *
ftl_get_read_qpair(const struct spdk_ftl_dev *dev)
{
	return dev->read_thread.qpair;
}

static inline int
ftl_ppa_packed(const struct spdk_ftl_dev *dev)
{
	return dev->ppa_len < 32;
}

static inline int
ftl_ppa_invalid(struct ftl_ppa ppa)
{
	return ppa.ppa == ftl_to_ppa(FTL_PPA_INVALID).ppa;
}

static inline int
ftl_ppa_cached(struct ftl_ppa ppa)
{
	return !ftl_ppa_invalid(ppa) && ppa.cached;
}

static inline uint64_t
ftl_ppa_addr_pack(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	return (ppa.lbk << dev->ppaf.lbk_offset) |
	       (ppa.chk << dev->ppaf.chk_offset) |
	       (ppa.pu << dev->ppaf.pu_offset) |
	       (ppa.grp << dev->ppaf.grp_offset);
}

static inline struct ftl_ppa
ftl_ppa_addr_unpack(const struct spdk_ftl_dev *dev, uint64_t ppa)
{
	struct ftl_ppa res = {};

	res.lbk = (ppa >> dev->ppaf.lbk_offset) & dev->ppaf.lbk_mask;
	res.chk = (ppa >> dev->ppaf.chk_offset) & dev->ppaf.chk_mask;
	res.pu = (ppa >> dev->ppaf.pu_offset) & dev->ppaf.pu_mask;
	res.grp = (ppa >> dev->ppaf.grp_offset) & dev->ppaf.grp_mask;

	return res;
}
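/*
 * Worked example (illustrative; the field offsets below are assumptions, not
 * values taken from this header): for a geometry where lbk occupies bits
 * [0,11], chk bits [12,19], pu bits [20,22] and grp bits starting at 23,
 * packing the address { .grp = 1, .pu = 2, .chk = 3, .lbk = 4 } yields:
 *
 *	(4 << 0) | (3 << 12) | (2 << 20) | (1 << 23) == 0xa03004
 *
 * ftl_ppa_addr_unpack() reverses this by shifting each field back down and
 * masking it with the corresponding ppaf mask.
 */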
static inline struct ftl_ppa
ftl_ppa_to_packed(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	struct ftl_ppa p = {};

	if (ftl_ppa_invalid(ppa)) {
		p = ftl_to_ppa_packed(FTL_PPA_INVALID);
	} else if (ftl_ppa_cached(ppa)) {
		p.pack.cached = 1;
		p.pack.offset = (uint32_t) ppa.offset;
	} else {
		p.pack.ppa = (uint32_t) ftl_ppa_addr_pack(dev, ppa);
	}

	return p;
}

static inline struct ftl_ppa
ftl_ppa_from_packed(const struct spdk_ftl_dev *dev, struct ftl_ppa p)
{
	struct ftl_ppa ppa = {};

	if (p.pack.ppa == (uint32_t)FTL_PPA_INVALID) {
		ppa = ftl_to_ppa(FTL_PPA_INVALID);
	} else if (p.pack.cached) {
		ppa.cached = 1;
		ppa.offset = p.pack.offset;
	} else {
		ppa = ftl_ppa_addr_unpack(dev, p.pack.ppa);
	}

	return ppa;
}

static inline unsigned int
ftl_ppa_flatten_punit(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	return ppa.pu * dev->geo.num_grp + ppa.grp - dev->range.begin;
}

static inline int
ftl_ppa_in_range(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	unsigned int punit = ftl_ppa_flatten_punit(dev, ppa) + dev->range.begin;

	if (punit >= dev->range.begin && punit <= dev->range.end) {
		return 1;
	}

	return 0;
}

#define _ftl_l2p_set(l2p, off, val, bits) \
	__atomic_store_n(((uint##bits##_t *)(l2p)) + (off), val, __ATOMIC_SEQ_CST)

#define _ftl_l2p_set32(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 32)

#define _ftl_l2p_set64(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 64)

#define _ftl_l2p_get(l2p, off, bits) \
	__atomic_load_n(((uint##bits##_t *)(l2p)) + (off), __ATOMIC_SEQ_CST)

#define _ftl_l2p_get32(l2p, off) \
	_ftl_l2p_get(l2p, off, 32)

#define _ftl_l2p_get64(l2p, off) \
	_ftl_l2p_get(l2p, off, 64)

#define ftl_ppa_cmp(p1, p2) \
	((p1).ppa == (p2).ppa)

static inline void
ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, struct ftl_ppa ppa)
{
	assert(dev->num_lbas > lba);

	if (ftl_ppa_packed(dev)) {
		_ftl_l2p_set32(dev->l2p, lba, ftl_ppa_to_packed(dev, ppa).ppa);
	} else {
		_ftl_l2p_set64(dev->l2p, lba, ppa.ppa);
	}
}

static inline struct ftl_ppa
ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba)
{
	assert(dev->num_lbas > lba);

	if (ftl_ppa_packed(dev)) {
		return ftl_ppa_from_packed(dev, ftl_to_ppa_packed(
						   _ftl_l2p_get32(dev->l2p, lba)));
	} else {
		return ftl_to_ppa(_ftl_l2p_get64(dev->l2p, lba));
	}
}
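/*
 * Usage sketch (illustrative only): the accessors above provide lock-free,
 * sequentially consistent access to the L2P table, so a lookup/update pair
 * on a hypothetical mapping could look like:
 *
 *	struct ftl_ppa old = ftl_l2p_get(dev, lba);
 *	if (!ftl_ppa_cmp(old, new_ppa)) {
 *		ftl_l2p_set(dev, lba, new_ppa);
 *	}
 *
 * Note that each get and set is individually atomic, but the pair is not;
 * callers needing read-modify-write atomicity must serialize externally.
 */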
static inline size_t
ftl_dev_num_bands(const struct spdk_ftl_dev *dev)
{
	return dev->geo.num_chk;
}

static inline size_t
ftl_dev_lbks_in_chunk(const struct spdk_ftl_dev *dev)
{
	return dev->geo.clba;
}

static inline size_t
ftl_dev_num_punits(const struct spdk_ftl_dev *dev)
{
	return dev->range.end - dev->range.begin + 1;
}

static inline uint64_t
ftl_num_band_lbks(const struct spdk_ftl_dev *dev)
{
	return ftl_dev_num_punits(dev) * ftl_dev_lbks_in_chunk(dev);
}

static inline size_t
ftl_vld_map_size(const struct spdk_ftl_dev *dev)
{
	return (size_t)spdk_divide_round_up(ftl_num_band_lbks(dev), CHAR_BIT);
}

#endif /* FTL_CORE_H */