/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef FTL_CORE_H
#define FTL_CORE_H

#include "spdk/stdinc.h"
#include "spdk/nvme.h"
#include "spdk/nvme_ocssd.h"
#include "spdk/uuid.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk_internal/log.h"
#include "spdk/queue.h"
#include "spdk/ftl.h"

#include "ftl_ppa.h"
#include "ftl_io.h"
#include "ftl_trace.h"

struct spdk_ftl_dev;
struct ftl_band;
struct ftl_chunk;
struct ftl_io;
struct ftl_restore;
struct ftl_wptr;
struct ftl_flush;
struct ftl_reloc;
struct ftl_anm_event;

struct ftl_stats {
	/* Number of writes scheduled directly by the user */
	uint64_t write_user;

	/* Total number of writes */
	uint64_t write_total;

	/* Traces */
	struct ftl_trace *trace;

	/* Number of limits applied */
	uint64_t limits[SPDK_FTL_LIMIT_MAX];
};

struct ftl_punit {
	struct spdk_ftl_dev *dev;

	struct ftl_ppa start_ppa;
};

struct ftl_thread {
	/* Owner */
	struct spdk_ftl_dev *dev;
	/* I/O queue pair */
	struct spdk_nvme_qpair *qpair;

	/* Thread on which the poller is running */
	struct spdk_thread *thread;
	/* Thread id */
	pthread_t tid;

	/* Poller */
	struct spdk_poller *poller;
	/* Poller's function */
	spdk_poller_fn poller_fn;
	/* Poller's frequency */
	uint64_t period_us;
};
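
/*
 * Illustrative sketch (not part of the original header): the poller_fn and
 * period_us fields above are shaped to match spdk_poller_register(), so a
 * thread's poller would typically be armed along these lines (the choice of
 * dev as the poller context is an assumption for illustration):
 *
 *	thread->poller = spdk_poller_register(thread->poller_fn,
 *					      thread->dev, thread->period_us);
 *
 * with poller_fn pointing at e.g. ftl_task_core or ftl_task_read, both
 * declared below with the spdk_poller_fn-compatible (void *ctx) signature.
 */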
struct ftl_global_md {
	/* Device instance */
	struct spdk_uuid uuid;
	/* Size of the l2p table */
	uint64_t num_lbas;
};

struct spdk_ftl_dev {
	/* Device instance */
	struct spdk_uuid uuid;
	/* Device name */
	char *name;
	/* Configuration */
	struct spdk_ftl_conf conf;

	/* Indicates the device is fully initialized */
	int initialized;
	/* Indicates the device is about to be stopped */
	int halt;

	/* Init callback */
	spdk_ftl_init_fn init_cb;
	/* Init callback's context */
	void *init_arg;

	/* Halt callback */
	spdk_ftl_fn halt_cb;
	/* Halt callback's context */
	void *halt_arg;
	/* Halt poller, checks if the device has been halted */
	struct spdk_poller *halt_poller;

	/* IO channel */
	struct spdk_io_channel *ioch;

	/* NVMe controller */
	struct spdk_nvme_ctrlr *ctrlr;
	/* NVMe namespace */
	struct spdk_nvme_ns *ns;
	/* NVMe transport ID */
	struct spdk_nvme_transport_id trid;

	/* LBA map memory pool */
	struct spdk_mempool *lba_pool;

	/* Statistics */
	struct ftl_stats stats;

	/* Parallel unit range */
	struct spdk_ftl_punit_range range;
	/* Array of parallel units */
	struct ftl_punit *punits;

	/* Current sequence number */
	uint64_t seq;

	/* Array of bands */
	struct ftl_band *bands;
	/* Band currently being defragmented */
	struct ftl_band *df_band;
	/* Number of operational bands */
	size_t num_bands;
	/* Next write band */
	struct ftl_band *next_band;
	/* Free band list */
	LIST_HEAD(, ftl_band) free_bands;
	/* Closed bands list */
	LIST_HEAD(, ftl_band) shut_bands;
	/* Number of free bands */
	size_t num_free;

	/* List of write pointers */
	LIST_HEAD(, ftl_wptr) wptr_list;

	/* Logical -> physical table */
	void *l2p;
	/* Size of the l2p table */
	uint64_t num_lbas;

	/* PPA format */
	struct ftl_ppa_fmt ppaf;
	/* PPA address size */
	size_t ppa_len;
	/* Device's geometry */
	struct spdk_ocssd_geometry_data geo;

	/* Flush list */
	LIST_HEAD(, ftl_flush) flush_list;

	/* Device specific md buffer */
	struct ftl_global_md global_md;

	/* Metadata size */
	size_t md_size;

	/* Transfer unit size */
	size_t xfer_size;
	/* Ring write buffer */
	struct ftl_rwb *rwb;

	/* Current user write limit */
	int limit;

	/* In-flight IO operations */
	uint32_t num_inflight;

	/* Manages data relocation */
	struct ftl_reloc *reloc;

	/* Threads */
	struct ftl_thread core_thread;
	struct ftl_thread read_thread;

	/* Devices' list */
	STAILQ_ENTRY(spdk_ftl_dev) stailq;
};

typedef void (*ftl_restore_fn)(struct spdk_ftl_dev *, struct ftl_restore *, int);

void	ftl_apply_limits(struct spdk_ftl_dev *dev);
int	ftl_io_read(struct ftl_io *io);
int	ftl_io_write(struct ftl_io *io);
int	ftl_io_erase(struct ftl_io *io);
int	ftl_io_flush(struct ftl_io *io);
int	ftl_current_limit(const struct spdk_ftl_dev *dev);
int	ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_ppa ppa);
int	ftl_task_core(void *ctx);
int	ftl_task_read(void *ctx);
void	ftl_process_anm_event(struct ftl_anm_event *event);
size_t	ftl_tail_md_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_tail_md_hdr_num_lbks(void);
size_t	ftl_vld_map_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_head_md_num_lbks(const struct spdk_ftl_dev *dev);
int	ftl_restore_md(struct spdk_ftl_dev *dev, ftl_restore_fn cb);
int	ftl_restore_device(struct ftl_restore *restore, ftl_restore_fn cb);

#define ftl_to_ppa(addr) \
	(struct ftl_ppa) { .ppa = (uint64_t)(addr) }

#define ftl_to_ppa_packed(addr) \
	(struct ftl_ppa) { .pack.ppa = (uint32_t)(addr) }

static inline struct spdk_thread *
ftl_get_core_thread(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread.thread;
}

static inline struct spdk_nvme_qpair *
ftl_get_write_qpair(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread.qpair;
}

static inline struct spdk_thread *
ftl_get_read_thread(const struct spdk_ftl_dev *dev)
{
	return dev->read_thread.thread;
}

static inline struct spdk_nvme_qpair *
ftl_get_read_qpair(const struct spdk_ftl_dev *dev)
{
	return dev->read_thread.qpair;
}

static inline int
ftl_ppa_packed(const struct spdk_ftl_dev *dev)
{
	return dev->ppa_len < 32;
}

static inline int
ftl_ppa_invalid(struct ftl_ppa ppa)
{
	return ppa.ppa == ftl_to_ppa(FTL_PPA_INVALID).ppa;
}

static inline int
ftl_ppa_cached(struct ftl_ppa ppa)
{
	return !ftl_ppa_invalid(ppa) && ppa.cached;
}

static inline uint64_t
ftl_ppa_addr_pack(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	return (ppa.lbk << dev->ppaf.lbk_offset) |
	       (ppa.chk << dev->ppaf.chk_offset) |
	       (ppa.pu << dev->ppaf.pu_offset) |
	       (ppa.grp << dev->ppaf.grp_offset);
}

static inline struct ftl_ppa
ftl_ppa_addr_unpack(const struct spdk_ftl_dev *dev, uint64_t ppa)
{
	struct ftl_ppa res = {};

	res.lbk = (ppa >> dev->ppaf.lbk_offset) & dev->ppaf.lbk_mask;
	res.chk = (ppa >> dev->ppaf.chk_offset) & dev->ppaf.chk_mask;
	res.pu = (ppa >> dev->ppaf.pu_offset) & dev->ppaf.pu_mask;
	res.grp = (ppa >> dev->ppaf.grp_offset) & dev->ppaf.grp_mask;

	return res;
}

static inline struct ftl_ppa
ftl_ppa_to_packed(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	struct ftl_ppa p = {};

	if (ftl_ppa_invalid(ppa)) {
		p = ftl_to_ppa_packed(FTL_PPA_INVALID);
	} else if (ftl_ppa_cached(ppa)) {
		p.pack.cached = 1;
		p.pack.offset = (uint32_t) ppa.offset;
	} else {
		p.pack.ppa = (uint32_t) ftl_ppa_addr_pack(dev, ppa);
	}

	return p;
}

static inline struct ftl_ppa
ftl_ppa_from_packed(const struct spdk_ftl_dev *dev, struct ftl_ppa p)
{
	struct ftl_ppa ppa = {};

	if (p.pack.ppa == (uint32_t)FTL_PPA_INVALID) {
		ppa = ftl_to_ppa(FTL_PPA_INVALID);
	} else if (p.pack.cached) {
		ppa.cached = 1;
		ppa.offset = p.pack.offset;
	} else {
		ppa = ftl_ppa_addr_unpack(dev, p.pack.ppa);
	}

	return ppa;
}
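
/*
 * Illustrative sketch (not part of the original header): when the device's
 * PPA fits in fewer than 32 bits (ftl_ppa_packed() above), the two helpers
 * convert between the full and the packed representation, so a valid,
 * non-cached address is expected to round-trip:
 *
 *	struct ftl_ppa packed = ftl_ppa_to_packed(dev, ppa);
 *	struct ftl_ppa orig = ftl_ppa_from_packed(dev, packed);
 *	assert(orig.ppa == ppa.ppa);
 *
 * Cached addresses survive the round trip via the pack.cached/pack.offset
 * bits instead of the grp/pu/chk/lbk fields.
 */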
static inline unsigned int
ftl_ppa_flatten_punit(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	return ppa.pu * dev->geo.num_grp + ppa.grp - dev->range.begin;
}

static inline int
ftl_ppa_in_range(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	unsigned int punit = ftl_ppa_flatten_punit(dev, ppa) + dev->range.begin;

	if (punit >= dev->range.begin && punit <= dev->range.end) {
		return 1;
	}

	return 0;
}

#define _ftl_l2p_set(l2p, off, val, bits) \
	__atomic_store_n(((uint##bits##_t *)(l2p)) + (off), val, __ATOMIC_SEQ_CST)

#define _ftl_l2p_set32(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 32)

#define _ftl_l2p_set64(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 64)

#define _ftl_l2p_get(l2p, off, bits) \
	__atomic_load_n(((uint##bits##_t *)(l2p)) + (off), __ATOMIC_SEQ_CST)

#define _ftl_l2p_get32(l2p, off) \
	_ftl_l2p_get(l2p, off, 32)

#define _ftl_l2p_get64(l2p, off) \
	_ftl_l2p_get(l2p, off, 64)

#define ftl_ppa_cmp(p1, p2) \
	((p1).ppa == (p2).ppa)
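
/*
 * Illustrative sketch (not part of the original header): the L2P table is a
 * flat array of 32-bit (packed) or 64-bit entries, loaded and stored
 * atomically via the macros above so concurrent readers never observe a torn
 * entry. A typical lookup-and-update using the wrappers defined below:
 *
 *	struct ftl_ppa old = ftl_l2p_get(dev, lba);
 *	if (!ftl_ppa_cmp(old, new_ppa)) {
 *		ftl_l2p_set(dev, lba, new_ppa);
 *	}
 */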
static inline void
ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, struct ftl_ppa ppa)
{
	assert(dev->num_lbas > lba);

	if (ftl_ppa_packed(dev)) {
		_ftl_l2p_set32(dev->l2p, lba, ftl_ppa_to_packed(dev, ppa).ppa);
	} else {
		_ftl_l2p_set64(dev->l2p, lba, ppa.ppa);
	}
}

static inline struct ftl_ppa
ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba)
{
	assert(dev->num_lbas > lba);

	if (ftl_ppa_packed(dev)) {
		return ftl_ppa_from_packed(dev, ftl_to_ppa_packed(
						   _ftl_l2p_get32(dev->l2p, lba)));
	} else {
		return ftl_to_ppa(_ftl_l2p_get64(dev->l2p, lba));
	}
}

static inline size_t
ftl_dev_num_bands(const struct spdk_ftl_dev *dev)
{
	return dev->geo.num_chk;
}

static inline size_t
ftl_dev_lbks_in_chunk(const struct spdk_ftl_dev *dev)
{
	return dev->geo.clba;
}

static inline size_t
ftl_dev_num_punits(const struct spdk_ftl_dev *dev)
{
	return dev->range.end - dev->range.begin + 1;
}

static inline uint64_t
ftl_num_band_lbks(const struct spdk_ftl_dev *dev)
{
	return ftl_dev_num_punits(dev) * ftl_dev_lbks_in_chunk(dev);
}

static inline size_t
ftl_vld_map_size(const struct spdk_ftl_dev *dev)
{
	return (size_t)spdk_divide_round_up(ftl_num_band_lbks(dev), CHAR_BIT);
}

static inline struct ftl_trace *
ftl_dev_trace(struct spdk_ftl_dev *dev)
{
	return dev->stats.trace;
}

#endif /* FTL_CORE_H */