/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef FTL_BAND_H
#define FTL_BAND_H

#include "spdk/stdinc.h"
#include "spdk/bit_array.h"
#include "spdk/queue.h"

#include "ftl_io.h"
#include "ftl_ppa.h"

/* Number of LBAs that can be stored in a single block */
#define FTL_NUM_LBA_IN_BLOCK	(FTL_BLOCK_SIZE / sizeof(uint64_t))

struct spdk_ftl_dev;
struct ftl_lba_map_request;

enum ftl_chunk_state {
	FTL_CHUNK_STATE_FREE,
	FTL_CHUNK_STATE_OPEN,
	FTL_CHUNK_STATE_CLOSED,
	FTL_CHUNK_STATE_BAD,
	FTL_CHUNK_STATE_VACANT,
};

struct ftl_chunk {
	/* Chunk state */
	enum ftl_chunk_state state;

	/* Indicates that there is an inflight write */
	bool busy;

	/* Current logical block's offset */
	uint64_t write_offset;

	/* First PPA */
	struct ftl_ppa start_ppa;

	/* Pointer to parallel unit */
	struct ftl_punit *punit;

	/* Position in band's chunk_buf */
	uint32_t pos;

	CIRCLEQ_ENTRY(ftl_chunk) circleq;
};

enum ftl_md_status {
	FTL_MD_SUCCESS,
	/* Metadata read failure */
	FTL_MD_IO_FAILURE,
	/* Invalid version */
	FTL_MD_INVALID_VER,
	/* UUID doesn't match */
	FTL_MD_NO_MD,
	/* UUID and version match but CRC doesn't */
	FTL_MD_INVALID_CRC,
	/* Valid bitmap or LBA map size doesn't match */
	FTL_MD_INVALID_SIZE
};

enum ftl_lba_map_seg_state {
	FTL_LBA_MAP_SEG_CLEAR,
	FTL_LBA_MAP_SEG_PENDING,
	FTL_LBA_MAP_SEG_CACHED
};

struct ftl_lba_map {
	/* LBA/vld map lock */
	pthread_spinlock_t lock;

	/* Number of valid LBAs */
	size_t num_vld;

	/* LBA map's reference count */
	size_t ref_cnt;

	/* Bitmap of valid LBAs */
	struct spdk_bit_array *vld;

	/* LBA map (only valid for open/relocating bands) */
	uint64_t *map;

	/* LBA map segment state map (clear, pending, cached) */
	uint8_t *segments;

	LIST_HEAD(, ftl_lba_map_request) request_list;

	/* Metadata DMA buffer (only valid for open/relocating bands) */
	void *dma_buf;
};
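
/*
 * Usage sketch (illustrative only, error handling elided): the LBA map is
 * reference counted via ref_cnt. A caller that needs the in-memory map is
 * expected to pair ftl_band_acquire_lba_map() with ftl_band_release_lba_map()
 * (both declared below) for as long as it accesses lba_map.map or lba_map.vld:
 *
 *	ftl_band_acquire_lba_map(band);
 *	... read or update band->lba_map ...
 *	ftl_band_release_lba_map(band);
 */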

enum ftl_band_state {
	FTL_BAND_STATE_FREE,
	FTL_BAND_STATE_PREP,
	FTL_BAND_STATE_OPENING,
	FTL_BAND_STATE_OPEN,
	FTL_BAND_STATE_FULL,
	FTL_BAND_STATE_CLOSING,
	FTL_BAND_STATE_CLOSED,
	FTL_BAND_STATE_MAX
};

struct ftl_lba_map_request {
	/* Completion callback */
	ftl_io_fn cb;

	/* Completion callback context */
	void *cb_ctx;

	/* Bit array of requested segments */
	struct spdk_bit_array *segments;

	/* Number of pending segments to read */
	size_t num_pending;

	LIST_ENTRY(ftl_lba_map_request) list_entry;
};

struct ftl_band {
	/* Device this band belongs to */
	struct spdk_ftl_dev *dev;

	/* Number of operational chunks */
	size_t num_chunks;

	/* Array of chunks */
	struct ftl_chunk *chunk_buf;

	/* List of operational chunks */
	CIRCLEQ_HEAD(, ftl_chunk) chunks;

	/* LBA map */
	struct ftl_lba_map lba_map;

	/* Band's state */
	enum ftl_band_state state;

	/* Band's index */
	unsigned int id;

	/* Latest merit calculation */
	double merit;

	/* High defrag priority - means that the metadata should be copied and
	 * the band should be defragged immediately
	 */
	int high_prio;

	/* Sequence number */
	uint64_t seq;

	/* Number of defrag cycles */
	uint64_t wr_cnt;

	/* Tail (end) metadata's start PPA */
	struct ftl_ppa tail_md_ppa;

	/* Bitmap of all bands that have their data moved onto this band */
	struct spdk_bit_array *reloc_bitmap;
	/* Number of open bands containing data moved from this band */
	size_t num_reloc_bands;
	/* Number of blocks currently being moved from this band */
	size_t num_reloc_blocks;

	/* Free/shut bands' lists */
	LIST_ENTRY(ftl_band) list_entry;

	/* High priority queue link */
	STAILQ_ENTRY(ftl_band) prio_stailq;
};

uint64_t ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa);
struct ftl_ppa ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff);
void ftl_band_set_state(struct ftl_band *band, enum ftl_band_state state);
size_t ftl_band_age(const struct ftl_band *band);
void ftl_band_acquire_lba_map(struct ftl_band *band);
int ftl_band_alloc_lba_map(struct ftl_band *band);
void ftl_band_clear_lba_map(struct ftl_band *band);
void ftl_band_release_lba_map(struct ftl_band *band);
int ftl_band_read_lba_map(struct ftl_band *band,
			  size_t offset, size_t lba_cnt,
			  ftl_io_fn cb_fn, void *cb_ctx);
struct ftl_ppa ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa,
				      size_t num_lbks);
struct ftl_ppa ftl_band_next_ppa(struct ftl_band *band, struct ftl_ppa ppa,
				 size_t offset);
size_t ftl_band_num_usable_lbks(const struct ftl_band *band);
size_t ftl_band_user_lbks_left(const struct ftl_band *band, size_t offset);
size_t ftl_band_user_lbks(const struct ftl_band *band);
void ftl_band_set_addr(struct ftl_band *band, uint64_t lba,
		       struct ftl_ppa ppa);
struct ftl_band *ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa);
struct ftl_chunk *ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa ppa);
void ftl_band_md_clear(struct ftl_band *band);
int ftl_band_read_tail_md(struct ftl_band *band, struct ftl_ppa ppa,
			  ftl_io_fn cb_fn, void *cb_ctx);
int ftl_band_read_head_md(struct ftl_band *band, ftl_io_fn cb_fn, void *cb_ctx);
int ftl_band_write_tail_md(struct ftl_band *band, ftl_io_fn cb);
int ftl_band_write_head_md(struct ftl_band *band, ftl_io_fn cb);
struct ftl_ppa ftl_band_tail_md_ppa(struct ftl_band *band);
struct ftl_ppa ftl_band_head_md_ppa(struct ftl_band *band);
void ftl_band_write_failed(struct ftl_band *band);
int ftl_band_full(struct ftl_band *band, size_t offset);
int ftl_band_erase(struct ftl_band *band);
int ftl_band_write_prep(struct ftl_band *band);
struct ftl_chunk *ftl_band_next_operational_chunk(struct ftl_band *band,
		struct ftl_chunk *chunk);
size_t ftl_lba_map_pool_elem_size(struct spdk_ftl_dev *dev);
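
/*
 * Illustrative sketch (not part of the API): walking a band's operational
 * chunks. ftl_band_next_chunk() below wraps around the CIRCLEQ, so the loop
 * ends once it returns to the chunk it started from:
 *
 *	struct ftl_chunk *first = CIRCLEQ_FIRST(&band->chunks);
 *	struct ftl_chunk *chunk = first;
 *
 *	do {
 *		... use chunk ...
 *		chunk = ftl_band_next_chunk(band, chunk);
 *	} while (chunk != first);
 */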

static inline int
ftl_band_empty(const struct ftl_band *band)
{
	return band->lba_map.num_vld == 0;
}

static inline struct ftl_chunk *
ftl_band_next_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
{
	assert(chunk->state != FTL_CHUNK_STATE_BAD);
	return CIRCLEQ_LOOP_NEXT(&band->chunks, chunk, circleq);
}

static inline void
ftl_band_set_next_state(struct ftl_band *band)
{
	ftl_band_set_state(band, (band->state + 1) % FTL_BAND_STATE_MAX);
}

static inline int
ftl_band_state_changing(struct ftl_band *band)
{
	return band->state == FTL_BAND_STATE_OPENING ||
	       band->state == FTL_BAND_STATE_CLOSING;
}

static inline int
ftl_band_lbkoff_valid(struct ftl_band *band, size_t lbkoff)
{
	struct ftl_lba_map *lba_map = &band->lba_map;

	pthread_spin_lock(&lba_map->lock);
	if (spdk_bit_array_get(lba_map->vld, lbkoff)) {
		pthread_spin_unlock(&lba_map->lock);
		return 1;
	}

	pthread_spin_unlock(&lba_map->lock);
	return 0;
}

static inline int
ftl_band_chunk_is_last(struct ftl_band *band, struct ftl_chunk *chunk)
{
	return chunk == CIRCLEQ_LAST(&band->chunks);
}

static inline int
ftl_band_chunk_is_first(struct ftl_band *band, struct ftl_chunk *chunk)
{
	return chunk == CIRCLEQ_FIRST(&band->chunks);
}

static inline int
ftl_chunk_is_writable(const struct ftl_chunk *chunk)
{
	return (chunk->state == FTL_CHUNK_STATE_OPEN ||
		chunk->state == FTL_CHUNK_STATE_FREE) &&
	       !chunk->busy;
}

#endif /* FTL_BAND_H */