/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/queue.h"
#include "spdk/bdev_module.h"

#include "ftl_core.h"
#include "ftl_band.h"
#include "ftl_internal.h"

static void
write_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq *rq = arg;

	rq->success = success;

	ftl_p2l_ckpt_issue(rq);

	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_rq_bdev_write(void *_rq)
{
	struct ftl_rq *rq = _rq;
	struct ftl_band *band = rq->io.band;
	struct spdk_ftl_dev *dev = band->dev;
	int rc;

	rc = spdk_bdev_writev_blocks(dev->base_bdev_desc, dev->base_ioch,
				     rq->io_vec, rq->io_vec_size,
				     rq->io.addr, rq->num_blocks,
				     write_rq_end, rq);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			/* No bdev_io available; queue the request for retry once one is freed */
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			rq->io.bdev_io_wait.bdev = bdev;
			rq->io.bdev_io_wait.cb_fn = ftl_band_rq_bdev_write;
			rq->io.bdev_io_wait.cb_arg = rq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &rq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

void
ftl_band_rq_write(struct ftl_band *band, struct ftl_rq *rq)
{
	struct spdk_ftl_dev *dev = band->dev;

	rq->success = false;
	rq->io.band = band;
	rq->io.addr = band->md->iter.addr;

	ftl_band_rq_bdev_write(rq);

	band->queue_depth++;
	dev->io_activity_total += rq->num_blocks;

	ftl_band_iter_advance(band, rq->num_blocks);
	if (ftl_band_filled(band, band->md->iter.offset)) {
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
		band->owner.state_change_fn(band);
	}
}

static void ftl_band_rq_bdev_read(void *_entry);

static void
read_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq_entry *entry = arg;
	struct ftl_band *band = entry->io.band;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);

	rq->success = success;
	if (spdk_unlikely(!success)) {
		/* Resubmit the read on failure */
		ftl_band_rq_bdev_read(entry);
		spdk_bdev_free_io(bdev_io);
		return;
	}

	assert(band->queue_depth > 0);
	band->queue_depth--;

	rq->owner.cb(rq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_rq_bdev_read(void *_entry)
{
	struct ftl_rq_entry *entry = _entry;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);
	struct spdk_ftl_dev *dev = rq->dev;
	int rc;

	rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch, entry->io_payload,
				   entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
				   read_rq_end, entry);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			entry->bdev_io.wait_entry.bdev = bdev;
			entry->bdev_io.wait_entry.cb_fn = ftl_band_rq_bdev_read;
			entry->bdev_io.wait_entry.cb_arg = entry;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &entry->bdev_io.wait_entry);
		} else {
			ftl_abort();
		}
	}
}
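
/*
 * Submit a read of rq->iter.count blocks, starting at the band's current
 * metadata iterator address, into the request entry at rq->iter.idx.
 * Completion is reported via rq->owner.cb(); a failed bdev read is
 * resubmitted from read_rq_end() until it succeeds.
 */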
void
ftl_band_rq_read(struct ftl_band *band, struct ftl_rq *rq)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];

	assert(rq->iter.idx + rq->iter.count <= rq->num_blocks);

	rq->success = false;
	rq->io.band = band;
	rq->io.addr = band->md->iter.addr;
	entry->io.band = band;
	entry->bdev_io.offset_blocks = rq->io.addr;
	entry->bdev_io.num_blocks = rq->iter.count;

	ftl_band_rq_bdev_read(entry);

	dev->io_activity_total += rq->num_blocks;
	band->queue_depth++;
}

static void
write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_basic_rq *brq = arg;
	struct ftl_band *band = brq->io.band;

	brq->success = success;

	assert(band->queue_depth > 0);
	band->queue_depth--;

	brq->owner.cb(brq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_brq_bdev_write(void *_brq)
{
	struct ftl_basic_rq *brq = _brq;
	struct spdk_ftl_dev *dev = brq->dev;
	int rc;

	rc = spdk_bdev_write_blocks(dev->base_bdev_desc, dev->base_ioch,
				    brq->io_payload, brq->io.addr,
				    brq->num_blocks, write_brq_end, brq);

	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			brq->io.bdev_io_wait.bdev = bdev;
			brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_write;
			brq->io.bdev_io_wait.cb_arg = brq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

void
ftl_band_basic_rq_write(struct ftl_band *band, struct ftl_basic_rq *brq)
{
	struct spdk_ftl_dev *dev = band->dev;

	brq->io.addr = band->md->iter.addr;
	brq->io.band = band;
	brq->success = false;

	ftl_band_brq_bdev_write(brq);

	dev->io_activity_total += brq->num_blocks;
	band->queue_depth++;
	ftl_band_iter_advance(band, brq->num_blocks);
	if (ftl_band_filled(band, band->md->iter.offset)) {
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
		band->owner.state_change_fn(band);
	}
}

static void
read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_basic_rq *brq = arg;
	struct ftl_band *band = brq->io.band;

	brq->success = success;

	assert(band->queue_depth > 0);
	band->queue_depth--;

	brq->owner.cb(brq);
	spdk_bdev_free_io(bdev_io);
}

static void
ftl_band_brq_bdev_read(void *_brq)
{
	struct ftl_basic_rq *brq = _brq;
	struct spdk_ftl_dev *dev = brq->dev;
	int rc;

	rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch,
				   brq->io_payload, brq->io.addr,
				   brq->num_blocks, read_brq_end, brq);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
			brq->io.bdev_io_wait.bdev = bdev;
			brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_read;
			brq->io.bdev_io_wait.cb_arg = brq;
			spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
		} else {
			ftl_abort();
		}
	}
}

void
ftl_band_basic_rq_read(struct ftl_band *band, struct ftl_basic_rq *brq)
{
	struct spdk_ftl_dev *dev = brq->dev;

	brq->io.band = band;

	ftl_band_brq_bdev_read(brq);

	brq->io.band->queue_depth++;
	dev->io_activity_total += brq->num_blocks;
}

static void
band_open_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
	}

	ftl_band_set_state(band, FTL_BAND_STATE_OPEN);
}
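
/*
 * Open a band: persist the OPEN state (with a cleared P2L map checksum) to
 * the band metadata region. The in-memory transition to FTL_BAND_STATE_OPEN
 * happens in band_open_cb() once the metadata entry has been persisted.
 */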
void
ftl_band_open(struct ftl_band *band, enum ftl_band_type type)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_p2l_map *p2l_map = &band->p2l_map;

	ftl_band_set_type(band, type);
	ftl_band_set_state(band, FTL_BAND_STATE_OPENING);

	memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->band_dma_md->state = FTL_BAND_STATE_OPEN;
	p2l_map->band_dma_md->p2l_map_checksum = 0;

	if (spdk_unlikely(0 != band->p2l_map.num_valid)) {
		/*
		 * Inconsistent state: a band that still holds valid blocks
		 * should never have been moved to the free list
		 */
		assert(false && 0 == band->p2l_map.num_valid);
		ftl_abort();
	}

	ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
			     band_open_cb, band, &band->md_persist_entry_ctx);
}

static void
band_close_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
	}

	band->md->p2l_map_checksum = band->p2l_map.band_dma_md->p2l_map_checksum;
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}

static void
band_map_write_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->io.band;
	struct ftl_p2l_map *p2l_map = &band->p2l_map;
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	uint32_t band_map_crc;

	if (spdk_likely(brq->success)) {
		band_map_crc = spdk_crc32c_update(p2l_map->band_map,
						  ftl_tail_md_num_blocks(dev) * FTL_BLOCK_SIZE, 0);
		memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
		p2l_map->band_dma_md->state = FTL_BAND_STATE_CLOSED;
		p2l_map->band_dma_md->p2l_map_checksum = band_map_crc;

		ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
				     band_close_cb, band, &band->md_persist_entry_ctx);
	} else {
		/* Retry the P2L map write in case of failure */
		ftl_band_brq_bdev_write(brq);
		band->queue_depth++;
	}
}

void
ftl_band_close(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	void *metadata = band->p2l_map.band_map;
	uint64_t num_blocks = ftl_tail_md_num_blocks(dev);

	/* Write the P2L map first; after it completes, persist the closed state on nvcache, then set it internally */
	band->md->close_seq_id = ftl_get_next_seq_id(dev);
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);
	ftl_basic_rq_init(dev, &band->metadata_rq, metadata, num_blocks);
	ftl_basic_rq_set_owner(&band->metadata_rq, band_map_write_cb, band);

	ftl_band_basic_rq_write(band, &band->metadata_rq);
}

static void
band_free_cb(int status, void *ctx)
{
	struct ftl_band *band = (struct ftl_band *)ctx;

	if (spdk_unlikely(status)) {
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
	}

	ftl_band_release_p2l_map(band);
	FTL_DEBUGLOG(band->dev, "Band is going to free state. Band id: %u\n", band->id);
	ftl_band_set_state(band, FTL_BAND_STATE_FREE);
	assert(0 == band->p2l_map.ref_cnt);
}
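
/*
 * Free a band: persist the FREE state (with cleared close_seq_id and P2L
 * map checksum) to the band metadata region. band_free_cb() then releases
 * the P2L map and completes the transition to FTL_BAND_STATE_FREE.
 */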
void
ftl_band_free(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_p2l_map *p2l_map = &band->p2l_map;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_layout_region *region = &dev->layout.region[FTL_LAYOUT_REGION_TYPE_BAND_MD];

	memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->band_dma_md->state = FTL_BAND_STATE_FREE;
	p2l_map->band_dma_md->close_seq_id = 0;
	p2l_map->band_dma_md->p2l_map_checksum = 0;

	ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
			     band_free_cb, band, &band->md_persist_entry_ctx);

	/* TODO: The whole band erase code should probably be done here instead */
}

static void
read_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->owner.priv;
	struct spdk_ftl_dev *dev = band->dev;
	ftl_band_ops_cb cb;
	uint32_t band_map_crc;
	bool success = true;
	void *priv;

	cb = band->owner.ops_fn;
	priv = band->owner.priv;

	if (!brq->success) {
		/* Retry the read in case of error */
		ftl_band_basic_rq_read(band, &band->metadata_rq);
		return;
	}

	band_map_crc = spdk_crc32c_update(band->p2l_map.band_map,
					  ftl_tail_md_num_blocks(band->dev) * FTL_BLOCK_SIZE, 0);
	if (band->md->p2l_map_checksum && band->md->p2l_map_checksum != band_map_crc) {
		FTL_ERRLOG(dev, "GC error, inconsistent P2L map CRC\n");
		success = false;
	}
	band->owner.ops_fn = NULL;
	band->owner.priv = NULL;
	cb(band, priv, success);
}

static int
_read_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_basic_rq *rq = &band->metadata_rq;

	if (ftl_band_alloc_p2l_map(band)) {
		return -ENOMEM;
	}

	/* Read the P2L map */
	ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_p2l_map_num_blocks(dev));
	ftl_basic_rq_set_owner(rq, read_md_cb, band);

	rq->io.band = band;
	rq->io.addr = ftl_band_p2l_map_addr(band);

	ftl_band_basic_rq_read(band, &band->metadata_rq);

	return 0;
}

static void
read_md(void *band)
{
	int rc;

	rc = _read_md(band);
	if (spdk_unlikely(rc)) {
		/* Out of memory; retry from the message queue until the P2L map can be allocated */
		spdk_thread_send_msg(spdk_get_thread(), read_md, band);
	}
}

static void
read_tail_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->owner.priv;
	enum ftl_md_status status = FTL_MD_IO_FAILURE;
	ftl_band_md_cb cb;
	void *priv;

	if (spdk_unlikely(!brq->success)) {
		/* Retry the read in case of error */
		ftl_band_basic_rq_read(band, &band->metadata_rq);
		return;
	}

	cb = band->owner.md_fn;
	band->owner.md_fn = NULL;

	priv = band->owner.priv;
	band->owner.priv = NULL;

	status = FTL_MD_SUCCESS;

	cb(band, priv, status);
}

void
ftl_band_read_tail_brq_md(struct ftl_band *band, ftl_band_md_cb cb, void *cntx)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_basic_rq *rq = &band->metadata_rq;

	ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_tail_md_num_blocks(dev));
	ftl_basic_rq_set_owner(rq, read_tail_md_cb, band);

	assert(!band->owner.md_fn);
	assert(!band->owner.priv);
	band->owner.md_fn = cb;
	band->owner.priv = cntx;

	rq->io.band = band;
	rq->io.addr = band->tail_md_addr;

	ftl_band_basic_rq_read(band, &band->metadata_rq);
}
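
/*
 * Pick the next band for garbage collection and read its P2L map before
 * handing the band to the caller; cb is invoked with a NULL band when no
 * band is ready for relocation.
 */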
void
ftl_band_get_next_gc(struct spdk_ftl_dev *dev, ftl_band_ops_cb cb, void *cntx)
{
	struct ftl_band *band = ftl_band_search_next_to_reloc(dev);

	/* If the disk is very small, GC may start so early that no band is ready for it yet */
	if (spdk_unlikely(!band)) {
		cb(NULL, cntx, false);
		return;
	}

	/* Only one owner is allowed */
	assert(!band->queue_depth);
	assert(!band->owner.ops_fn);
	assert(!band->owner.priv);
	band->owner.ops_fn = cb;
	band->owner.priv = cntx;

	read_md(band);
}