/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

/*
 * External snapshots require a channel per thread per esnap bdev. The tree
 * is populated lazily as blob IOs are handled by the back_bs_dev. When this
 * channel is destroyed, all the channels in the tree are destroyed.
 */

struct blob_esnap_channel {
	RB_ENTRY(blob_esnap_channel) node;
	spdk_blob_id	blob_id;
	struct spdk_io_channel *channel;
};

static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)
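
/*
 * Illustrative sketch (not part of the build): a per-thread lookup in this
 * tree is a plain RB_FIND keyed on blob_id. The helper name and locals below
 * are hypothetical; only RB_FIND and the tree/type names above come from this
 * file.
 *
 *	static struct spdk_io_channel *
 *	example_esnap_channel_get(struct blob_esnap_channel_tree *tree, spdk_blob_id id)
 *	{
 *		struct blob_esnap_channel find = { .blob_id = id };
 *		struct blob_esnap_channel *entry = RB_FIND(blob_esnap_channel_tree, tree, &find);
 *
 *		return entry != NULL ? entry->channel : NULL;
 *	}
 */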

static inline bool
blob_is_esnap_clone(const struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
}

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	assert(blob1 != NULL && blob2 != NULL);
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	assert(spdk_spin_held(&bs->used_lock));

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}

static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = NULL;

	assert(spdk_spin_held(&blob->bs->used_lock));

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request. */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* An extent page shall never occupy md_page 0, so start the search from 1. */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster. */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request. */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
		      blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}
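
/*
 * Illustrative usage sketch (not part of the build): callers zero and
 * size-stamp the opts with sizeof() before overriding fields, which is what
 * keeps older and newer struct layouts ABI-compatible. spdk_bs_create_blob_ext()
 * is the public creation API; the callback and context names are hypothetical.
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 10;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, example_create_cb, example_ctx);
 */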

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}

static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr	*xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

static void
blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_dev	*bs_dev = ctx;

	if (bserrno != 0) {
		/*
		 * This is probably due to a memory allocation failure when creating the
		 * blob_esnap_destroy_ctx before iterating threads.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
			    blob->id, bserrno);
		assert(false);
	}

	if (bs_dev == NULL) {
		/*
		 * This check exists to make scanbuild happy.
		 *
		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
		 * the blobstore is being loaded. It could also be NULL if there was an error
		 * opening the esnap device. In each of these cases, no channels could have been
		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
		 * deref.
		 */
		assert(false);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
	bs_dev->destroy(bs_dev);
}

static void
blob_back_bs_destroy(struct spdk_blob *blob)
{
	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
		      blob->id);

	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
					   blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set	*set;
	struct spdk_bs_user_op_args	*args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
}
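
/*
 * Added note: blob_freeze_io() and blob_unfreeze_io() below form a refcounted
 * pair. frozen_refcnt is bumped on the md thread before the channel iteration
 * starts; submission paths that observe a nonzero frozen_refcnt park requests
 * on ch->queued_io, and the unfreeze iteration replays exactly those queued
 * ops whose blob matches.
 */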

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
}

static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}
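
/*
 * Added note: blob_mark_clean() snapshots the just-persisted "active" arrays
 * into "clean" by pointer hand-off and gives "active" freshly copied arrays.
 * The clean arrays therefore always describe what is on disk, so later code
 * can diff against or roll back to them without reallocation races.
 */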

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr	*xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}
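
/*
 * Added note: the xattr descriptor payload parsed above is laid out as
 *
 *	name_length | value_length | name bytes | value bytes
 *
 * so desc->length must equal sizeof(name_length) + sizeof(value_length) +
 * name_length + value_length, which is exactly the check performed on entry.
 */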

static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t	cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags	*desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
			unsigned int					i, j;
			unsigned int					cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* An Extent Table is already present in the md; the two
				 * descriptor types should never appear at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* Extent RLE is already present in the md; the two
				 * descriptor types should never appear at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match the number
				 * from the previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;
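
			/*
			 * Added note: each allocated extent page covers up to
			 * SPDK_EXTENTS_PER_EP clusters. As a hypothetical example with
			 * SPDK_EXTENTS_PER_EP == 512, a 1200-cluster blob needs three
			 * extent pages covering clusters 0-511, 512-1023, and 1024-1199.
			 */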

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, which are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
			unsigned int					i;
			unsigned int					cluster_count = 0;
			size_t						cluster_idx_length;

			if (blob->extent_rle_found) {
				/* Extent RLE is already present in the md; the two
				 * descriptor types should never appear at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should match
			 * the current size of the blob.
			 * If this is changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}
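
/*
 * Added note: descriptors form a type/length/value stream. The walk above
 * advances by sizeof(header) + desc->length each step, so a zero-length
 * PADDING descriptor doubles as the page terminator, and unknown types are
 * skipped safely because their length is self-describing.
 */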

static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD, this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}
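
/*
 * Added note: the serializers below share one convention. Each helper writes
 * descriptors at *buf within *remaining_sz; when a descriptor does not fit,
 * the caller grabs a fresh page via blob_serialize_add_page(), resets buf and
 * remaining_sz to that page's descriptor area, and retries. The md pages form
 * a singly linked chain through page->next.
 */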

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr	*desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}
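
/*
 * Added note with a worked example: zero (unallocated) extent pages are
 * run-length-encoded by the loop above, while allocated pages are emitted
 * one entry each. A hypothetical extent_pages array of
 *
 *	{ 5, 0, 0, 7 }
 *
 * serializes to the entries (page_idx=5, num_pages=1), (0, 2), (7, 1).
 */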

static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least one extent table entry must always be persisted.
	 * That is the case even when num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
	/* Assert for scan-build false positive */
	assert(lba_per_cluster > 0);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}
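
/*
 * Added note with a worked example: runs of physically contiguous clusters
 * and runs of unallocated (zero) clusters each collapse into one extent.
 * With a hypothetical lba_per_cluster of 8, an active.clusters array of
 *
 *	{ 80, 88, 0, 0, 160 }
 *
 * serializes to (cluster_idx=10, length=2), (0, 2), (20, 1).
 */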

static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}
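
/*
 * Added note: start_cluster_idx is rounded down to an SPDK_EXTENTS_PER_EP
 * boundary, so every extent page describes one fixed-size window of the
 * cluster map. As a hypothetical example with SPDK_EXTENTS_PER_EP == 512,
 * cluster 1300 lands in the extent page whose start_cluster_idx is 1024.
 */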

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr	*xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page	*cur_page;
	int				rc;
	uint8_t				*buf;
	size_t				remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	uint32_t			next_extent_page;
	spdk_bs_sequence_t		*seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}
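
/*
 * Added note: the crc field occupies the last 4 bytes of each metadata page,
 * so the CRC32-C above is computed over SPDK_BS_PAGE_SIZE - 4 bytes and then
 * compared against (or stored into) page->crc.
 */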

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot open failed\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);

static int
blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
{
	struct spdk_blob_store *bs = blob->bs;
	struct spdk_bs_dev *bs_dev = NULL;
	const void *esnap_id = NULL;
	size_t id_len = 0;
	int rc;

	if (bs->esnap_bs_dev_create == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
			       "without support for esnap clones\n", blob->id);
		return -ENOTSUP;
	}
	assert(blob->back_bs_dev == NULL);

	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
	if (rc != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
		return -EINVAL;
	}
	assert(id_len > 0 && id_len < UINT32_MAX);

	SPDK_INFOLOG(blob, "Creating external snapshot device\n");

	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
				     &bs_dev);
	if (rc != 0) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
			      "with error %d\n", blob->id, rc);
		return rc;
	}

	/*
	 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot.
	 * This especially might happen during spdk_bs_load() iteration.
	 */
	if (bs_dev != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
				       "is not compatible with blobstore block size %u\n",
				       blob->id, bs_dev->blocklen, bs->io_unit_size);
			bs_dev->destroy(bs_dev);
			return -EINVAL;
		}
	}

	blob->back_bs_dev = bs_dev;
	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;

	return 0;
}

static void
blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	const void			*value;
	size_t				len;
	int				rc;

	if (blob_is_esnap_clone(blob)) {
		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
		blob_load_final(ctx, rc);
		return;
	}

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}
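
/*
 * Added note: blob_load_backing_dev() picks the read path for unallocated
 * clusters. Esnap clones get the externally created bs_dev, thin-provisioned
 * clones with a BLOB_SNAPSHOT xattr read through their opened parent, plain
 * thin blobs fall back to the zeroes device, and fully provisioned blobs
 * need no backing device at all.
 */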

static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	uint64_t			i;
	uint32_t			crc;
	uint64_t			lba;
	void				*tmp;
	uint64_t			sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case blob size should be increased by up to the amount left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(seq, ctx);
}

static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	int				rc;
	uint32_t			crc;
	uint32_t			current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE was found, or no extent_* descriptor was found at all, disable
		 * support for the extent table. Seeing no extent_* descriptors means the blob
		 * has a length of 0 and no extent_rle descriptors were persisted for it.
		 * An EXTENT_TABLE, if used, is always present in metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(seq, ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			next_extent_page;
	struct spdk_blob_md_page	*extent_page;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

static void
bs_batch_clear_dev(struct spdk_blob *blob, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}
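
/*
 * Added note: bs_batch_clear_dev() maps the blob's clear_method onto a device
 * primitive: DEFAULT and UNMAP issue unmap, WRITE_ZEROES issues an explicit
 * zeroing write, and NONE leaves the old data in place, trading deletion
 * speed against leaking stale contents to a future allocation.
 */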

static int
bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
{
	uint32_t	crc;
	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];

	if (super->version > SPDK_BS_VERSION ||
	    super->version < SPDK_BS_INITIAL_VERSION) {
		return -EILSEQ;
	}

	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(super->signature)) != 0) {
		return -EILSEQ;
	}

	crc = blob_md_page_calc_crc(super);
	if (crc != super->crc) {
		return -EILSEQ;
	}

	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless of bstype\n");
	} else {
		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		return -ENXIO;
	}

	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
		return -EILSEQ;
	}

	return 0;
}

static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx	*next_persist, *tmp;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
}
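
/*
 * Added note: persists are batched via two queues. Requests arriving while a
 * persist is in flight park on pending_persists; when the in-flight persist
 * finishes, blob_persist_complete() completes everything already on
 * persists_to_complete, swaps the two lists, and kicks off a single new
 * persist that covers the whole batch.
 */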
spdk_blob_store *bs = blob->bs; 1883 size_t i; 1884 uint64_t lba; 1885 uint64_t lba_count; 1886 spdk_bs_batch_t *batch; 1887 1888 batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx); 1889 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 1890 1891 /* Clear all extent_pages that were truncated */ 1892 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1893 /* Nothing to clear if it was not allocated */ 1894 if (blob->active.extent_pages[i] != 0) { 1895 lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]); 1896 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1897 } 1898 } 1899 1900 bs_batch_close(batch); 1901 } 1902 1903 static void 1904 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1905 { 1906 struct spdk_blob_persist_ctx *ctx = cb_arg; 1907 struct spdk_blob *blob = ctx->blob; 1908 struct spdk_blob_store *bs = blob->bs; 1909 size_t i; 1910 1911 if (bserrno != 0) { 1912 blob_persist_complete(seq, ctx, bserrno); 1913 return; 1914 } 1915 1916 spdk_spin_lock(&bs->used_lock); 1917 /* Release all clusters that were truncated */ 1918 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1919 uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]); 1920 1921 /* Nothing to release if it was not allocated */ 1922 if (blob->active.clusters[i] != 0) { 1923 bs_release_cluster(bs, cluster_num); 1924 } 1925 } 1926 spdk_spin_unlock(&bs->used_lock); 1927 1928 if (blob->active.num_clusters == 0) { 1929 free(blob->active.clusters); 1930 blob->active.clusters = NULL; 1931 blob->active.cluster_array_size = 0; 1932 } else if (blob->active.num_clusters != blob->active.cluster_array_size) { 1933 #ifndef __clang_analyzer__ 1934 void *tmp; 1935 1936 /* scan-build really can't figure reallocs, workaround it */ 1937 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters); 1938 assert(tmp != NULL); 1939 blob->active.clusters = tmp; 1940 1941 #endif 1942 blob->active.cluster_array_size = blob->active.num_clusters; 1943 } 1944 1945 /* Move on to clearing extent pages */ 1946 blob_persist_clear_extents(seq, ctx); 1947 } 1948 1949 static void 1950 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 1951 { 1952 struct spdk_blob *blob = ctx->blob; 1953 struct spdk_blob_store *bs = blob->bs; 1954 spdk_bs_batch_t *batch; 1955 size_t i; 1956 uint64_t lba; 1957 uint64_t lba_count; 1958 1959 /* Clusters don't move around in blobs. The list shrinks or grows 1960 * at the end, but no changes ever occur in the middle of the list. 1961 */ 1962 1963 batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx); 1964 1965 /* Clear all clusters that were truncated */ 1966 lba = 0; 1967 lba_count = 0; 1968 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1969 uint64_t next_lba = blob->active.clusters[i]; 1970 uint64_t next_lba_count = bs_cluster_to_lba(bs, 1); 1971 1972 if (next_lba > 0 && (lba + lba_count) == next_lba) { 1973 /* This cluster is contiguous with the previous one. */ 1974 lba_count += next_lba_count; 1975 continue; 1976 } else if (next_lba == 0) { 1977 continue; 1978 } 1979 1980 /* This cluster is not contiguous with the previous one. 
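 *
 * A hedged worked example (all values assumed for illustration): if
 * bs_cluster_to_lba(bs, 1) == 2048, truncated cluster entries holding LBAs
 * {2048, 4096, 0, 10240} coalesce so that a single bs_batch_clear_dev()
 * covers lba == 2048 with lba_count == 4096, the zero entry (never
 * allocated) is skipped, and 10240 starts a new run that the check after
 * the loop clears.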
*/
1981
1982 /* If a run of LBAs previously existed, clear it now */
1983 if (lba_count > 0) {
1984 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count);
1985 }
1986
1987 /* Start building the next batch */
1988 lba = next_lba;
1989 if (next_lba > 0) {
1990 lba_count = next_lba_count;
1991 } else {
1992 lba_count = 0;
1993 }
1994 }
1995
1996 /* If we ended with a contiguous set of LBAs, clear them now */
1997 if (lba_count > 0) {
1998 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count);
1999 }
2000
2001 bs_batch_close(batch);
2002 }
2003
2004 static void
2005 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2006 {
2007 struct spdk_blob_persist_ctx *ctx = cb_arg;
2008 struct spdk_blob *blob = ctx->blob;
2009 struct spdk_blob_store *bs = blob->bs;
2010 size_t i;
2011
2012 if (bserrno != 0) {
2013 blob_persist_complete(seq, ctx, bserrno);
2014 return;
2015 }
2016
2017 spdk_spin_lock(&bs->used_lock);
2018
2019 /* This loop starts at 1 because the first page is special and handled
2020 * below. The pages (except the first) are never written in place, so any
2021 * pages in the clean list were zeroed above and can now be released.
2022 */
2023 for (i = 1; i < blob->clean.num_pages; i++) {
2024 bs_release_md_page(bs, blob->clean.pages[i]);
2025 }
2026
2027 if (blob->active.num_pages == 0) {
2028 uint32_t page_num;
2029
2030 page_num = bs_blobid_to_page(blob->id);
2031 bs_release_md_page(bs, page_num);
2032 }
2033
2034 spdk_spin_unlock(&bs->used_lock);
2035
2036 /* Move on to clearing clusters */
2037 blob_persist_clear_clusters(seq, ctx);
2038 }
2039
2040 static void
2041 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2042 {
2043 struct spdk_blob_persist_ctx *ctx = cb_arg;
2044 struct spdk_blob *blob = ctx->blob;
2045 struct spdk_blob_store *bs = blob->bs;
2046 uint64_t lba;
2047 uint64_t lba_count;
2048 spdk_bs_batch_t *batch;
2049 size_t i;
2050
2051 if (bserrno != 0) {
2052 blob_persist_complete(seq, ctx, bserrno);
2053 return;
2054 }
2055
2056 batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx);
2057
2058 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
2059
2060 /* This loop starts at 1 because the first page is special and handled
2061 * below. The pages (except the first) are never written in place,
2062 * so any pages in the clean list must be zeroed.
2063 */
2064 for (i = 1; i < blob->clean.num_pages; i++) {
2065 lba = bs_md_page_to_lba(bs, blob->clean.pages[i]);
2066
2067 bs_batch_write_zeroes_dev(batch, lba, lba_count);
2068 }
2069
2070 /* The first page will only be zeroed if this is a delete.
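 *
 * Deletion is signaled by blob->active.num_pages == 0; in that case the
 * page at bs_blobid_to_page(blob->id) is zeroed as well, so a later load
 * will no longer discover the blob.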
*/ 2071 if (blob->active.num_pages == 0) { 2072 uint32_t page_num; 2073 2074 /* The first page in the metadata goes where the blobid indicates */ 2075 page_num = bs_blobid_to_page(blob->id); 2076 lba = bs_md_page_to_lba(bs, page_num); 2077 2078 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2079 } 2080 2081 bs_batch_close(batch); 2082 } 2083 2084 static void 2085 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2086 { 2087 struct spdk_blob_persist_ctx *ctx = cb_arg; 2088 struct spdk_blob *blob = ctx->blob; 2089 struct spdk_blob_store *bs = blob->bs; 2090 uint64_t lba; 2091 uint32_t lba_count; 2092 struct spdk_blob_md_page *page; 2093 2094 if (bserrno != 0) { 2095 blob_persist_complete(seq, ctx, bserrno); 2096 return; 2097 } 2098 2099 if (blob->active.num_pages == 0) { 2100 /* Move on to the next step */ 2101 blob_persist_zero_pages(seq, ctx, 0); 2102 return; 2103 } 2104 2105 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2106 2107 page = &ctx->pages[0]; 2108 /* The first page in the metadata goes where the blobid indicates */ 2109 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 2110 2111 bs_sequence_write_dev(seq, page, lba, lba_count, 2112 blob_persist_zero_pages, ctx); 2113 } 2114 2115 static void 2116 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2117 { 2118 struct spdk_blob *blob = ctx->blob; 2119 struct spdk_blob_store *bs = blob->bs; 2120 uint64_t lba; 2121 uint32_t lba_count; 2122 struct spdk_blob_md_page *page; 2123 spdk_bs_batch_t *batch; 2124 size_t i; 2125 2126 /* Clusters don't move around in blobs. The list shrinks or grows 2127 * at the end, but no changes ever occur in the middle of the list. 2128 */ 2129 2130 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2131 2132 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 2133 2134 /* This starts at 1. The root page is not written until 2135 * all of the others are finished 2136 */ 2137 for (i = 1; i < blob->active.num_pages; i++) { 2138 page = &ctx->pages[i]; 2139 assert(page->sequence_num == i); 2140 2141 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 2142 2143 bs_batch_write_dev(batch, page, lba, lba_count); 2144 } 2145 2146 bs_batch_close(batch); 2147 } 2148 2149 static int 2150 blob_resize(struct spdk_blob *blob, uint64_t sz) 2151 { 2152 uint64_t i; 2153 uint64_t *tmp; 2154 uint64_t cluster; 2155 uint32_t lfmd; /* lowest free md page */ 2156 uint64_t num_clusters; 2157 uint32_t *ep_tmp; 2158 uint64_t new_num_ep = 0, current_num_ep = 0; 2159 struct spdk_blob_store *bs; 2160 int rc; 2161 2162 bs = blob->bs; 2163 2164 blob_verify_md_op(blob); 2165 2166 if (blob->active.num_clusters == sz) { 2167 return 0; 2168 } 2169 2170 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2171 /* If this blob was resized to be larger, then smaller, then 2172 * larger without syncing, then the cluster array already 2173 * contains spare assigned clusters we can use. 2174 */ 2175 num_clusters = spdk_min(blob->active.cluster_array_size, 2176 sz); 2177 } else { 2178 num_clusters = blob->active.num_clusters; 2179 } 2180 2181 if (blob->use_extent_table) { 2182 /* Round up since every cluster beyond current Extent Table size, 2183 * requires new extent page. 
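 *
 * A hedged arithmetic sketch (assuming SPDK_EXTENTS_PER_EP == 512, which
 * may differ in a given build): growing from 1000 to 1030 clusters gives
 * current_num_ep = spdk_divide_round_up(1000, 512) = 2 and
 * new_num_ep = spdk_divide_round_up(1030, 512) = 3, so exactly one
 * additional extent page must be verified free below before any cluster
 * is claimed for a thick-provisioned blob.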
*/ 2184 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2185 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2186 } 2187 2188 assert(!spdk_spin_held(&bs->used_lock)); 2189 2190 /* Check first that we have enough clusters and md pages before we start claiming them. 2191 * bs->used_lock is held to ensure that clusters we think are free are still free when we go 2192 * to claim them later in this function. 2193 */ 2194 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2195 spdk_spin_lock(&bs->used_lock); 2196 if ((sz - num_clusters) > bs->num_free_clusters) { 2197 rc = -ENOSPC; 2198 goto out; 2199 } 2200 lfmd = 0; 2201 for (i = current_num_ep; i < new_num_ep ; i++) { 2202 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2203 if (lfmd == UINT32_MAX) { 2204 /* No more free md pages. Cannot satisfy the request */ 2205 rc = -ENOSPC; 2206 goto out; 2207 } 2208 } 2209 } 2210 2211 if (sz > num_clusters) { 2212 /* Expand the cluster array if necessary. 2213 * We only shrink the array when persisting. 2214 */ 2215 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2216 if (sz > 0 && tmp == NULL) { 2217 rc = -ENOMEM; 2218 goto out; 2219 } 2220 memset(tmp + blob->active.cluster_array_size, 0, 2221 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2222 blob->active.clusters = tmp; 2223 blob->active.cluster_array_size = sz; 2224 2225 /* Expand the extents table, only if enough clusters were added */ 2226 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2227 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2228 if (new_num_ep > 0 && ep_tmp == NULL) { 2229 rc = -ENOMEM; 2230 goto out; 2231 } 2232 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2233 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2234 blob->active.extent_pages = ep_tmp; 2235 blob->active.extent_pages_array_size = new_num_ep; 2236 } 2237 } 2238 2239 blob->state = SPDK_BLOB_STATE_DIRTY; 2240 2241 if (spdk_blob_is_thin_provisioned(blob) == false) { 2242 cluster = 0; 2243 lfmd = 0; 2244 for (i = num_clusters; i < sz; i++) { 2245 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2246 /* Do not increment lfmd here. lfmd will get updated 2247 * to the md_page allocated (if any) when a new extent 2248 * page is needed. Just pass that value again, 2249 * bs_allocate_cluster will just start at that index 2250 * to find the next free md_page when needed. 
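 *
 * In other words, lfmd is only a search cursor: it lets each
 * bs_allocate_cluster() call resume the used_md_pages scan where the
 * previous extent-page allocation left off instead of rescanning from 0.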
2251 */ 2252 } 2253 } 2254 2255 blob->active.num_clusters = sz; 2256 blob->active.num_extent_pages = new_num_ep; 2257 2258 rc = 0; 2259 out: 2260 if (spdk_spin_held(&bs->used_lock)) { 2261 spdk_spin_unlock(&bs->used_lock); 2262 } 2263 2264 return rc; 2265 } 2266 2267 static void 2268 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2269 { 2270 spdk_bs_sequence_t *seq = ctx->seq; 2271 struct spdk_blob *blob = ctx->blob; 2272 struct spdk_blob_store *bs = blob->bs; 2273 uint64_t i; 2274 uint32_t page_num; 2275 void *tmp; 2276 int rc; 2277 2278 /* Generate the new metadata */ 2279 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2280 if (rc < 0) { 2281 blob_persist_complete(seq, ctx, rc); 2282 return; 2283 } 2284 2285 assert(blob->active.num_pages >= 1); 2286 2287 /* Resize the cache of page indices */ 2288 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2289 if (!tmp) { 2290 blob_persist_complete(seq, ctx, -ENOMEM); 2291 return; 2292 } 2293 blob->active.pages = tmp; 2294 2295 /* Assign this metadata to pages. This requires two passes - one to verify that there are 2296 * enough pages and a second to actually claim them. The used_lock is held across 2297 * both passes to ensure things don't change in the middle. 2298 */ 2299 spdk_spin_lock(&bs->used_lock); 2300 page_num = 0; 2301 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 2302 for (i = 1; i < blob->active.num_pages; i++) { 2303 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2304 if (page_num == UINT32_MAX) { 2305 spdk_spin_unlock(&bs->used_lock); 2306 blob_persist_complete(seq, ctx, -ENOMEM); 2307 return; 2308 } 2309 page_num++; 2310 } 2311 2312 page_num = 0; 2313 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2314 for (i = 1; i < blob->active.num_pages; i++) { 2315 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2316 ctx->pages[i - 1].next = page_num; 2317 /* Now that previous metadata page is complete, calculate the crc for it. */ 2318 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2319 blob->active.pages[i] = page_num; 2320 bs_claim_md_page(bs, page_num); 2321 SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num, 2322 blob->id); 2323 page_num++; 2324 } 2325 spdk_spin_unlock(&bs->used_lock); 2326 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2327 /* Start writing the metadata from last page to first */ 2328 blob->state = SPDK_BLOB_STATE_CLEAN; 2329 blob_persist_write_page_chain(seq, ctx); 2330 } 2331 2332 static void 2333 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2334 { 2335 struct spdk_blob_persist_ctx *ctx = cb_arg; 2336 struct spdk_blob *blob = ctx->blob; 2337 size_t i; 2338 uint32_t extent_page_id; 2339 uint32_t page_count = 0; 2340 int rc; 2341 2342 if (ctx->extent_page != NULL) { 2343 spdk_free(ctx->extent_page); 2344 ctx->extent_page = NULL; 2345 } 2346 2347 if (bserrno != 0) { 2348 blob_persist_complete(seq, ctx, bserrno); 2349 return; 2350 } 2351 2352 /* Only write out Extent Pages when blob was resized. 
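 *
 * The loop below is effectively resumable: ctx->next_extent_page records
 * the position in the extent_pages array, each allocated extent page is
 * serialized and CRC'd, and the write completion re-enters this function
 * until the array is exhausted, at which point the regular metadata pages
 * are generated and written.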
*/ 2353 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2354 extent_page_id = blob->active.extent_pages[i]; 2355 if (extent_page_id == 0) { 2356 /* No Extent Page to persist */ 2357 assert(spdk_blob_is_thin_provisioned(blob)); 2358 continue; 2359 } 2360 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2361 ctx->next_extent_page = i + 1; 2362 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2363 if (rc < 0) { 2364 blob_persist_complete(seq, ctx, rc); 2365 return; 2366 } 2367 2368 blob->state = SPDK_BLOB_STATE_DIRTY; 2369 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2370 2371 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2372 2373 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2374 bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 2375 blob_persist_write_extent_pages, ctx); 2376 return; 2377 } 2378 2379 blob_persist_generate_new_md(ctx); 2380 } 2381 2382 static void 2383 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2384 { 2385 struct spdk_blob_persist_ctx *ctx = cb_arg; 2386 struct spdk_blob *blob = ctx->blob; 2387 2388 if (bserrno != 0) { 2389 blob_persist_complete(seq, ctx, bserrno); 2390 return; 2391 } 2392 2393 if (blob->active.num_pages == 0) { 2394 /* This is the signal that the blob should be deleted. 2395 * Immediately jump to the clean up routine. */ 2396 assert(blob->clean.num_pages > 0); 2397 blob->state = SPDK_BLOB_STATE_CLEAN; 2398 blob_persist_zero_pages(seq, ctx, 0); 2399 return; 2400 2401 } 2402 2403 if (blob->clean.num_clusters < blob->active.num_clusters) { 2404 /* Blob was resized up */ 2405 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2406 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2407 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2408 /* Blob was resized down */ 2409 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2410 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2411 } else { 2412 /* No change in size occurred */ 2413 blob_persist_generate_new_md(ctx); 2414 return; 2415 } 2416 2417 blob_persist_write_extent_pages(seq, ctx, 0); 2418 } 2419 2420 struct spdk_bs_mark_dirty { 2421 struct spdk_blob_store *bs; 2422 struct spdk_bs_super_block *super; 2423 spdk_bs_sequence_cpl cb_fn; 2424 void *cb_arg; 2425 }; 2426 2427 static void 2428 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2429 { 2430 struct spdk_bs_mark_dirty *ctx = cb_arg; 2431 2432 if (bserrno == 0) { 2433 ctx->bs->clean = 0; 2434 } 2435 2436 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 2437 2438 spdk_free(ctx->super); 2439 free(ctx); 2440 } 2441 2442 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2443 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2444 2445 2446 static void 2447 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2448 { 2449 struct spdk_bs_mark_dirty *ctx = cb_arg; 2450 int rc; 2451 2452 if (bserrno != 0) { 2453 bs_mark_dirty_write_cpl(seq, ctx, bserrno); 2454 return; 2455 } 2456 2457 rc = bs_super_validate(ctx->super, ctx->bs); 2458 if (rc != 0) { 2459 bs_mark_dirty_write_cpl(seq, ctx, rc); 2460 return; 2461 } 2462 2463 ctx->super->clean = 0; 2464 if (ctx->super->size == 0) { 2465 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 2466 } 
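/* A size of 0 in the on-disk super block is assumed here to come from a
 * blobstore written before the size field was populated; backfilling it
 * from the device geometry keeps bs_super_validate() happy on the next
 * load. Illustrative numbers (not from any real device): blockcnt ==
 * 2097152 with blocklen == 512 would persist size == 2^30, i.e. 1 GiB.
 */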
2467 2468 bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx); 2469 } 2470 2471 static void 2472 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2473 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2474 { 2475 struct spdk_bs_mark_dirty *ctx; 2476 2477 /* Blobstore is already marked dirty */ 2478 if (bs->clean == 0) { 2479 cb_fn(seq, cb_arg, 0); 2480 return; 2481 } 2482 2483 ctx = calloc(1, sizeof(*ctx)); 2484 if (!ctx) { 2485 cb_fn(seq, cb_arg, -ENOMEM); 2486 return; 2487 } 2488 ctx->bs = bs; 2489 ctx->cb_fn = cb_fn; 2490 ctx->cb_arg = cb_arg; 2491 2492 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2493 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2494 if (!ctx->super) { 2495 free(ctx); 2496 cb_fn(seq, cb_arg, -ENOMEM); 2497 return; 2498 } 2499 2500 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 2501 bs_byte_to_lba(bs, sizeof(*ctx->super)), 2502 bs_mark_dirty_write, ctx); 2503 } 2504 2505 /* Write a blob to disk */ 2506 static void 2507 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2508 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2509 { 2510 struct spdk_blob_persist_ctx *ctx; 2511 2512 blob_verify_md_op(blob); 2513 2514 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2515 cb_fn(seq, cb_arg, 0); 2516 return; 2517 } 2518 2519 ctx = calloc(1, sizeof(*ctx)); 2520 if (!ctx) { 2521 cb_fn(seq, cb_arg, -ENOMEM); 2522 return; 2523 } 2524 ctx->blob = blob; 2525 ctx->seq = seq; 2526 ctx->cb_fn = cb_fn; 2527 ctx->cb_arg = cb_arg; 2528 2529 /* Multiple blob persists can affect one another, via blob->state or 2530 * blob mutable data changes. To prevent it, queue up the persists. */ 2531 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2532 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2533 return; 2534 } 2535 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2536 2537 bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx); 2538 } 2539 2540 struct spdk_blob_copy_cluster_ctx { 2541 struct spdk_blob *blob; 2542 uint8_t *buf; 2543 uint64_t page; 2544 uint64_t new_cluster; 2545 uint32_t new_extent_page; 2546 spdk_bs_sequence_t *seq; 2547 struct spdk_blob_md_page *new_cluster_page; 2548 }; 2549 2550 struct spdk_blob_free_cluster_ctx { 2551 struct spdk_blob *blob; 2552 uint64_t page; 2553 struct spdk_blob_md_page *md_page; 2554 uint64_t cluster_num; 2555 uint32_t extent_page; 2556 spdk_bs_sequence_t *seq; 2557 }; 2558 2559 static void 2560 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2561 { 2562 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2563 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2564 TAILQ_HEAD(, spdk_bs_request_set) requests; 2565 spdk_bs_user_op_t *op; 2566 2567 TAILQ_INIT(&requests); 2568 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2569 2570 while (!TAILQ_EMPTY(&requests)) { 2571 op = TAILQ_FIRST(&requests); 2572 TAILQ_REMOVE(&requests, op, link); 2573 if (bserrno == 0) { 2574 bs_user_op_execute(op); 2575 } else { 2576 bs_user_op_abort(op, bserrno); 2577 } 2578 } 2579 2580 spdk_free(ctx->buf); 2581 free(ctx); 2582 } 2583 2584 static void 2585 blob_free_cluster_cpl(void *cb_arg, int bserrno) 2586 { 2587 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 2588 spdk_bs_sequence_t *seq = ctx->seq; 2589 2590 bs_sequence_finish(seq, bserrno); 2591 2592 free(ctx); 2593 } 2594 2595 static void 2596 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx) 2597 { 2598 
spdk_spin_lock(&ctx->blob->bs->used_lock); 2599 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2600 if (ctx->new_extent_page != 0) { 2601 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2602 } 2603 spdk_spin_unlock(&ctx->blob->bs->used_lock); 2604 } 2605 2606 static void 2607 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno) 2608 { 2609 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2610 2611 if (bserrno) { 2612 SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno); 2613 } 2614 2615 blob_insert_cluster_revert(ctx); 2616 bs_sequence_finish(ctx->seq, bserrno); 2617 } 2618 2619 static void 2620 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx) 2621 { 2622 struct spdk_bs_cpl cpl; 2623 spdk_bs_batch_t *batch; 2624 struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel); 2625 2626 /* 2627 * We allocated a cluster and we copied data to it. But now, we realized that we don't need 2628 * this cluster and we want to release it. We must ensure that we clear the data on this 2629 * cluster. 2630 * The cluster may later be re-allocated by a thick-provisioned blob for example. When 2631 * reading from this thick-provisioned blob before writing data, we should read zeroes. 2632 */ 2633 2634 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2635 cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl; 2636 cpl.u.blob_basic.cb_arg = ctx; 2637 2638 batch = bs_batch_open(ch, &cpl, ctx->blob); 2639 if (!batch) { 2640 blob_insert_cluster_clear_cpl(ctx, -ENOMEM); 2641 return; 2642 } 2643 2644 bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2645 bs_cluster_to_lba(ctx->blob->bs, 1)); 2646 bs_batch_close(batch); 2647 } 2648 2649 static void 2650 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2651 { 2652 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2653 2654 if (bserrno) { 2655 if (bserrno == -EEXIST) { 2656 /* The metadata insert failed because another thread 2657 * allocated the cluster first. Clear and free our cluster 2658 * but continue without error. 
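 *
 * Race sketch (threads and ordering assumed for illustration): two I/O
 * threads fault the same unallocated cluster of a thin blob, each claims
 * a fresh cluster and performs the copy, and both inserts are funneled to
 * the metadata thread. The first insert wins; the second sees -EEXIST, so
 * the loser clears its now-unneeded cluster, releases it (and any claimed
 * extent page) via blob_insert_cluster_revert(), and its queued user I/O
 * is then re-executed against the winner's cluster.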
*/ 2659 blob_insert_cluster_clear(ctx); 2660 return; 2661 } 2662 2663 blob_insert_cluster_revert(ctx); 2664 } 2665 2666 bs_sequence_finish(ctx->seq, bserrno); 2667 } 2668 2669 static void 2670 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2671 { 2672 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2673 uint32_t cluster_number; 2674 2675 if (bserrno) { 2676 /* The write failed, so jump to the final completion handler */ 2677 bs_sequence_finish(seq, bserrno); 2678 return; 2679 } 2680 2681 cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page); 2682 2683 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2684 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2685 } 2686 2687 static void 2688 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2689 { 2690 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2691 2692 if (bserrno != 0) { 2693 /* The read failed, so jump to the final completion handler */ 2694 bs_sequence_finish(seq, bserrno); 2695 return; 2696 } 2697 2698 /* Write whole cluster */ 2699 bs_sequence_write_dev(seq, ctx->buf, 2700 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2701 bs_cluster_to_lba(ctx->blob->bs, 1), 2702 blob_write_copy_cpl, ctx); 2703 } 2704 2705 static bool 2706 blob_can_copy(struct spdk_blob *blob, uint32_t cluster_start_page, uint64_t *base_lba) 2707 { 2708 uint64_t lba = bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page); 2709 2710 return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) && 2711 blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba); 2712 } 2713 2714 static void 2715 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba) 2716 { 2717 struct spdk_blob *blob = ctx->blob; 2718 uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz); 2719 2720 bs_sequence_copy_dev(ctx->seq, 2721 bs_cluster_to_lba(blob->bs, ctx->new_cluster), 2722 src_lba, 2723 lba_count, 2724 blob_write_copy_cpl, ctx); 2725 } 2726 2727 static void 2728 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2729 struct spdk_io_channel *_ch, 2730 uint64_t io_unit, spdk_bs_user_op_t *op) 2731 { 2732 struct spdk_bs_cpl cpl; 2733 struct spdk_bs_channel *ch; 2734 struct spdk_blob_copy_cluster_ctx *ctx; 2735 uint32_t cluster_start_page; 2736 uint32_t cluster_number; 2737 bool is_zeroes; 2738 bool can_copy; 2739 bool is_valid_range; 2740 uint64_t copy_src_lba; 2741 int rc; 2742 2743 ch = spdk_io_channel_get_ctx(_ch); 2744 2745 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2746 /* There are already operations pending. Queue this user op 2747 * and return because it will be re-executed when the outstanding 2748 * cluster allocation completes. */ 2749 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2750 return; 2751 } 2752 2753 /* Round the io_unit offset down to the first page in the cluster */ 2754 cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit); 2755 2756 /* Calculate which index in the metadata cluster array the corresponding 2757 * cluster is supposed to be at. 
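 *
 * Worked mapping (values assumed purely for illustration): with a 4 MiB
 * cluster and a 4 KiB io unit there are 1024 io units per cluster, so
 * io_unit == 3000 belongs to cluster_number == 2 (3000 / 1024) and the
 * cluster_start_page computed above is 2048.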
*/
2758 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit);
2759
2760 ctx = calloc(1, sizeof(*ctx));
2761 if (!ctx) {
2762 bs_user_op_abort(op, -ENOMEM);
2763 return;
2764 }
2765
2766 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
2767
2768 ctx->blob = blob;
2769 ctx->page = cluster_start_page;
2770 ctx->new_cluster_page = ch->new_cluster_page;
2771 memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE);
2772
2773 /* Check if the cluster that we intend to do CoW for is valid for
2774 * the backing dev. For zeroes backing dev, it'll be always valid.
2775 * For other backing dev e.g. a snapshot, it could be invalid if
2776 * the blob has been resized after snapshot was taken. */
2777 is_valid_range = blob->back_bs_dev->is_range_valid(blob->back_bs_dev,
2778 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2779 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz));
2780
2781 can_copy = is_valid_range && blob_can_copy(blob, cluster_start_page, &copy_src_lba);
2782
2783 is_zeroes = is_valid_range && blob->back_bs_dev->is_zeroes(blob->back_bs_dev,
2784 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2785 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz));
2786 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) {
2787 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
2788 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2789 if (!ctx->buf) {
2790 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
2791 blob->bs->cluster_sz);
2792 free(ctx);
2793 bs_user_op_abort(op, -ENOMEM);
2794 return;
2795 }
2796 }
2797
2798 spdk_spin_lock(&blob->bs->used_lock);
2799 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
2800 false);
2801 spdk_spin_unlock(&blob->bs->used_lock);
2802 if (rc != 0) {
2803 spdk_free(ctx->buf);
2804 free(ctx);
2805 bs_user_op_abort(op, rc);
2806 return;
2807 }
2808
2809 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2810 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl;
2811 cpl.u.blob_basic.cb_arg = ctx;
2812
2813 ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob);
2814 if (!ctx->seq) {
2815 spdk_spin_lock(&blob->bs->used_lock);
2816 bs_release_cluster(blob->bs, ctx->new_cluster);
2817 spdk_spin_unlock(&blob->bs->used_lock);
2818 spdk_free(ctx->buf);
2819 free(ctx);
2820 bs_user_op_abort(op, -ENOMEM);
2821 return;
2822 }
2823
2824 /* Queue the user op to block other incoming operations */
2825 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2826
2827 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) {
2828 if (can_copy) {
2829 blob_copy(ctx, op, copy_src_lba);
2830 } else {
2831 /* Read cluster from backing device */
2832 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
2833 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2834 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
2835 blob_write_copy, ctx);
2836 }
2837
2838 } else {
2839 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2840 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx);
2841 }
2842 }
2843
2844 static inline bool
2845 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
2846 uint64_t *lba, uint64_t *lba_count)
2847 {
2848 *lba_count = length;
2849
2850 if (!bs_io_unit_is_allocated(blob, io_unit)) {
2851 assert(blob->back_bs_dev != NULL);
2852 *lba = bs_io_unit_to_back_dev_lba(blob, io_unit);
2853 *lba_count =
bs_io_unit_to_back_dev_lba(blob, *lba_count);
2854 return false;
2855 } else {
2856 *lba = bs_blob_io_unit_to_lba(blob, io_unit);
2857 return true;
2858 }
2859 }
2860
2861 struct op_split_ctx {
2862 struct spdk_blob *blob;
2863 struct spdk_io_channel *channel;
2864 uint64_t io_unit_offset;
2865 uint64_t io_units_remaining;
2866 void *curr_payload;
2867 enum spdk_blob_op_type op_type;
2868 spdk_bs_sequence_t *seq;
2869 bool in_submit_ctx;
2870 bool completed_in_submit_ctx;
2871 bool done;
2872 };
2873
2874 static void
2875 blob_request_submit_op_split_next(void *cb_arg, int bserrno)
2876 {
2877 struct op_split_ctx *ctx = cb_arg;
2878 struct spdk_blob *blob = ctx->blob;
2879 struct spdk_io_channel *ch = ctx->channel;
2880 enum spdk_blob_op_type op_type = ctx->op_type;
2881 uint8_t *buf;
2882 uint64_t offset;
2883 uint64_t length;
2884 uint64_t op_length;
2885
2886 if (bserrno != 0 || ctx->io_units_remaining == 0) {
2887 bs_sequence_finish(ctx->seq, bserrno);
2888 if (ctx->in_submit_ctx) {
2889 /* Defer freeing of the ctx object, since it will be
2890 * accessed when this unwinds back to the submission
2891 * context.
2892 */
2893 ctx->done = true;
2894 } else {
2895 free(ctx);
2896 }
2897 return;
2898 }
2899
2900 if (ctx->in_submit_ctx) {
2901 /* If this split operation completed in the context
2902 * of its submission, mark the flag and return immediately
2903 * to avoid recursion.
2904 */
2905 ctx->completed_in_submit_ctx = true;
2906 return;
2907 }
2908
2909 while (true) {
2910 ctx->completed_in_submit_ctx = false;
2911
2912 offset = ctx->io_unit_offset;
2913 length = ctx->io_units_remaining;
2914 buf = ctx->curr_payload;
2915 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob,
2916 offset));
2917
2918 /* Update length and payload for next operation */
2919 ctx->io_units_remaining -= op_length;
2920 ctx->io_unit_offset += op_length;
2921 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
2922 ctx->curr_payload += op_length * blob->bs->io_unit_size;
2923 }
2924
2925 assert(!ctx->in_submit_ctx);
2926 ctx->in_submit_ctx = true;
2927
2928 switch (op_type) {
2929 case SPDK_BLOB_READ:
2930 spdk_blob_io_read(blob, ch, buf, offset, op_length,
2931 blob_request_submit_op_split_next, ctx);
2932 break;
2933 case SPDK_BLOB_WRITE:
2934 spdk_blob_io_write(blob, ch, buf, offset, op_length,
2935 blob_request_submit_op_split_next, ctx);
2936 break;
2937 case SPDK_BLOB_UNMAP:
2938 spdk_blob_io_unmap(blob, ch, offset, op_length,
2939 blob_request_submit_op_split_next, ctx);
2940 break;
2941 case SPDK_BLOB_WRITE_ZEROES:
2942 spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
2943 blob_request_submit_op_split_next, ctx);
2944 break;
2945 case SPDK_BLOB_READV:
2946 case SPDK_BLOB_WRITEV:
2947 SPDK_ERRLOG("readv/write not valid\n");
2948 bs_sequence_finish(ctx->seq, -EINVAL);
2949 free(ctx);
2950 return;
2951 }
2952
2953 #ifndef __clang_analyzer__
2954 /* scan-build reports a false positive around accessing the ctx here. It
2955 * forms a path that recursively calls this function, but then says
2956 * "assuming ctx->in_submit_ctx is false", when that isn't possible.
2957 * This path does free(ctx), returns to here, and reports a use-after-free
2958 * bug. Wrapping this bit of code so that scan-build doesn't see it
2959 * works around the scan-build bug.
2960 */
2961 assert(ctx->in_submit_ctx);
2962 ctx->in_submit_ctx = false;
2963
2964 /* If the operation completed immediately, loop back and submit the
2965 * next operation.
Otherwise we can return and the next split 2966 * operation will get submitted when this current operation is 2967 * later completed asynchronously. 2968 */ 2969 if (ctx->completed_in_submit_ctx) { 2970 continue; 2971 } else if (ctx->done) { 2972 free(ctx); 2973 } 2974 #endif 2975 break; 2976 } 2977 } 2978 2979 static void 2980 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 2981 void *payload, uint64_t offset, uint64_t length, 2982 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2983 { 2984 struct op_split_ctx *ctx; 2985 spdk_bs_sequence_t *seq; 2986 struct spdk_bs_cpl cpl; 2987 2988 assert(blob != NULL); 2989 2990 ctx = calloc(1, sizeof(struct op_split_ctx)); 2991 if (ctx == NULL) { 2992 cb_fn(cb_arg, -ENOMEM); 2993 return; 2994 } 2995 2996 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2997 cpl.u.blob_basic.cb_fn = cb_fn; 2998 cpl.u.blob_basic.cb_arg = cb_arg; 2999 3000 seq = bs_sequence_start_blob(ch, &cpl, blob); 3001 if (!seq) { 3002 free(ctx); 3003 cb_fn(cb_arg, -ENOMEM); 3004 return; 3005 } 3006 3007 ctx->blob = blob; 3008 ctx->channel = ch; 3009 ctx->curr_payload = payload; 3010 ctx->io_unit_offset = offset; 3011 ctx->io_units_remaining = length; 3012 ctx->op_type = op_type; 3013 ctx->seq = seq; 3014 3015 blob_request_submit_op_split_next(ctx, 0); 3016 } 3017 3018 static void 3019 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno) 3020 { 3021 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 3022 3023 if (bserrno) { 3024 bs_sequence_finish(ctx->seq, bserrno); 3025 free(ctx); 3026 return; 3027 } 3028 3029 blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num, 3030 ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx); 3031 } 3032 3033 static void 3034 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 3035 void *payload, uint64_t offset, uint64_t length, 3036 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3037 { 3038 struct spdk_bs_cpl cpl; 3039 uint64_t lba; 3040 uint64_t lba_count; 3041 bool is_allocated; 3042 3043 assert(blob != NULL); 3044 3045 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3046 cpl.u.blob_basic.cb_fn = cb_fn; 3047 cpl.u.blob_basic.cb_arg = cb_arg; 3048 3049 if (blob->frozen_refcnt) { 3050 /* This blob I/O is frozen */ 3051 spdk_bs_user_op_t *op; 3052 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3053 3054 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3055 if (!op) { 3056 cb_fn(cb_arg, -ENOMEM); 3057 return; 3058 } 3059 3060 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3061 3062 return; 3063 } 3064 3065 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3066 3067 switch (op_type) { 3068 case SPDK_BLOB_READ: { 3069 spdk_bs_batch_t *batch; 3070 3071 batch = bs_batch_open(_ch, &cpl, blob); 3072 if (!batch) { 3073 cb_fn(cb_arg, -ENOMEM); 3074 return; 3075 } 3076 3077 if (is_allocated) { 3078 /* Read from the blob */ 3079 bs_batch_read_dev(batch, payload, lba, lba_count); 3080 } else { 3081 /* Read from the backing block device */ 3082 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 3083 } 3084 3085 bs_batch_close(batch); 3086 break; 3087 } 3088 case SPDK_BLOB_WRITE: 3089 case SPDK_BLOB_WRITE_ZEROES: { 3090 if (is_allocated) { 3091 /* Write to the blob */ 3092 spdk_bs_batch_t *batch; 3093 3094 if (lba_count == 0) { 3095 cb_fn(cb_arg, 0); 3096 return; 3097 } 3098 3099 batch = bs_batch_open(_ch, &cpl, blob); 3100 if (!batch) 
{
3101 cb_fn(cb_arg, -ENOMEM);
3102 return;
3103 }
3104
3105 if (op_type == SPDK_BLOB_WRITE) {
3106 bs_batch_write_dev(batch, payload, lba, lba_count);
3107 } else {
3108 bs_batch_write_zeroes_dev(batch, lba, lba_count);
3109 }
3110
3111 bs_batch_close(batch);
3112 } else {
3113 /* Queue this operation and allocate the cluster */
3114 spdk_bs_user_op_t *op;
3115
3116 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
3117 if (!op) {
3118 cb_fn(cb_arg, -ENOMEM);
3119 return;
3120 }
3121
3122 bs_allocate_and_copy_cluster(blob, _ch, offset, op);
3123 }
3124 break;
3125 }
3126 case SPDK_BLOB_UNMAP: {
3127 struct spdk_blob_free_cluster_ctx *ctx = NULL;
3128 spdk_bs_batch_t *batch;
3129
3130 /* If the unmap is cluster-aligned and covers exactly one allocated cluster, release that cluster */
3131 if (spdk_blob_is_thin_provisioned(blob) && is_allocated &&
3132 bs_io_units_per_cluster(blob) == length) {
3133 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
3134 uint32_t cluster_start_page;
3135 uint32_t cluster_number;
3136
3137 assert(offset % bs_io_units_per_cluster(blob) == 0);
3138
3139 /* Round the io_unit offset down to the first page in the cluster */
3140 cluster_start_page = bs_io_unit_to_cluster_start(blob, offset);
3141
3142 /* Calculate which index in the metadata cluster array the corresponding
3143 * cluster is supposed to be at. */
3144 cluster_number = bs_io_unit_to_cluster_number(blob, offset);
3145
3146 ctx = calloc(1, sizeof(*ctx));
3147 if (!ctx) {
3148 cb_fn(cb_arg, -ENOMEM);
3149 return;
3150 }
3151 /* When freeing a cluster the flow should be (in order):
3152 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak
3153 * old data)
3154 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the
3155 * cluster), update and sync metadata freeing the cluster
3156 * 3.
Once metadata update is done, complete the user unmap request 3157 */ 3158 ctx->blob = blob; 3159 ctx->page = cluster_start_page; 3160 ctx->cluster_num = cluster_number; 3161 ctx->md_page = bs_channel->new_cluster_page; 3162 ctx->seq = bs_sequence_start_bs(_ch, &cpl); 3163 if (!ctx->seq) { 3164 free(ctx); 3165 cb_fn(cb_arg, -ENOMEM); 3166 return; 3167 } 3168 3169 if (blob->use_extent_table) { 3170 ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number); 3171 } 3172 3173 cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete; 3174 cpl.u.blob_basic.cb_arg = ctx; 3175 } 3176 3177 batch = bs_batch_open(_ch, &cpl, blob); 3178 if (!batch) { 3179 free(ctx); 3180 cb_fn(cb_arg, -ENOMEM); 3181 return; 3182 } 3183 3184 if (is_allocated) { 3185 bs_batch_unmap_dev(batch, lba, lba_count); 3186 } 3187 3188 bs_batch_close(batch); 3189 break; 3190 } 3191 case SPDK_BLOB_READV: 3192 case SPDK_BLOB_WRITEV: 3193 SPDK_ERRLOG("readv/write not valid\n"); 3194 cb_fn(cb_arg, -EINVAL); 3195 break; 3196 } 3197 } 3198 3199 static void 3200 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3201 void *payload, uint64_t offset, uint64_t length, 3202 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3203 { 3204 assert(blob != NULL); 3205 3206 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 3207 cb_fn(cb_arg, -EPERM); 3208 return; 3209 } 3210 3211 if (length == 0) { 3212 cb_fn(cb_arg, 0); 3213 return; 3214 } 3215 3216 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3217 cb_fn(cb_arg, -EINVAL); 3218 return; 3219 } 3220 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) { 3221 blob_request_submit_op_single(_channel, blob, payload, offset, length, 3222 cb_fn, cb_arg, op_type); 3223 } else { 3224 blob_request_submit_op_split(_channel, blob, payload, offset, length, 3225 cb_fn, cb_arg, op_type); 3226 } 3227 } 3228 3229 struct rw_iov_ctx { 3230 struct spdk_blob *blob; 3231 struct spdk_io_channel *channel; 3232 spdk_blob_op_complete cb_fn; 3233 void *cb_arg; 3234 bool read; 3235 int iovcnt; 3236 struct iovec *orig_iov; 3237 uint64_t io_unit_offset; 3238 uint64_t io_units_remaining; 3239 uint64_t io_units_done; 3240 struct spdk_blob_ext_io_opts *ext_io_opts; 3241 struct iovec iov[0]; 3242 }; 3243 3244 static void 3245 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3246 { 3247 assert(cb_arg == NULL); 3248 bs_sequence_finish(seq, bserrno); 3249 } 3250 3251 static void 3252 rw_iov_split_next(void *cb_arg, int bserrno) 3253 { 3254 struct rw_iov_ctx *ctx = cb_arg; 3255 struct spdk_blob *blob = ctx->blob; 3256 struct iovec *iov, *orig_iov; 3257 int iovcnt; 3258 size_t orig_iovoff; 3259 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 3260 uint64_t byte_count; 3261 3262 if (bserrno != 0 || ctx->io_units_remaining == 0) { 3263 ctx->cb_fn(ctx->cb_arg, bserrno); 3264 free(ctx); 3265 return; 3266 } 3267 3268 io_unit_offset = ctx->io_unit_offset; 3269 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 3270 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 3271 /* 3272 * Get index and offset into the original iov array for our current position in the I/O sequence. 3273 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 3274 * point to the current position in the I/O sequence. 
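 *
 * Hedged example (sizes assumed): with 512-byte io units and
 * io_units_done == 3, byte_count starts at 1536; walking orig_iov entries
 * of lengths {1024, 1024} consumes the first entry entirely
 * (1536 - 1024 = 512) and stops in the second with orig_iovoff == 512.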
3275 */ 3276 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 3277 orig_iov = &ctx->orig_iov[0]; 3278 orig_iovoff = 0; 3279 while (byte_count > 0) { 3280 if (byte_count >= orig_iov->iov_len) { 3281 byte_count -= orig_iov->iov_len; 3282 orig_iov++; 3283 } else { 3284 orig_iovoff = byte_count; 3285 byte_count = 0; 3286 } 3287 } 3288 3289 /* 3290 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 3291 * bytes of this next I/O remain to be accounted for in the new iov array. 3292 */ 3293 byte_count = io_units_count * blob->bs->io_unit_size; 3294 iov = &ctx->iov[0]; 3295 iovcnt = 0; 3296 while (byte_count > 0) { 3297 assert(iovcnt < ctx->iovcnt); 3298 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 3299 iov->iov_base = orig_iov->iov_base + orig_iovoff; 3300 byte_count -= iov->iov_len; 3301 orig_iovoff = 0; 3302 orig_iov++; 3303 iov++; 3304 iovcnt++; 3305 } 3306 3307 ctx->io_unit_offset += io_units_count; 3308 ctx->io_units_remaining -= io_units_count; 3309 ctx->io_units_done += io_units_count; 3310 iov = &ctx->iov[0]; 3311 3312 if (ctx->read) { 3313 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3314 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3315 } else { 3316 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3317 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3318 } 3319 } 3320 3321 static void 3322 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3323 struct iovec *iov, int iovcnt, 3324 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 3325 struct spdk_blob_ext_io_opts *ext_io_opts) 3326 { 3327 struct spdk_bs_cpl cpl; 3328 3329 assert(blob != NULL); 3330 3331 if (!read && blob->data_ro) { 3332 cb_fn(cb_arg, -EPERM); 3333 return; 3334 } 3335 3336 if (length == 0) { 3337 cb_fn(cb_arg, 0); 3338 return; 3339 } 3340 3341 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3342 cb_fn(cb_arg, -EINVAL); 3343 return; 3344 } 3345 3346 /* 3347 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 3348 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 3349 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 3350 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 3351 * to allocate a separate iov array and split the I/O such that none of the resulting 3352 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 3353 * but since this case happens very infrequently, any performance impact will be negligible. 3354 * 3355 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 3356 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 3357 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 3358 * when the batch was completed, to allow for freeing the memory for the iov arrays. 
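 *
 * Concrete shape of the split (sizes assumed): a 64 KiB writev that begins
 * 16 KiB before a cluster boundary becomes a 16 KiB child I/O followed by
 * a 48 KiB child I/O, each issued from rw_iov_split_next() with an iov
 * slice rebuilt from the original array.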
3359 */ 3360 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 3361 uint64_t lba_count; 3362 uint64_t lba; 3363 bool is_allocated; 3364 3365 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3366 cpl.u.blob_basic.cb_fn = cb_fn; 3367 cpl.u.blob_basic.cb_arg = cb_arg; 3368 3369 if (blob->frozen_refcnt) { 3370 /* This blob I/O is frozen */ 3371 enum spdk_blob_op_type op_type; 3372 spdk_bs_user_op_t *op; 3373 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 3374 3375 op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 3376 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 3377 if (!op) { 3378 cb_fn(cb_arg, -ENOMEM); 3379 return; 3380 } 3381 3382 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3383 3384 return; 3385 } 3386 3387 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3388 3389 if (read) { 3390 spdk_bs_sequence_t *seq; 3391 3392 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3393 if (!seq) { 3394 cb_fn(cb_arg, -ENOMEM); 3395 return; 3396 } 3397 3398 seq->ext_io_opts = ext_io_opts; 3399 3400 if (is_allocated) { 3401 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3402 } else { 3403 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3404 rw_iov_done, NULL); 3405 } 3406 } else { 3407 if (is_allocated) { 3408 spdk_bs_sequence_t *seq; 3409 3410 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3411 if (!seq) { 3412 cb_fn(cb_arg, -ENOMEM); 3413 return; 3414 } 3415 3416 seq->ext_io_opts = ext_io_opts; 3417 3418 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3419 } else { 3420 /* Queue this operation and allocate the cluster */ 3421 spdk_bs_user_op_t *op; 3422 3423 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3424 length); 3425 if (!op) { 3426 cb_fn(cb_arg, -ENOMEM); 3427 return; 3428 } 3429 3430 op->ext_io_opts = ext_io_opts; 3431 3432 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3433 } 3434 } 3435 } else { 3436 struct rw_iov_ctx *ctx; 3437 3438 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3439 if (ctx == NULL) { 3440 cb_fn(cb_arg, -ENOMEM); 3441 return; 3442 } 3443 3444 ctx->blob = blob; 3445 ctx->channel = _channel; 3446 ctx->cb_fn = cb_fn; 3447 ctx->cb_arg = cb_arg; 3448 ctx->read = read; 3449 ctx->orig_iov = iov; 3450 ctx->iovcnt = iovcnt; 3451 ctx->io_unit_offset = offset; 3452 ctx->io_units_remaining = length; 3453 ctx->io_units_done = 0; 3454 ctx->ext_io_opts = ext_io_opts; 3455 3456 rw_iov_split_next(ctx, 0); 3457 } 3458 } 3459 3460 static struct spdk_blob * 3461 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3462 { 3463 struct spdk_blob find; 3464 3465 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3466 return NULL; 3467 } 3468 3469 find.id = blobid; 3470 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3471 } 3472 3473 static void 3474 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3475 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3476 { 3477 assert(blob != NULL); 3478 *snapshot_entry = NULL; 3479 *clone_entry = NULL; 3480 3481 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3482 return; 3483 } 3484 3485 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3486 if ((*snapshot_entry)->id == blob->parent_id) { 3487 break; 3488 } 3489 } 3490 3491 if (*snapshot_entry != NULL) { 3492 TAILQ_FOREACH(*clone_entry, 
&(*snapshot_entry)->clones, link) { 3493 if ((*clone_entry)->id == blob->id) { 3494 break; 3495 } 3496 } 3497 3498 assert(*clone_entry != NULL); 3499 } 3500 } 3501 3502 static int 3503 bs_channel_create(void *io_device, void *ctx_buf) 3504 { 3505 struct spdk_blob_store *bs = io_device; 3506 struct spdk_bs_channel *channel = ctx_buf; 3507 struct spdk_bs_dev *dev; 3508 uint32_t max_ops = bs->max_channel_ops; 3509 uint32_t i; 3510 3511 dev = bs->dev; 3512 3513 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3514 if (!channel->req_mem) { 3515 return -1; 3516 } 3517 3518 TAILQ_INIT(&channel->reqs); 3519 3520 for (i = 0; i < max_ops; i++) { 3521 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3522 } 3523 3524 channel->bs = bs; 3525 channel->dev = dev; 3526 channel->dev_channel = dev->create_channel(dev); 3527 3528 if (!channel->dev_channel) { 3529 SPDK_ERRLOG("Failed to create device channel.\n"); 3530 free(channel->req_mem); 3531 return -1; 3532 } 3533 3534 channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, 3535 SPDK_MALLOC_DMA); 3536 if (!channel->new_cluster_page) { 3537 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3538 free(channel->req_mem); 3539 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3540 return -1; 3541 } 3542 3543 TAILQ_INIT(&channel->need_cluster_alloc); 3544 TAILQ_INIT(&channel->queued_io); 3545 RB_INIT(&channel->esnap_channels); 3546 3547 return 0; 3548 } 3549 3550 static void 3551 bs_channel_destroy(void *io_device, void *ctx_buf) 3552 { 3553 struct spdk_bs_channel *channel = ctx_buf; 3554 spdk_bs_user_op_t *op; 3555 3556 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3557 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3558 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3559 bs_user_op_abort(op, -EIO); 3560 } 3561 3562 while (!TAILQ_EMPTY(&channel->queued_io)) { 3563 op = TAILQ_FIRST(&channel->queued_io); 3564 TAILQ_REMOVE(&channel->queued_io, op, link); 3565 bs_user_op_abort(op, -EIO); 3566 } 3567 3568 blob_esnap_destroy_bs_channel(channel); 3569 3570 free(channel->req_mem); 3571 spdk_free(channel->new_cluster_page); 3572 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3573 } 3574 3575 static void 3576 bs_dev_destroy(void *io_device) 3577 { 3578 struct spdk_blob_store *bs = io_device; 3579 struct spdk_blob *blob, *blob_tmp; 3580 3581 bs->dev->destroy(bs->dev); 3582 3583 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3584 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3585 spdk_bit_array_clear(bs->open_blobids, blob->id); 3586 blob_free(blob); 3587 } 3588 3589 spdk_spin_destroy(&bs->used_lock); 3590 3591 spdk_bit_array_free(&bs->open_blobids); 3592 spdk_bit_array_free(&bs->used_blobids); 3593 spdk_bit_array_free(&bs->used_md_pages); 3594 spdk_bit_pool_free(&bs->used_clusters); 3595 /* 3596 * If this function is called for any reason except a successful unload, 3597 * the unload_cpl type will be NONE and this will be a nop. 
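 *
 * In particular, bs_free() also runs on failed load paths (see
 * bs_load_ctx_fail() below), where no unload completion was registered;
 * the NONE type keeps this final callback safe there.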
3598 */ 3599 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3600 3601 free(bs); 3602 } 3603 3604 static int 3605 bs_blob_list_add(struct spdk_blob *blob) 3606 { 3607 spdk_blob_id snapshot_id; 3608 struct spdk_blob_list *snapshot_entry = NULL; 3609 struct spdk_blob_list *clone_entry = NULL; 3610 3611 assert(blob != NULL); 3612 3613 snapshot_id = blob->parent_id; 3614 if (snapshot_id == SPDK_BLOBID_INVALID || 3615 snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 3616 return 0; 3617 } 3618 3619 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3620 if (snapshot_entry == NULL) { 3621 /* Snapshot not found */ 3622 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3623 if (snapshot_entry == NULL) { 3624 return -ENOMEM; 3625 } 3626 snapshot_entry->id = snapshot_id; 3627 TAILQ_INIT(&snapshot_entry->clones); 3628 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3629 } else { 3630 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3631 if (clone_entry->id == blob->id) { 3632 break; 3633 } 3634 } 3635 } 3636 3637 if (clone_entry == NULL) { 3638 /* Clone not found */ 3639 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3640 if (clone_entry == NULL) { 3641 return -ENOMEM; 3642 } 3643 clone_entry->id = blob->id; 3644 TAILQ_INIT(&clone_entry->clones); 3645 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3646 snapshot_entry->clone_count++; 3647 } 3648 3649 return 0; 3650 } 3651 3652 static void 3653 bs_blob_list_remove(struct spdk_blob *blob) 3654 { 3655 struct spdk_blob_list *snapshot_entry = NULL; 3656 struct spdk_blob_list *clone_entry = NULL; 3657 3658 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3659 3660 if (snapshot_entry == NULL) { 3661 return; 3662 } 3663 3664 blob->parent_id = SPDK_BLOBID_INVALID; 3665 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3666 free(clone_entry); 3667 3668 snapshot_entry->clone_count--; 3669 } 3670 3671 static int 3672 bs_blob_list_free(struct spdk_blob_store *bs) 3673 { 3674 struct spdk_blob_list *snapshot_entry; 3675 struct spdk_blob_list *snapshot_entry_tmp; 3676 struct spdk_blob_list *clone_entry; 3677 struct spdk_blob_list *clone_entry_tmp; 3678 3679 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3680 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3681 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3682 free(clone_entry); 3683 } 3684 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3685 free(snapshot_entry); 3686 } 3687 3688 return 0; 3689 } 3690 3691 static void 3692 bs_free(struct spdk_blob_store *bs) 3693 { 3694 bs_blob_list_free(bs); 3695 3696 bs_unregister_md_thread(bs); 3697 spdk_io_device_unregister(bs, bs_dev_destroy); 3698 } 3699 3700 void 3701 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3702 { 3703 3704 if (!opts) { 3705 SPDK_ERRLOG("opts should not be NULL\n"); 3706 return; 3707 } 3708 3709 if (!opts_size) { 3710 SPDK_ERRLOG("opts_size should not be zero value\n"); 3711 return; 3712 } 3713 3714 memset(opts, 0, opts_size); 3715 opts->opts_size = opts_size; 3716 3717 #define FIELD_OK(field) \ 3718 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3719 3720 #define SET_FIELD(field, value) \ 3721 if (FIELD_OK(field)) { \ 3722 opts->field = value; \ 3723 } \ 3724 3725 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3726 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3727 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3728 
SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3729 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3730 3731 if (FIELD_OK(bstype)) { 3732 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3733 } 3734 3735 SET_FIELD(iter_cb_fn, NULL); 3736 SET_FIELD(iter_cb_arg, NULL); 3737 SET_FIELD(force_recover, false); 3738 SET_FIELD(esnap_bs_dev_create, NULL); 3739 SET_FIELD(esnap_ctx, NULL); 3740 3741 #undef FIELD_OK 3742 #undef SET_FIELD 3743 } 3744 3745 static int 3746 bs_opts_verify(struct spdk_bs_opts *opts) 3747 { 3748 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3749 opts->max_channel_ops == 0) { 3750 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3751 return -1; 3752 } 3753 3754 return 0; 3755 } 3756 3757 /* START spdk_bs_load */ 3758 3759 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3760 3761 struct spdk_bs_load_ctx { 3762 struct spdk_blob_store *bs; 3763 struct spdk_bs_super_block *super; 3764 3765 struct spdk_bs_md_mask *mask; 3766 bool in_page_chain; 3767 uint32_t page_index; 3768 uint32_t cur_page; 3769 struct spdk_blob_md_page *page; 3770 3771 uint64_t num_extent_pages; 3772 uint32_t *extent_page_num; 3773 struct spdk_blob_md_page *extent_pages; 3774 struct spdk_bit_array *used_clusters; 3775 3776 spdk_bs_sequence_t *seq; 3777 spdk_blob_op_with_handle_complete iter_cb_fn; 3778 void *iter_cb_arg; 3779 struct spdk_blob *blob; 3780 spdk_blob_id blobid; 3781 3782 bool force_recover; 3783 3784 /* These fields are used in the spdk_bs_dump path. */ 3785 bool dumping; 3786 FILE *fp; 3787 spdk_bs_dump_print_xattr print_xattr_fn; 3788 char xattr_name[4096]; 3789 }; 3790 3791 static int 3792 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3793 struct spdk_bs_load_ctx **_ctx) 3794 { 3795 struct spdk_blob_store *bs; 3796 struct spdk_bs_load_ctx *ctx; 3797 uint64_t dev_size; 3798 int rc; 3799 3800 dev_size = dev->blocklen * dev->blockcnt; 3801 if (dev_size < opts->cluster_sz) { 3802 /* Device size cannot be smaller than cluster size of blobstore */ 3803 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3804 dev_size, opts->cluster_sz); 3805 return -ENOSPC; 3806 } 3807 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 3808 /* Cluster size cannot be smaller than page size */ 3809 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3810 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3811 return -EINVAL; 3812 } 3813 bs = calloc(1, sizeof(struct spdk_blob_store)); 3814 if (!bs) { 3815 return -ENOMEM; 3816 } 3817 3818 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3819 if (!ctx) { 3820 free(bs); 3821 return -ENOMEM; 3822 } 3823 3824 ctx->bs = bs; 3825 ctx->iter_cb_fn = opts->iter_cb_fn; 3826 ctx->iter_cb_arg = opts->iter_cb_arg; 3827 ctx->force_recover = opts->force_recover; 3828 3829 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3830 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3831 if (!ctx->super) { 3832 free(ctx); 3833 free(bs); 3834 return -ENOMEM; 3835 } 3836 3837 RB_INIT(&bs->open_blobs); 3838 TAILQ_INIT(&bs->snapshots); 3839 bs->dev = dev; 3840 bs->md_thread = spdk_get_thread(); 3841 assert(bs->md_thread != NULL); 3842 3843 /* 3844 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3845 * even multiple of the cluster size. 
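 *
 * Hedged arithmetic (device numbers assumed): blockcnt == 2000000 with
 * blocklen == 512 and a 1 MiB cluster gives cluster_sz / blocklen == 2048,
 * so total_clusters == 2000000 / 2048 == 976; the integer division simply
 * drops the partial cluster at the end of the device.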
3846 */ 3847 bs->cluster_sz = opts->cluster_sz; 3848 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3849 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3850 if (!ctx->used_clusters) { 3851 spdk_free(ctx->super); 3852 free(ctx); 3853 free(bs); 3854 return -ENOMEM; 3855 } 3856 3857 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3858 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3859 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3860 } 3861 bs->num_free_clusters = bs->total_clusters; 3862 bs->io_unit_size = dev->blocklen; 3863 3864 bs->max_channel_ops = opts->max_channel_ops; 3865 bs->super_blob = SPDK_BLOBID_INVALID; 3866 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3867 bs->esnap_bs_dev_create = opts->esnap_bs_dev_create; 3868 bs->esnap_ctx = opts->esnap_ctx; 3869 3870 /* The metadata is assumed to be at least 1 page */ 3871 bs->used_md_pages = spdk_bit_array_create(1); 3872 bs->used_blobids = spdk_bit_array_create(0); 3873 bs->open_blobids = spdk_bit_array_create(0); 3874 3875 spdk_spin_init(&bs->used_lock); 3876 3877 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3878 sizeof(struct spdk_bs_channel), "blobstore"); 3879 rc = bs_register_md_thread(bs); 3880 if (rc == -1) { 3881 spdk_io_device_unregister(bs, NULL); 3882 spdk_spin_destroy(&bs->used_lock); 3883 spdk_bit_array_free(&bs->open_blobids); 3884 spdk_bit_array_free(&bs->used_blobids); 3885 spdk_bit_array_free(&bs->used_md_pages); 3886 spdk_bit_array_free(&ctx->used_clusters); 3887 spdk_free(ctx->super); 3888 free(ctx); 3889 free(bs); 3890 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3891 return -ENOMEM; 3892 } 3893 3894 *_ctx = ctx; 3895 *_bs = bs; 3896 return 0; 3897 } 3898 3899 static void 3900 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3901 { 3902 assert(bserrno != 0); 3903 3904 spdk_free(ctx->super); 3905 bs_sequence_finish(ctx->seq, bserrno); 3906 bs_free(ctx->bs); 3907 spdk_bit_array_free(&ctx->used_clusters); 3908 free(ctx); 3909 } 3910 3911 static void 3912 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3913 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3914 { 3915 /* Update the values in the super block */ 3916 super->super_blob = bs->super_blob; 3917 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 3918 super->crc = blob_md_page_calc_crc(super); 3919 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 3920 bs_byte_to_lba(bs, sizeof(*super)), 3921 cb_fn, cb_arg); 3922 } 3923 3924 static void 3925 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3926 { 3927 struct spdk_bs_load_ctx *ctx = arg; 3928 uint64_t mask_size, lba, lba_count; 3929 3930 /* Write out the used clusters mask */ 3931 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3932 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3933 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3934 if (!ctx->mask) { 3935 bs_load_ctx_fail(ctx, -ENOMEM); 3936 return; 3937 } 3938 3939 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 3940 ctx->mask->length = ctx->bs->total_clusters; 3941 /* We could get here through the normal unload path, or through dirty 3942 * shutdown recovery. For the normal unload path, we use the mask from 3943 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 3944 * only the bit array from the load ctx. 
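	 * Either source produces the same on-disk mask: one bit per cluster,
	 * stored in ctx->mask->mask.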
3945 */ 3946 if (ctx->bs->used_clusters) { 3947 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 3948 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 3949 } else { 3950 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 3951 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 3952 } 3953 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3954 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3955 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3956 } 3957 3958 static void 3959 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3960 { 3961 struct spdk_bs_load_ctx *ctx = arg; 3962 uint64_t mask_size, lba, lba_count; 3963 3964 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3965 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3966 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3967 if (!ctx->mask) { 3968 bs_load_ctx_fail(ctx, -ENOMEM); 3969 return; 3970 } 3971 3972 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 3973 ctx->mask->length = ctx->super->md_len; 3974 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 3975 3976 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 3977 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3978 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3979 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3980 } 3981 3982 static void 3983 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3984 { 3985 struct spdk_bs_load_ctx *ctx = arg; 3986 uint64_t mask_size, lba, lba_count; 3987 3988 if (ctx->super->used_blobid_mask_len == 0) { 3989 /* 3990 * This is a pre-v3 on-disk format where the blobid mask does not get 3991 * written to disk. 
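	 * Nothing needs to be persisted in that case; complete immediately and
	 * let a later load rebuild the blobid set from the metadata pages.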
3992 */ 3993 cb_fn(seq, arg, 0); 3994 return; 3995 } 3996 3997 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3998 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3999 SPDK_MALLOC_DMA); 4000 if (!ctx->mask) { 4001 bs_load_ctx_fail(ctx, -ENOMEM); 4002 return; 4003 } 4004 4005 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 4006 ctx->mask->length = ctx->super->md_len; 4007 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 4008 4009 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 4010 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4011 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4012 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4013 } 4014 4015 static void 4016 blob_set_thin_provision(struct spdk_blob *blob) 4017 { 4018 blob_verify_md_op(blob); 4019 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4020 blob->state = SPDK_BLOB_STATE_DIRTY; 4021 } 4022 4023 static void 4024 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 4025 { 4026 blob_verify_md_op(blob); 4027 blob->clear_method = clear_method; 4028 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 4029 blob->state = SPDK_BLOB_STATE_DIRTY; 4030 } 4031 4032 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 4033 4034 static void 4035 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 4036 { 4037 struct spdk_bs_load_ctx *ctx = cb_arg; 4038 spdk_blob_id id; 4039 int64_t page_num; 4040 4041 /* Iterate to next blob (we can't use spdk_bs_iter_next function as our 4042 * last blob has been removed */ 4043 page_num = bs_blobid_to_page(ctx->blobid); 4044 page_num++; 4045 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 4046 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 4047 bs_load_iter(ctx, NULL, -ENOENT); 4048 return; 4049 } 4050 4051 id = bs_page_to_blobid(page_num); 4052 4053 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 4054 } 4055 4056 static void 4057 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 4058 { 4059 struct spdk_bs_load_ctx *ctx = cb_arg; 4060 4061 if (bserrno != 0) { 4062 SPDK_ERRLOG("Failed to close corrupted blob\n"); 4063 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4064 return; 4065 } 4066 4067 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 4068 } 4069 4070 static void 4071 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 4072 { 4073 struct spdk_bs_load_ctx *ctx = cb_arg; 4074 uint64_t i; 4075 4076 if (bserrno != 0) { 4077 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4078 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4079 return; 4080 } 4081 4082 /* Snapshot and clone have the same copy of cluster map and extent pages 4083 * at this point. Let's clear both for snapshot now, 4084 * so that it won't be cleared for clone later when we remove snapshot. 
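	 * Otherwise deleting the snapshot would release clusters and extent
	 * pages that the clone still references.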
4085 * Also set thin provision to pass data corruption check */ 4086 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 4087 ctx->blob->active.clusters[i] = 0; 4088 } 4089 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 4090 ctx->blob->active.extent_pages[i] = 0; 4091 } 4092 4093 ctx->blob->md_ro = false; 4094 4095 blob_set_thin_provision(ctx->blob); 4096 4097 ctx->blobid = ctx->blob->id; 4098 4099 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 4100 } 4101 4102 static void 4103 bs_update_corrupted_blob(void *cb_arg, int bserrno) 4104 { 4105 struct spdk_bs_load_ctx *ctx = cb_arg; 4106 4107 if (bserrno != 0) { 4108 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4109 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4110 return; 4111 } 4112 4113 ctx->blob->md_ro = false; 4114 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 4115 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 4116 spdk_blob_set_read_only(ctx->blob); 4117 4118 if (ctx->iter_cb_fn) { 4119 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 4120 } 4121 bs_blob_list_add(ctx->blob); 4122 4123 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4124 } 4125 4126 static void 4127 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 4128 { 4129 struct spdk_bs_load_ctx *ctx = cb_arg; 4130 4131 if (bserrno != 0) { 4132 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 4133 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4134 return; 4135 } 4136 4137 if (blob->parent_id == ctx->blob->id) { 4138 /* Power failure occurred before updating clone (snapshot delete case) 4139 * or after updating clone (creating snapshot case) - keep snapshot */ 4140 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 4141 } else { 4142 /* Power failure occurred after updating clone (snapshot delete case) 4143 * or before updating clone (creating snapshot case) - remove snapshot */ 4144 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 4145 } 4146 } 4147 4148 static void 4149 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 4150 { 4151 struct spdk_bs_load_ctx *ctx = arg; 4152 const void *value; 4153 size_t len; 4154 int rc = 0; 4155 4156 if (bserrno == 0) { 4157 /* Examine blob if it is corrupted after power failure. Fix 4158 * the ones that can be fixed and remove any other corrupted 4159 * ones. If it is not corrupted just process it */ 4160 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 4161 if (rc != 0) { 4162 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 4163 if (rc != 0) { 4164 /* Not corrupted - process it and continue with iterating through blobs */ 4165 if (ctx->iter_cb_fn) { 4166 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 4167 } 4168 bs_blob_list_add(blob); 4169 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 4170 return; 4171 } 4172 4173 } 4174 4175 assert(len == sizeof(spdk_blob_id)); 4176 4177 ctx->blob = blob; 4178 4179 /* Open clone to check if we are able to fix this blob or should we remove it */ 4180 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 4181 return; 4182 } else if (bserrno == -ENOENT) { 4183 bserrno = 0; 4184 } else { 4185 /* 4186 * This case needs to be looked at further. Same problem 4187 * exists with applications that rely on explicit blob 4188 * iteration. We should just skip the blob that failed 4189 * to load and continue on to the next one. 
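	 * Until that is implemented, the error is propagated below and the
	 * whole load is aborted.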
4190 */ 4191 SPDK_ERRLOG("Error in iterating blobs\n"); 4192 } 4193 4194 ctx->iter_cb_fn = NULL; 4195 4196 spdk_free(ctx->super); 4197 spdk_free(ctx->mask); 4198 bs_sequence_finish(ctx->seq, bserrno); 4199 free(ctx); 4200 } 4201 4202 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 4203 4204 static void 4205 bs_load_complete(struct spdk_bs_load_ctx *ctx) 4206 { 4207 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters); 4208 if (ctx->dumping) { 4209 bs_dump_read_md_page(ctx->seq, ctx); 4210 return; 4211 } 4212 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx); 4213 } 4214 4215 static void 4216 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4217 { 4218 struct spdk_bs_load_ctx *ctx = cb_arg; 4219 int rc; 4220 4221 /* The type must be correct */ 4222 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 4223 4224 /* The length of the mask (in bits) must not be greater than 4225 * the length of the buffer (converted to bits) */ 4226 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 4227 4228 /* The length of the mask must be exactly equal to the size 4229 * (in pages) of the metadata region */ 4230 assert(ctx->mask->length == ctx->super->md_len); 4231 4232 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length); 4233 if (rc < 0) { 4234 spdk_free(ctx->mask); 4235 bs_load_ctx_fail(ctx, rc); 4236 return; 4237 } 4238 4239 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask); 4240 bs_load_complete(ctx); 4241 } 4242 4243 static void 4244 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4245 { 4246 struct spdk_bs_load_ctx *ctx = cb_arg; 4247 uint64_t lba, lba_count, mask_size; 4248 int rc; 4249 4250 if (bserrno != 0) { 4251 bs_load_ctx_fail(ctx, bserrno); 4252 return; 4253 } 4254 4255 /* The type must be correct */ 4256 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 4257 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4258 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 4259 struct spdk_blob_md_page) * 8)); 4260 /* 4261 * The length of the mask must be equal to or larger than the total number of clusters. It may be 4262 * larger than the total number of clusters due to a failure spdk_bs_grow. 
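	 * The excess trailing bits refer to clusters beyond the end of this
	 * blobstore, so it is safe to drop them.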
4263  */
4264 	assert(ctx->mask->length >= ctx->bs->total_clusters);
4265 	if (ctx->mask->length > ctx->bs->total_clusters) {
4266 		SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n");
4267 		ctx->mask->length = ctx->bs->total_clusters;
4268 	}
4269 
4270 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length);
4271 	if (rc < 0) {
4272 		spdk_free(ctx->mask);
4273 		bs_load_ctx_fail(ctx, rc);
4274 		return;
4275 	}
4276 
4277 	spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask);
4278 	ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters);
4279 	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
4280 
4281 	spdk_free(ctx->mask);
4282 
4283 	/* Read the used blobids mask */
4284 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
4285 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4286 				 SPDK_MALLOC_DMA);
4287 	if (!ctx->mask) {
4288 		bs_load_ctx_fail(ctx, -ENOMEM);
4289 		return;
4290 	}
4291 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4292 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4293 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4294 			     bs_load_used_blobids_cpl, ctx);
4295 }
4296 
4297 static void
4298 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4299 {
4300 	struct spdk_bs_load_ctx *ctx = cb_arg;
4301 	uint64_t lba, lba_count, mask_size;
4302 	int rc;
4303 
4304 	if (bserrno != 0) {
4305 		bs_load_ctx_fail(ctx, bserrno);
4306 		return;
4307 	}
4308 
4309 	/* The type must be correct */
4310 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
4311 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4312 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
4313 				     8));
4314 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
4315 	if (ctx->mask->length != ctx->super->md_len) {
4316 		SPDK_ERRLOG("mismatched md_len in used_pages mask: "
4317 			    "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n",
4318 			    ctx->mask->length, ctx->super->md_len);
4319 		assert(false);
4320 	}
4321 
4322 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
4323 	if (rc < 0) {
4324 		spdk_free(ctx->mask);
4325 		bs_load_ctx_fail(ctx, rc);
4326 		return;
4327 	}
4328 
4329 	spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask);
4330 	spdk_free(ctx->mask);
4331 
4332 	/* Read the used clusters mask */
4333 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
4334 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4335 				 SPDK_MALLOC_DMA);
4336 	if (!ctx->mask) {
4337 		bs_load_ctx_fail(ctx, -ENOMEM);
4338 		return;
4339 	}
4340 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
4341 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
4342 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4343 			     bs_load_used_clusters_cpl, ctx);
4344 }
4345 
4346 static void
4347 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
4348 {
4349 	uint64_t lba, lba_count, mask_size;
4350 
4351 	/* Read the used pages mask */
4352 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
4353 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
4354 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4355 	if (!ctx->mask) {
4356 		bs_load_ctx_fail(ctx, -ENOMEM);
4357 		return;
4358 	}
4359 
4360 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
4361 	lba_count =
bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4362 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 4363 bs_load_used_pages_cpl, ctx); 4364 } 4365 4366 static int 4367 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 4368 { 4369 struct spdk_blob_store *bs = ctx->bs; 4370 struct spdk_blob_md_descriptor *desc; 4371 size_t cur_desc = 0; 4372 4373 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4374 while (cur_desc < sizeof(page->descriptors)) { 4375 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4376 if (desc->length == 0) { 4377 /* If padding and length are 0, this terminates the page */ 4378 break; 4379 } 4380 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4381 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4382 unsigned int i, j; 4383 unsigned int cluster_count = 0; 4384 uint32_t cluster_idx; 4385 4386 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4387 4388 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4389 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 4390 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 4391 /* 4392 * cluster_idx = 0 means an unallocated cluster - don't mark that 4393 * in the used cluster map. 4394 */ 4395 if (cluster_idx != 0) { 4396 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 4397 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 4398 if (bs->num_free_clusters == 0) { 4399 return -ENOSPC; 4400 } 4401 bs->num_free_clusters--; 4402 } 4403 cluster_count++; 4404 } 4405 } 4406 if (cluster_count == 0) { 4407 return -EINVAL; 4408 } 4409 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4410 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4411 uint32_t i; 4412 uint32_t cluster_count = 0; 4413 uint32_t cluster_idx; 4414 size_t cluster_idx_length; 4415 4416 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4417 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4418 4419 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4420 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4421 return -EINVAL; 4422 } 4423 4424 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4425 cluster_idx = desc_extent->cluster_idx[i]; 4426 /* 4427 * cluster_idx = 0 means an unallocated cluster - don't mark that 4428 * in the used cluster map. 
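			 * Non-zero entries are absolute blobstore cluster indexes
			 * and are set directly in the bitmap being rebuilt.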
4429 */ 4430 if (cluster_idx != 0) { 4431 if (cluster_idx < desc_extent->start_cluster_idx && 4432 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4433 return -EINVAL; 4434 } 4435 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4436 if (bs->num_free_clusters == 0) { 4437 return -ENOSPC; 4438 } 4439 bs->num_free_clusters--; 4440 } 4441 cluster_count++; 4442 } 4443 4444 if (cluster_count == 0) { 4445 return -EINVAL; 4446 } 4447 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4448 /* Skip this item */ 4449 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4450 /* Skip this item */ 4451 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4452 /* Skip this item */ 4453 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4454 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4455 uint32_t num_extent_pages = ctx->num_extent_pages; 4456 uint32_t i; 4457 size_t extent_pages_length; 4458 void *tmp; 4459 4460 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4461 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4462 4463 if (desc_extent_table->length == 0 || 4464 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4465 return -EINVAL; 4466 } 4467 4468 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4469 if (desc_extent_table->extent_page[i].page_idx != 0) { 4470 if (desc_extent_table->extent_page[i].num_pages != 1) { 4471 return -EINVAL; 4472 } 4473 num_extent_pages += 1; 4474 } 4475 } 4476 4477 if (num_extent_pages > 0) { 4478 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4479 if (tmp == NULL) { 4480 return -ENOMEM; 4481 } 4482 ctx->extent_page_num = tmp; 4483 4484 /* Extent table entries contain md page numbers for extent pages. 4485 * Zeroes represent unallocated extent pages, those are run-length-encoded. 4486 */ 4487 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4488 if (desc_extent_table->extent_page[i].page_idx != 0) { 4489 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4490 ctx->num_extent_pages += 1; 4491 } 4492 } 4493 } 4494 } else { 4495 /* Error */ 4496 return -EINVAL; 4497 } 4498 /* Advance to the next descriptor */ 4499 cur_desc += sizeof(*desc) + desc->length; 4500 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4501 break; 4502 } 4503 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4504 } 4505 return 0; 4506 } 4507 4508 static bool 4509 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4510 { 4511 uint32_t crc; 4512 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4513 size_t desc_len; 4514 4515 crc = blob_md_page_calc_crc(page); 4516 if (crc != page->crc) { 4517 return false; 4518 } 4519 4520 /* Extent page should always be of sequence num 0. */ 4521 if (page->sequence_num != 0) { 4522 return false; 4523 } 4524 4525 /* Descriptor type must be EXTENT_PAGE. */ 4526 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4527 return false; 4528 } 4529 4530 /* Descriptor length cannot exceed the page. */ 4531 desc_len = sizeof(*desc) + desc->length; 4532 if (desc_len > sizeof(page->descriptors)) { 4533 return false; 4534 } 4535 4536 /* It has to be the only descriptor in the page. 
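	 * Anything that follows it must be zero-length padding; a second
	 * non-empty descriptor disqualifies the page.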
 */
4537 	if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
4538 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
4539 		if (desc->length != 0) {
4540 			return false;
4541 		}
4542 	}
4543 
4544 	return true;
4545 }
4546 
4547 static bool
4548 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
4549 {
4550 	uint32_t crc;
4551 	struct spdk_blob_md_page *page = ctx->page;
4552 
4553 	crc = blob_md_page_calc_crc(page);
4554 	if (crc != page->crc) {
4555 		return false;
4556 	}
4557 
4558 	/* First page of a sequence should match the blobid. */
4559 	if (page->sequence_num == 0 &&
4560 	    bs_page_to_blobid(ctx->cur_page) != page->id) {
4561 		return false;
4562 	}
4563 	assert(bs_load_cur_extent_page_valid(page) == false);
4564 
4565 	return true;
4566 }
4567 
4568 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
4569 
4570 static void
4571 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4572 {
4573 	struct spdk_bs_load_ctx *ctx = cb_arg;
4574 
4575 	if (bserrno != 0) {
4576 		bs_load_ctx_fail(ctx, bserrno);
4577 		return;
4578 	}
4579 
4580 	bs_load_complete(ctx);
4581 }
4582 
4583 static void
4584 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4585 {
4586 	struct spdk_bs_load_ctx *ctx = cb_arg;
4587 
4588 	spdk_free(ctx->mask);
4589 	ctx->mask = NULL;
4590 
4591 	if (bserrno != 0) {
4592 		bs_load_ctx_fail(ctx, bserrno);
4593 		return;
4594 	}
4595 
4596 	bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl);
4597 }
4598 
4599 static void
4600 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4601 {
4602 	struct spdk_bs_load_ctx *ctx = cb_arg;
4603 
4604 	spdk_free(ctx->mask);
4605 	ctx->mask = NULL;
4606 
4607 	if (bserrno != 0) {
4608 		bs_load_ctx_fail(ctx, bserrno);
4609 		return;
4610 	}
4611 
4612 	bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl);
4613 }
4614 
4615 static void
4616 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
4617 {
4618 	bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl);
4619 }
4620 
4621 static void
4622 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
4623 {
4624 	uint64_t num_md_clusters;
4625 	uint64_t i;
4626 
4627 	ctx->in_page_chain = false;
4628 
4629 	do {
4630 		ctx->page_index++;
4631 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
4632 
4633 	if (ctx->page_index < ctx->super->md_len) {
4634 		ctx->cur_page = ctx->page_index;
4635 		bs_load_replay_cur_md_page(ctx);
4636 	} else {
4637 		/* Claim all of the clusters used by the metadata */
4638 		num_md_clusters = spdk_divide_round_up(
4639 					  ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster);
4640 		for (i = 0; i < num_md_clusters; i++) {
4641 			spdk_bit_array_set(ctx->used_clusters, i);
4642 		}
4643 		ctx->bs->num_free_clusters -= num_md_clusters;
4644 		spdk_free(ctx->page);
4645 		bs_load_write_used_md(ctx);
4646 	}
4647 }
4648 
4649 static void
4650 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4651 {
4652 	struct spdk_bs_load_ctx *ctx = cb_arg;
4653 	uint32_t page_num;
4654 	uint64_t i;
4655 
4656 	if (bserrno != 0) {
4657 		spdk_free(ctx->extent_pages);
4658 		bs_load_ctx_fail(ctx, bserrno);
4659 		return;
4660 	}
4661 
4662 	for (i = 0; i < ctx->num_extent_pages; i++) {
4663 		/* Extent pages are only read when they are referenced by in-chain metadata.
4664 		 * The integrity of the md is suspect if such a page is not a valid extent page.
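		 * Replay therefore fails with -EILSEQ below rather than
		 * skipping the page.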
*/ 4665 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4666 spdk_free(ctx->extent_pages); 4667 bs_load_ctx_fail(ctx, -EILSEQ); 4668 return; 4669 } 4670 4671 page_num = ctx->extent_page_num[i]; 4672 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4673 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4674 spdk_free(ctx->extent_pages); 4675 bs_load_ctx_fail(ctx, -EILSEQ); 4676 return; 4677 } 4678 } 4679 4680 spdk_free(ctx->extent_pages); 4681 free(ctx->extent_page_num); 4682 ctx->extent_page_num = NULL; 4683 ctx->num_extent_pages = 0; 4684 4685 bs_load_replay_md_chain_cpl(ctx); 4686 } 4687 4688 static void 4689 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4690 { 4691 spdk_bs_batch_t *batch; 4692 uint32_t page; 4693 uint64_t lba; 4694 uint64_t i; 4695 4696 ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0, 4697 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4698 if (!ctx->extent_pages) { 4699 bs_load_ctx_fail(ctx, -ENOMEM); 4700 return; 4701 } 4702 4703 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4704 4705 for (i = 0; i < ctx->num_extent_pages; i++) { 4706 page = ctx->extent_page_num[i]; 4707 assert(page < ctx->super->md_len); 4708 lba = bs_md_page_to_lba(ctx->bs, page); 4709 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4710 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE)); 4711 } 4712 4713 bs_batch_close(batch); 4714 } 4715 4716 static void 4717 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4718 { 4719 struct spdk_bs_load_ctx *ctx = cb_arg; 4720 uint32_t page_num; 4721 struct spdk_blob_md_page *page; 4722 4723 if (bserrno != 0) { 4724 bs_load_ctx_fail(ctx, bserrno); 4725 return; 4726 } 4727 4728 page_num = ctx->cur_page; 4729 page = ctx->page; 4730 if (bs_load_cur_md_page_valid(ctx) == true) { 4731 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4732 spdk_spin_lock(&ctx->bs->used_lock); 4733 bs_claim_md_page(ctx->bs, page_num); 4734 spdk_spin_unlock(&ctx->bs->used_lock); 4735 if (page->sequence_num == 0) { 4736 SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num); 4737 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4738 } 4739 if (bs_load_replay_md_parse_page(ctx, page)) { 4740 bs_load_ctx_fail(ctx, -EILSEQ); 4741 return; 4742 } 4743 if (page->next != SPDK_INVALID_MD_PAGE) { 4744 ctx->in_page_chain = true; 4745 ctx->cur_page = page->next; 4746 bs_load_replay_cur_md_page(ctx); 4747 return; 4748 } 4749 if (ctx->num_extent_pages != 0) { 4750 bs_load_replay_extent_pages(ctx); 4751 return; 4752 } 4753 } 4754 } 4755 bs_load_replay_md_chain_cpl(ctx); 4756 } 4757 4758 static void 4759 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4760 { 4761 uint64_t lba; 4762 4763 assert(ctx->cur_page < ctx->super->md_len); 4764 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4765 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4766 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4767 bs_load_replay_md_cpl, ctx); 4768 } 4769 4770 static void 4771 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4772 { 4773 ctx->page_index = 0; 4774 ctx->cur_page = 0; 4775 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4776 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4777 if (!ctx->page) { 4778 bs_load_ctx_fail(ctx, -ENOMEM); 4779 return; 4780 } 4781 bs_load_replay_cur_md_page(ctx); 4782 } 4783 4784 static void 4785 bs_recover(struct spdk_bs_load_ctx *ctx) 4786 { 4787 int rc; 4788 4789 SPDK_NOTICELOG("Performing recovery on blobstore\n"); 4790 rc = 
spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4791 if (rc < 0) { 4792 bs_load_ctx_fail(ctx, -ENOMEM); 4793 return; 4794 } 4795 4796 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4797 if (rc < 0) { 4798 bs_load_ctx_fail(ctx, -ENOMEM); 4799 return; 4800 } 4801 4802 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4803 if (rc < 0) { 4804 bs_load_ctx_fail(ctx, -ENOMEM); 4805 return; 4806 } 4807 4808 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4809 if (rc < 0) { 4810 bs_load_ctx_fail(ctx, -ENOMEM); 4811 return; 4812 } 4813 4814 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4815 bs_load_replay_md(ctx); 4816 } 4817 4818 static int 4819 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4820 { 4821 int rc; 4822 4823 if (ctx->super->size == 0) { 4824 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4825 } 4826 4827 if (ctx->super->io_unit_size == 0) { 4828 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4829 } 4830 4831 ctx->bs->clean = 1; 4832 ctx->bs->cluster_sz = ctx->super->cluster_size; 4833 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4834 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 4835 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) { 4836 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster); 4837 } 4838 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4839 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4840 if (rc < 0) { 4841 return -ENOMEM; 4842 } 4843 ctx->bs->md_start = ctx->super->md_start; 4844 ctx->bs->md_len = ctx->super->md_len; 4845 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4846 if (rc < 0) { 4847 return -ENOMEM; 4848 } 4849 4850 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4851 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4852 ctx->bs->super_blob = ctx->super->super_blob; 4853 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4854 4855 return 0; 4856 } 4857 4858 static void 4859 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4860 { 4861 struct spdk_bs_load_ctx *ctx = cb_arg; 4862 int rc; 4863 4864 rc = bs_super_validate(ctx->super, ctx->bs); 4865 if (rc != 0) { 4866 bs_load_ctx_fail(ctx, rc); 4867 return; 4868 } 4869 4870 rc = bs_parse_super(ctx); 4871 if (rc < 0) { 4872 bs_load_ctx_fail(ctx, rc); 4873 return; 4874 } 4875 4876 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4877 bs_recover(ctx); 4878 } else { 4879 bs_load_read_used_pages(ctx); 4880 } 4881 } 4882 4883 static inline int 4884 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4885 { 4886 4887 if (!src->opts_size) { 4888 SPDK_ERRLOG("opts_size should not be zero value\n"); 4889 return -1; 4890 } 4891 4892 #define FIELD_OK(field) \ 4893 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4894 4895 #define SET_FIELD(field) \ 4896 if (FIELD_OK(field)) { \ 4897 dst->field = src->field; \ 4898 } \ 4899 4900 SET_FIELD(cluster_sz); 4901 SET_FIELD(num_md_pages); 4902 SET_FIELD(max_md_ops); 4903 SET_FIELD(max_channel_ops); 4904 SET_FIELD(clear_method); 4905 4906 if (FIELD_OK(bstype)) { 4907 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 4908 } 4909 SET_FIELD(iter_cb_fn); 4910 SET_FIELD(iter_cb_arg); 4911 SET_FIELD(force_recover); 4912 SET_FIELD(esnap_bs_dev_create); 4913 
SET_FIELD(esnap_ctx); 4914 4915 dst->opts_size = src->opts_size; 4916 4917 /* You should not remove this statement, but need to update the assert statement 4918 * if you add a new field, and also add a corresponding SET_FIELD statement */ 4919 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size"); 4920 4921 #undef FIELD_OK 4922 #undef SET_FIELD 4923 4924 return 0; 4925 } 4926 4927 void 4928 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4929 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4930 { 4931 struct spdk_blob_store *bs; 4932 struct spdk_bs_cpl cpl; 4933 struct spdk_bs_load_ctx *ctx; 4934 struct spdk_bs_opts opts = {}; 4935 int err; 4936 4937 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 4938 4939 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 4940 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 4941 dev->destroy(dev); 4942 cb_fn(cb_arg, NULL, -EINVAL); 4943 return; 4944 } 4945 4946 spdk_bs_opts_init(&opts, sizeof(opts)); 4947 if (o) { 4948 if (bs_opts_copy(o, &opts)) { 4949 return; 4950 } 4951 } 4952 4953 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 4954 dev->destroy(dev); 4955 cb_fn(cb_arg, NULL, -EINVAL); 4956 return; 4957 } 4958 4959 err = bs_alloc(dev, &opts, &bs, &ctx); 4960 if (err) { 4961 dev->destroy(dev); 4962 cb_fn(cb_arg, NULL, err); 4963 return; 4964 } 4965 4966 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 4967 cpl.u.bs_handle.cb_fn = cb_fn; 4968 cpl.u.bs_handle.cb_arg = cb_arg; 4969 cpl.u.bs_handle.bs = bs; 4970 4971 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 4972 if (!ctx->seq) { 4973 spdk_free(ctx->super); 4974 free(ctx); 4975 bs_free(bs); 4976 cb_fn(cb_arg, NULL, -ENOMEM); 4977 return; 4978 } 4979 4980 /* Read the super block */ 4981 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 4982 bs_byte_to_lba(bs, sizeof(*ctx->super)), 4983 bs_load_super_cpl, ctx); 4984 } 4985 4986 /* END spdk_bs_load */ 4987 4988 /* START spdk_bs_dump */ 4989 4990 static void 4991 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 4992 { 4993 spdk_free(ctx->super); 4994 4995 /* 4996 * We need to defer calling bs_call_cpl() until after 4997 * dev destruction, so tuck these away for later use. 
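	 * bs_free() below unregisters the io device; the saved completion then
	 * fires from the dev destroy callback (bs_dev_destroy).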
4998  */
4999 	ctx->bs->unload_err = bserrno;
5000 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5001 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5002 
5003 	bs_sequence_finish(seq, 0);
5004 	bs_free(ctx->bs);
5005 	free(ctx);
5006 }
5007 
5008 static void
5009 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5010 {
5011 	struct spdk_blob_md_descriptor_xattr *desc_xattr;
5012 	uint32_t i;
5013 	const char *type;
5014 
5015 	desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;
5016 
5017 	if (desc_xattr->length !=
5018 	    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
5019 	    desc_xattr->name_length + desc_xattr->value_length) {
		/* The descriptor's length fields are inconsistent; do not try to
		 * print a name/value that may extend past the descriptor. */
		fprintf(ctx->fp, "Malformed xattr descriptor: length mismatch\n");
		return;
5020 	}
5021 
5022 	memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
5023 	ctx->xattr_name[desc_xattr->name_length] = '\0';
5024 	if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
5025 		type = "XATTR";
5026 	} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
5027 		type = "XATTR_INTERNAL";
5028 	} else {
5029 		assert(false);
5030 		type = "XATTR_?";
5031 	}
5032 	fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name);
5033 	fprintf(ctx->fp, " value = \"");
5034 	ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
5035 			    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
5036 			    desc_xattr->value_length);
5037 	fprintf(ctx->fp, "\"\n");
5038 	for (i = 0; i < desc_xattr->value_length; i++) {
5039 		if (i % 16 == 0) {
5040 			fprintf(ctx->fp, " ");
5041 		}
5042 		fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
5043 		if ((i + 1) % 16 == 0) {
5044 			fprintf(ctx->fp, "\n");
5045 		}
5046 	}
5047 	if (i % 16 != 0) {
5048 		fprintf(ctx->fp, "\n");
5049 	}
5050 }
5051 
5052 struct type_flag_desc {
5053 	uint64_t mask;
5054 	uint64_t val;
5055 	const char *name;
5056 };
5057 
5058 static void
5059 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags,
5060 			struct type_flag_desc *desc, size_t numflags)
5061 {
5062 	uint64_t covered = 0;
5063 	size_t i;
5064 
5065 	for (i = 0; i < numflags; i++) {
5066 		if ((desc[i].mask & flags) != desc[i].val) {
5067 			continue;
5068 		}
5069 		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name);
5070 		if (desc[i].mask != desc[i].val) {
5071 			fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")",
5072 				desc[i].mask, desc[i].val);
5073 		}
5074 		fprintf(ctx->fp, "\n");
5075 		covered |= desc[i].mask;
5076 	}
5077 	if ((flags & ~covered) != 0) {
5078 		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered);
5079 	}
5080 }
5081 
5082 static void
5083 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5084 {
5085 	struct spdk_blob_md_descriptor_flags *type_desc;
5086 #define ADD_FLAG(f) { f, f, #f }
5087 #define ADD_MASK_VAL(m, v) { m, v, #v }
5088 	static struct type_flag_desc invalid[] = {
5089 		ADD_FLAG(SPDK_BLOB_THIN_PROV),
5090 		ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR),
5091 		ADD_FLAG(SPDK_BLOB_EXTENT_TABLE),
5092 	};
5093 	static struct type_flag_desc data_ro[] = {
5094 		ADD_FLAG(SPDK_BLOB_READ_ONLY),
5095 	};
5096 	static struct type_flag_desc md_ro[] = {
5097 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT),
5098 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE),
5099 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP),
5100 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES),
5101 	};
5102 #undef ADD_FLAG
5103 #undef ADD_MASK_VAL
5104 
5105 	type_desc = (struct
spdk_blob_md_descriptor_flags *)desc; 5106 fprintf(ctx->fp, "Flags:\n"); 5107 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 5108 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 5109 SPDK_COUNTOF(invalid)); 5110 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 5111 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 5112 SPDK_COUNTOF(data_ro)); 5113 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 5114 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 5115 SPDK_COUNTOF(md_ro)); 5116 } 5117 5118 static void 5119 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5120 { 5121 struct spdk_blob_md_descriptor_extent_table *et_desc; 5122 uint64_t num_extent_pages; 5123 uint32_t et_idx; 5124 5125 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 5126 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 5127 sizeof(et_desc->extent_page[0]); 5128 5129 fprintf(ctx->fp, "Extent table:\n"); 5130 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 5131 if (et_desc->extent_page[et_idx].page_idx == 0) { 5132 /* Zeroes represent unallocated extent pages. */ 5133 continue; 5134 } 5135 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 5136 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 5137 et_desc->extent_page[et_idx].num_pages, 5138 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 5139 } 5140 } 5141 5142 static void 5143 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 5144 { 5145 uint32_t page_idx = ctx->cur_page; 5146 struct spdk_blob_md_page *page = ctx->page; 5147 struct spdk_blob_md_descriptor *desc; 5148 size_t cur_desc = 0; 5149 uint32_t crc; 5150 5151 fprintf(ctx->fp, "=========\n"); 5152 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 5153 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 5154 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 5155 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 5156 if (page->next == SPDK_INVALID_MD_PAGE) { 5157 fprintf(ctx->fp, "Next: None\n"); 5158 } else { 5159 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 5160 } 5161 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 5162 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 5163 fprintf(ctx->fp, " md"); 5164 } 5165 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 5166 fprintf(ctx->fp, " blob"); 5167 } 5168 fprintf(ctx->fp, "\n"); 5169 5170 crc = blob_md_page_calc_crc(page); 5171 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 5172 5173 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 5174 while (cur_desc < sizeof(page->descriptors)) { 5175 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 5176 if (desc->length == 0) { 5177 /* If padding and length are 0, this terminates the page */ 5178 break; 5179 } 5180 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 5181 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 5182 unsigned int i; 5183 5184 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 5185 5186 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 5187 if (desc_extent_rle->extents[i].cluster_idx != 0) { 5188 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5189 desc_extent_rle->extents[i].cluster_idx); 5190 } else { 5191 fprintf(ctx->fp, "Unallocated Extent - "); 5192 } 5193 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 5194 fprintf(ctx->fp, "\n"); 5195 } 5196 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 5197 struct spdk_blob_md_descriptor_extent_page *desc_extent; 5198 unsigned int i; 5199 5200 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 5201 5202 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 5203 if (desc_extent->cluster_idx[i] != 0) { 5204 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5205 desc_extent->cluster_idx[i]); 5206 } else { 5207 fprintf(ctx->fp, "Unallocated Extent"); 5208 } 5209 fprintf(ctx->fp, "\n"); 5210 } 5211 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5212 bs_dump_print_xattr(ctx, desc); 5213 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5214 bs_dump_print_xattr(ctx, desc); 5215 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 5216 bs_dump_print_type_flags(ctx, desc); 5217 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 5218 bs_dump_print_extent_table(ctx, desc); 5219 } else { 5220 /* Error */ 5221 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 5222 } 5223 /* Advance to the next descriptor */ 5224 cur_desc += sizeof(*desc) + desc->length; 5225 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 5226 break; 5227 } 5228 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 5229 } 5230 } 5231 5232 static void 5233 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5234 { 5235 struct spdk_bs_load_ctx *ctx = cb_arg; 5236 5237 if (bserrno != 0) { 5238 bs_dump_finish(seq, ctx, bserrno); 5239 return; 5240 } 5241 5242 if (ctx->page->id != 0) { 5243 bs_dump_print_md_page(ctx); 5244 } 5245 5246 ctx->cur_page++; 5247 5248 if (ctx->cur_page < ctx->super->md_len) { 5249 bs_dump_read_md_page(seq, ctx); 5250 } else { 5251 spdk_free(ctx->page); 5252 bs_dump_finish(seq, ctx, 0); 5253 } 5254 } 5255 5256 static void 5257 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 5258 { 5259 struct spdk_bs_load_ctx *ctx = cb_arg; 5260 uint64_t lba; 5261 5262 assert(ctx->cur_page < ctx->super->md_len); 5263 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 5264 bs_sequence_read_dev(seq, ctx->page, lba, 5265 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 5266 bs_dump_read_md_page_cpl, ctx); 5267 } 5268 5269 static void 5270 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5271 { 5272 struct spdk_bs_load_ctx *ctx = cb_arg; 5273 int rc; 5274 5275 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 5276 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5277 sizeof(ctx->super->signature)) != 0) { 5278 fprintf(ctx->fp, "(Mismatch)\n"); 5279 bs_dump_finish(seq, ctx, bserrno); 5280 return; 5281 } else { 5282 fprintf(ctx->fp, "(OK)\n"); 5283 } 5284 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 5285 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 5286 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 5287 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 5288 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 5289 fprintf(ctx->fp, "Super Blob ID: "); 5290 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 5291 fprintf(ctx->fp, "(None)\n"); 5292 } else { 5293 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 5294 } 5295 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 5296 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 5297 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 5298 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 5299 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 5300 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 5301 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 5302 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 5303 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 5304 5305 ctx->cur_page = 0; 5306 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 5307 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5308 if (!ctx->page) { 5309 bs_dump_finish(seq, ctx, -ENOMEM); 5310 return; 5311 } 5312 5313 rc = bs_parse_super(ctx); 5314 if (rc < 0) { 5315 bs_load_ctx_fail(ctx, rc); 5316 return; 5317 } 5318 5319 bs_load_read_used_pages(ctx); 5320 } 5321 5322 void 5323 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 5324 spdk_bs_op_complete cb_fn, void *cb_arg) 5325 { 5326 struct spdk_blob_store *bs; 5327 struct spdk_bs_cpl cpl; 5328 struct spdk_bs_load_ctx *ctx; 5329 struct spdk_bs_opts opts = {}; 5330 int err; 5331 5332 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 5333 5334 spdk_bs_opts_init(&opts, sizeof(opts)); 5335 5336 err = bs_alloc(dev, &opts, &bs, &ctx); 5337 if (err) { 5338 dev->destroy(dev); 5339 cb_fn(cb_arg, err); 5340 return; 5341 } 5342 5343 ctx->dumping = true; 5344 ctx->fp = fp; 5345 ctx->print_xattr_fn = print_xattr_fn; 5346 5347 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5348 cpl.u.bs_basic.cb_fn = cb_fn; 5349 cpl.u.bs_basic.cb_arg = cb_arg; 5350 5351 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5352 if (!ctx->seq) { 5353 spdk_free(ctx->super); 5354 free(ctx); 5355 bs_free(bs); 5356 cb_fn(cb_arg, -ENOMEM); 5357 return; 5358 } 5359 5360 /* Read the super block */ 5361 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5362 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5363 bs_dump_super_cpl, ctx); 5364 } 5365 5366 /* END spdk_bs_dump */ 5367 5368 /* START spdk_bs_init */ 5369 5370 static void 5371 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5372 { 5373 struct spdk_bs_load_ctx *ctx = cb_arg; 5374 5375 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 5376 spdk_free(ctx->super); 5377 free(ctx); 5378 5379 bs_sequence_finish(seq, bserrno); 5380 } 5381 5382 static void 5383 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5384 { 5385 struct spdk_bs_load_ctx *ctx = cb_arg; 5386 5387 /* Write super block */ 5388 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5389 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5390 bs_init_persist_super_cpl, ctx); 5391 } 5392 5393 void 5394 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5395 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5396 { 5397 struct spdk_bs_load_ctx *ctx; 5398 struct spdk_blob_store *bs; 5399 struct spdk_bs_cpl cpl; 5400 spdk_bs_sequence_t *seq; 5401 spdk_bs_batch_t *batch; 5402 uint64_t num_md_lba; 5403 uint64_t num_md_pages; 5404 uint64_t num_md_clusters; 5405 uint64_t max_used_cluster_mask_len; 5406 uint32_t i; 5407 struct spdk_bs_opts opts = {}; 5408 int rc; 5409 uint64_t lba, lba_count; 5410 5411 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5412 5413 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5414 SPDK_ERRLOG("unsupported dev block length of %d\n", 5415 dev->blocklen); 5416 dev->destroy(dev); 5417 cb_fn(cb_arg, NULL, -EINVAL); 5418 return; 5419 } 5420 5421 spdk_bs_opts_init(&opts, sizeof(opts)); 5422 if (o) { 5423 if (bs_opts_copy(o, &opts)) { 5424 return; 5425 } 5426 } 5427 5428 if (bs_opts_verify(&opts) != 0) { 5429 dev->destroy(dev); 5430 cb_fn(cb_arg, NULL, -EINVAL); 5431 return; 5432 } 5433 5434 rc = bs_alloc(dev, &opts, &bs, &ctx); 5435 if (rc) { 5436 dev->destroy(dev); 5437 cb_fn(cb_arg, NULL, rc); 5438 return; 5439 } 5440 5441 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5442 /* By default, allocate 1 page per cluster. 5443 * Technically, this over-allocates metadata 5444 * because more metadata will reduce the number 5445 * of usable clusters. This can be addressed with 5446 * more complex math in the future. 5447 */ 5448 bs->md_len = bs->total_clusters; 5449 } else { 5450 bs->md_len = opts.num_md_pages; 5451 } 5452 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5453 if (rc < 0) { 5454 spdk_free(ctx->super); 5455 free(ctx); 5456 bs_free(bs); 5457 cb_fn(cb_arg, NULL, -ENOMEM); 5458 return; 5459 } 5460 5461 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5462 if (rc < 0) { 5463 spdk_free(ctx->super); 5464 free(ctx); 5465 bs_free(bs); 5466 cb_fn(cb_arg, NULL, -ENOMEM); 5467 return; 5468 } 5469 5470 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5471 if (rc < 0) { 5472 spdk_free(ctx->super); 5473 free(ctx); 5474 bs_free(bs); 5475 cb_fn(cb_arg, NULL, -ENOMEM); 5476 return; 5477 } 5478 5479 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5480 sizeof(ctx->super->signature)); 5481 ctx->super->version = SPDK_BS_VERSION; 5482 ctx->super->length = sizeof(*ctx->super); 5483 ctx->super->super_blob = bs->super_blob; 5484 ctx->super->clean = 0; 5485 ctx->super->cluster_size = bs->cluster_sz; 5486 ctx->super->io_unit_size = bs->io_unit_size; 5487 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5488 5489 /* Calculate how many pages the metadata consumes at the front 5490 * of the disk. 5491 */ 5492 5493 /* The super block uses 1 page */ 5494 num_md_pages = 1; 5495 5496 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5497 * up to the nearest page, plus a header. 
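	 * e.g. assuming 4 KiB metadata pages and md_len = 32768, the mask body
	 * is 32768 / 8 = 4096 bytes, which together with the spdk_bs_md_mask
	 * header rounds up to used_page_mask_len = 2 pages.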
5498 */ 5499 ctx->super->used_page_mask_start = num_md_pages; 5500 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5501 spdk_divide_round_up(bs->md_len, 8), 5502 SPDK_BS_PAGE_SIZE); 5503 num_md_pages += ctx->super->used_page_mask_len; 5504 5505 /* The used_clusters mask requires 1 bit per cluster, rounded 5506 * up to the nearest page, plus a header. 5507 */ 5508 ctx->super->used_cluster_mask_start = num_md_pages; 5509 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5510 spdk_divide_round_up(bs->total_clusters, 8), 5511 SPDK_BS_PAGE_SIZE); 5512 /* The blobstore might be extended, then the used_cluster bitmap will need more space. 5513 * Here we calculate the max clusters we can support according to the 5514 * num_md_pages (bs->md_len). 5515 */ 5516 max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5517 spdk_divide_round_up(bs->md_len, 8), 5518 SPDK_BS_PAGE_SIZE); 5519 max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len, 5520 ctx->super->used_cluster_mask_len); 5521 num_md_pages += max_used_cluster_mask_len; 5522 5523 /* The used_blobids mask requires 1 bit per metadata page, rounded 5524 * up to the nearest page, plus a header. 5525 */ 5526 ctx->super->used_blobid_mask_start = num_md_pages; 5527 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5528 spdk_divide_round_up(bs->md_len, 8), 5529 SPDK_BS_PAGE_SIZE); 5530 num_md_pages += ctx->super->used_blobid_mask_len; 5531 5532 /* The metadata region size was chosen above */ 5533 ctx->super->md_start = bs->md_start = num_md_pages; 5534 ctx->super->md_len = bs->md_len; 5535 num_md_pages += bs->md_len; 5536 5537 num_md_lba = bs_page_to_lba(bs, num_md_pages); 5538 5539 ctx->super->size = dev->blockcnt * dev->blocklen; 5540 5541 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 5542 5543 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 5544 if (num_md_clusters > bs->total_clusters) { 5545 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 5546 "please decrease number of pages reserved for metadata " 5547 "or increase cluster size.\n"); 5548 spdk_free(ctx->super); 5549 spdk_bit_array_free(&ctx->used_clusters); 5550 free(ctx); 5551 bs_free(bs); 5552 cb_fn(cb_arg, NULL, -ENOMEM); 5553 return; 5554 } 5555 /* Claim all of the clusters used by the metadata */ 5556 for (i = 0; i < num_md_clusters; i++) { 5557 spdk_bit_array_set(ctx->used_clusters, i); 5558 } 5559 5560 bs->num_free_clusters -= num_md_clusters; 5561 bs->total_data_clusters = bs->num_free_clusters; 5562 5563 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5564 cpl.u.bs_handle.cb_fn = cb_fn; 5565 cpl.u.bs_handle.cb_arg = cb_arg; 5566 cpl.u.bs_handle.bs = bs; 5567 5568 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5569 if (!seq) { 5570 spdk_free(ctx->super); 5571 free(ctx); 5572 bs_free(bs); 5573 cb_fn(cb_arg, NULL, -ENOMEM); 5574 return; 5575 } 5576 5577 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx); 5578 5579 /* Clear metadata space */ 5580 bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 5581 5582 lba = num_md_lba; 5583 lba_count = ctx->bs->dev->blockcnt - lba; 5584 switch (opts.clear_method) { 5585 case BS_CLEAR_WITH_UNMAP: 5586 /* Trim data clusters */ 5587 bs_batch_unmap_dev(batch, lba, lba_count); 5588 break; 5589 case BS_CLEAR_WITH_WRITE_ZEROES: 5590 /* Write_zeroes to data clusters */ 5591 bs_batch_write_zeroes_dev(batch, lba, lba_count); 5592 break; 5593 case 
BS_CLEAR_WITH_NONE: 5594 default: 5595 break; 5596 } 5597 5598 bs_batch_close(batch); 5599 } 5600 5601 /* END spdk_bs_init */ 5602 5603 /* START spdk_bs_destroy */ 5604 5605 static void 5606 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5607 { 5608 struct spdk_bs_load_ctx *ctx = cb_arg; 5609 struct spdk_blob_store *bs = ctx->bs; 5610 5611 /* 5612 * We need to defer calling bs_call_cpl() until after 5613 * dev destruction, so tuck these away for later use. 5614 */ 5615 bs->unload_err = bserrno; 5616 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5617 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5618 5619 bs_sequence_finish(seq, bserrno); 5620 5621 bs_free(bs); 5622 free(ctx); 5623 } 5624 5625 void 5626 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5627 void *cb_arg) 5628 { 5629 struct spdk_bs_cpl cpl; 5630 spdk_bs_sequence_t *seq; 5631 struct spdk_bs_load_ctx *ctx; 5632 5633 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5634 5635 if (!RB_EMPTY(&bs->open_blobs)) { 5636 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5637 cb_fn(cb_arg, -EBUSY); 5638 return; 5639 } 5640 5641 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5642 cpl.u.bs_basic.cb_fn = cb_fn; 5643 cpl.u.bs_basic.cb_arg = cb_arg; 5644 5645 ctx = calloc(1, sizeof(*ctx)); 5646 if (!ctx) { 5647 cb_fn(cb_arg, -ENOMEM); 5648 return; 5649 } 5650 5651 ctx->bs = bs; 5652 5653 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5654 if (!seq) { 5655 free(ctx); 5656 cb_fn(cb_arg, -ENOMEM); 5657 return; 5658 } 5659 5660 /* Write zeroes to the super block */ 5661 bs_sequence_write_zeroes_dev(seq, 5662 bs_page_to_lba(bs, 0), 5663 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5664 bs_destroy_trim_cpl, ctx); 5665 } 5666 5667 /* END spdk_bs_destroy */ 5668 5669 /* START spdk_bs_unload */ 5670 5671 static void 5672 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5673 { 5674 spdk_bs_sequence_t *seq = ctx->seq; 5675 5676 spdk_free(ctx->super); 5677 5678 /* 5679 * We need to defer calling bs_call_cpl() until after 5680 * dev destruction, so tuck these away for later use. 
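 * Setting seq->cpl.type to SPDK_BS_CPL_TYPE_NONE keeps bs_sequence_finish()
 * from firing the user completion immediately; the copy saved in unload_cpl
 * (together with unload_err) is invoked only after bs_free() has destroyed
 * the device.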
5681 */ 5682 ctx->bs->unload_err = bserrno; 5683 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5684 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5685 5686 bs_sequence_finish(seq, bserrno); 5687 5688 bs_free(ctx->bs); 5689 free(ctx); 5690 } 5691 5692 static void 5693 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5694 { 5695 struct spdk_bs_load_ctx *ctx = cb_arg; 5696 5697 bs_unload_finish(ctx, bserrno); 5698 } 5699 5700 static void 5701 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5702 { 5703 struct spdk_bs_load_ctx *ctx = cb_arg; 5704 5705 spdk_free(ctx->mask); 5706 5707 if (bserrno != 0) { 5708 bs_unload_finish(ctx, bserrno); 5709 return; 5710 } 5711 5712 ctx->super->clean = 1; 5713 5714 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5715 } 5716 5717 static void 5718 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5719 { 5720 struct spdk_bs_load_ctx *ctx = cb_arg; 5721 5722 spdk_free(ctx->mask); 5723 ctx->mask = NULL; 5724 5725 if (bserrno != 0) { 5726 bs_unload_finish(ctx, bserrno); 5727 return; 5728 } 5729 5730 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5731 } 5732 5733 static void 5734 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5735 { 5736 struct spdk_bs_load_ctx *ctx = cb_arg; 5737 5738 spdk_free(ctx->mask); 5739 ctx->mask = NULL; 5740 5741 if (bserrno != 0) { 5742 bs_unload_finish(ctx, bserrno); 5743 return; 5744 } 5745 5746 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5747 } 5748 5749 static void 5750 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5751 { 5752 struct spdk_bs_load_ctx *ctx = cb_arg; 5753 int rc; 5754 5755 if (bserrno != 0) { 5756 bs_unload_finish(ctx, bserrno); 5757 return; 5758 } 5759 5760 rc = bs_super_validate(ctx->super, ctx->bs); 5761 if (rc != 0) { 5762 bs_unload_finish(ctx, rc); 5763 return; 5764 } 5765 5766 bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5767 } 5768 5769 void 5770 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5771 { 5772 struct spdk_bs_cpl cpl; 5773 struct spdk_bs_load_ctx *ctx; 5774 5775 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5776 5777 /* 5778 * If external snapshot channels are being destroyed while the blobstore is unloaded, the 5779 * unload is deferred until after the channel destruction completes. 
5780 */ 5781 if (bs->esnap_channels_unloading != 0) { 5782 if (bs->esnap_unload_cb_fn != NULL) { 5783 SPDK_ERRLOG("Blobstore unload in progress\n"); 5784 cb_fn(cb_arg, -EBUSY); 5785 return; 5786 } 5787 SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32 5788 " esnap clones are unloading\n", bs->esnap_channels_unloading); 5789 bs->esnap_unload_cb_fn = cb_fn; 5790 bs->esnap_unload_cb_arg = cb_arg; 5791 return; 5792 } 5793 if (bs->esnap_unload_cb_fn != NULL) { 5794 SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n"); 5795 assert(bs->esnap_unload_cb_fn == cb_fn); 5796 assert(bs->esnap_unload_cb_arg == cb_arg); 5797 bs->esnap_unload_cb_fn = NULL; 5798 bs->esnap_unload_cb_arg = NULL; 5799 } 5800 5801 if (!RB_EMPTY(&bs->open_blobs)) { 5802 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5803 cb_fn(cb_arg, -EBUSY); 5804 return; 5805 } 5806 5807 ctx = calloc(1, sizeof(*ctx)); 5808 if (!ctx) { 5809 cb_fn(cb_arg, -ENOMEM); 5810 return; 5811 } 5812 5813 ctx->bs = bs; 5814 5815 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5816 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5817 if (!ctx->super) { 5818 free(ctx); 5819 cb_fn(cb_arg, -ENOMEM); 5820 return; 5821 } 5822 5823 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5824 cpl.u.bs_basic.cb_fn = cb_fn; 5825 cpl.u.bs_basic.cb_arg = cb_arg; 5826 5827 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5828 if (!ctx->seq) { 5829 spdk_free(ctx->super); 5830 free(ctx); 5831 cb_fn(cb_arg, -ENOMEM); 5832 return; 5833 } 5834 5835 /* Read super block */ 5836 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5837 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5838 bs_unload_read_super_cpl, ctx); 5839 } 5840 5841 /* END spdk_bs_unload */ 5842 5843 /* START spdk_bs_set_super */ 5844 5845 struct spdk_bs_set_super_ctx { 5846 struct spdk_blob_store *bs; 5847 struct spdk_bs_super_block *super; 5848 }; 5849 5850 static void 5851 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5852 { 5853 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5854 5855 if (bserrno != 0) { 5856 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5857 } 5858 5859 spdk_free(ctx->super); 5860 5861 bs_sequence_finish(seq, bserrno); 5862 5863 free(ctx); 5864 } 5865 5866 static void 5867 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5868 { 5869 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5870 int rc; 5871 5872 if (bserrno != 0) { 5873 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5874 spdk_free(ctx->super); 5875 bs_sequence_finish(seq, bserrno); 5876 free(ctx); 5877 return; 5878 } 5879 5880 rc = bs_super_validate(ctx->super, ctx->bs); 5881 if (rc != 0) { 5882 SPDK_ERRLOG("Not a valid super block\n"); 5883 spdk_free(ctx->super); 5884 bs_sequence_finish(seq, rc); 5885 free(ctx); 5886 return; 5887 } 5888 5889 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 5890 } 5891 5892 void 5893 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 5894 spdk_bs_op_complete cb_fn, void *cb_arg) 5895 { 5896 struct spdk_bs_cpl cpl; 5897 spdk_bs_sequence_t *seq; 5898 struct spdk_bs_set_super_ctx *ctx; 5899 5900 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 5901 5902 ctx = calloc(1, sizeof(*ctx)); 5903 if (!ctx) { 5904 cb_fn(cb_arg, -ENOMEM); 5905 return; 5906 } 5907 5908 ctx->bs = bs; 5909 5910 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5911 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5912 if (!ctx->super) { 5913 free(ctx); 5914 
cb_fn(cb_arg, -ENOMEM); 5915 return; 5916 } 5917 5918 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5919 cpl.u.bs_basic.cb_fn = cb_fn; 5920 cpl.u.bs_basic.cb_arg = cb_arg; 5921 5922 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5923 if (!seq) { 5924 spdk_free(ctx->super); 5925 free(ctx); 5926 cb_fn(cb_arg, -ENOMEM); 5927 return; 5928 } 5929 5930 bs->super_blob = blobid; 5931 5932 /* Read super block */ 5933 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 5934 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5935 bs_set_super_read_cpl, ctx); 5936 } 5937 5938 /* END spdk_bs_set_super */ 5939 5940 void 5941 spdk_bs_get_super(struct spdk_blob_store *bs, 5942 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5943 { 5944 if (bs->super_blob == SPDK_BLOBID_INVALID) { 5945 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 5946 } else { 5947 cb_fn(cb_arg, bs->super_blob, 0); 5948 } 5949 } 5950 5951 uint64_t 5952 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 5953 { 5954 return bs->cluster_sz; 5955 } 5956 5957 uint64_t 5958 spdk_bs_get_page_size(struct spdk_blob_store *bs) 5959 { 5960 return SPDK_BS_PAGE_SIZE; 5961 } 5962 5963 uint64_t 5964 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 5965 { 5966 return bs->io_unit_size; 5967 } 5968 5969 uint64_t 5970 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 5971 { 5972 return bs->num_free_clusters; 5973 } 5974 5975 uint64_t 5976 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 5977 { 5978 return bs->total_data_clusters; 5979 } 5980 5981 static int 5982 bs_register_md_thread(struct spdk_blob_store *bs) 5983 { 5984 bs->md_channel = spdk_get_io_channel(bs); 5985 if (!bs->md_channel) { 5986 SPDK_ERRLOG("Failed to get IO channel.\n"); 5987 return -1; 5988 } 5989 5990 return 0; 5991 } 5992 5993 static int 5994 bs_unregister_md_thread(struct spdk_blob_store *bs) 5995 { 5996 spdk_put_io_channel(bs->md_channel); 5997 5998 return 0; 5999 } 6000 6001 spdk_blob_id 6002 spdk_blob_get_id(struct spdk_blob *blob) 6003 { 6004 assert(blob != NULL); 6005 6006 return blob->id; 6007 } 6008 6009 uint64_t 6010 spdk_blob_get_num_pages(struct spdk_blob *blob) 6011 { 6012 assert(blob != NULL); 6013 6014 return bs_cluster_to_page(blob->bs, blob->active.num_clusters); 6015 } 6016 6017 uint64_t 6018 spdk_blob_get_num_io_units(struct spdk_blob *blob) 6019 { 6020 assert(blob != NULL); 6021 6022 return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs); 6023 } 6024 6025 uint64_t 6026 spdk_blob_get_num_clusters(struct spdk_blob *blob) 6027 { 6028 assert(blob != NULL); 6029 6030 return blob->active.num_clusters; 6031 } 6032 6033 static uint64_t 6034 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated) 6035 { 6036 uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob); 6037 6038 while (offset < blob_io_unit_num) { 6039 if (bs_io_unit_is_allocated(blob, offset) == is_allocated) { 6040 return offset; 6041 } 6042 6043 offset += bs_num_io_units_to_cluster_boundary(blob, offset); 6044 } 6045 6046 return UINT64_MAX; 6047 } 6048 6049 uint64_t 6050 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6051 { 6052 return blob_find_io_unit(blob, offset, true); 6053 } 6054 6055 uint64_t 6056 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6057 { 6058 return blob_find_io_unit(blob, offset, false); 6059 } 6060 6061 /* START spdk_bs_create_blob */ 6062 6063 static void 6064 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6065 { 6066 struct spdk_blob *blob = 
cb_arg; 6067 uint32_t page_idx = bs_blobid_to_page(blob->id); 6068 6069 if (bserrno != 0) { 6070 spdk_spin_lock(&blob->bs->used_lock); 6071 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 6072 bs_release_md_page(blob->bs, page_idx); 6073 spdk_spin_unlock(&blob->bs->used_lock); 6074 } 6075 6076 blob_free(blob); 6077 6078 bs_sequence_finish(seq, bserrno); 6079 } 6080 6081 static int 6082 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 6083 bool internal) 6084 { 6085 uint64_t i; 6086 size_t value_len = 0; 6087 int rc; 6088 const void *value = NULL; 6089 if (xattrs->count > 0 && xattrs->get_value == NULL) { 6090 return -EINVAL; 6091 } 6092 for (i = 0; i < xattrs->count; i++) { 6093 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 6094 if (value == NULL || value_len == 0) { 6095 return -EINVAL; 6096 } 6097 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 6098 if (rc < 0) { 6099 return rc; 6100 } 6101 } 6102 return 0; 6103 } 6104 6105 static void 6106 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 6107 { 6108 #define FIELD_OK(field) \ 6109 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 6110 6111 #define SET_FIELD(field) \ 6112 if (FIELD_OK(field)) { \ 6113 dst->field = src->field; \ 6114 } \ 6115 6116 SET_FIELD(num_clusters); 6117 SET_FIELD(thin_provision); 6118 SET_FIELD(clear_method); 6119 6120 if (FIELD_OK(xattrs)) { 6121 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 6122 } 6123 6124 SET_FIELD(use_extent_table); 6125 SET_FIELD(esnap_id); 6126 SET_FIELD(esnap_id_len); 6127 6128 dst->opts_size = src->opts_size; 6129 6130 /* You should not remove this statement, but need to update the assert statement 6131 * if you add a new field, and also add a corresponding SET_FIELD statement */ 6132 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size"); 6133 6134 #undef FIELD_OK 6135 #undef SET_FIELD 6136 } 6137 6138 static void 6139 bs_create_blob(struct spdk_blob_store *bs, 6140 const struct spdk_blob_opts *opts, 6141 const struct spdk_blob_xattr_opts *internal_xattrs, 6142 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6143 { 6144 struct spdk_blob *blob; 6145 uint32_t page_idx; 6146 struct spdk_bs_cpl cpl; 6147 struct spdk_blob_opts opts_local; 6148 struct spdk_blob_xattr_opts internal_xattrs_default; 6149 spdk_bs_sequence_t *seq; 6150 spdk_blob_id id; 6151 int rc; 6152 6153 assert(spdk_get_thread() == bs->md_thread); 6154 6155 spdk_spin_lock(&bs->used_lock); 6156 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 6157 if (page_idx == UINT32_MAX) { 6158 spdk_spin_unlock(&bs->used_lock); 6159 cb_fn(cb_arg, 0, -ENOMEM); 6160 return; 6161 } 6162 spdk_bit_array_set(bs->used_blobids, page_idx); 6163 bs_claim_md_page(bs, page_idx); 6164 spdk_spin_unlock(&bs->used_lock); 6165 6166 id = bs_page_to_blobid(page_idx); 6167 6168 SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx); 6169 6170 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 6171 if (opts) { 6172 blob_opts_copy(opts, &opts_local); 6173 } 6174 6175 blob = blob_alloc(bs, id); 6176 if (!blob) { 6177 rc = -ENOMEM; 6178 goto error; 6179 } 6180 6181 blob->use_extent_table = opts_local.use_extent_table; 6182 if (blob->use_extent_table) { 6183 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 6184 } 6185 6186 if (!internal_xattrs) { 6187 blob_xattrs_init(&internal_xattrs_default); 6188 internal_xattrs = &internal_xattrs_default; 
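/* internal_xattrs now points at an empty set, so the xattr handling
 * below does not need a special case for the NULL argument. */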
6189 }
6190
6191 rc = blob_set_xattrs(blob, &opts_local.xattrs, false);
6192 if (rc < 0) {
6193 goto error;
6194 }
6195
6196 rc = blob_set_xattrs(blob, internal_xattrs, true);
6197 if (rc < 0) {
6198 goto error;
6199 }
6200
6201 if (opts_local.thin_provision) {
6202 blob_set_thin_provision(blob);
6203 }
6204
6205 blob_set_clear_method(blob, opts_local.clear_method);
6206
6207 if (opts_local.esnap_id != NULL) {
6208 if (opts_local.esnap_id_len > UINT16_MAX) {
6209 SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n",
6210 opts_local.esnap_id_len);
6211 rc = -EINVAL;
6212 goto error;
6213
6214 }
6215 blob_set_thin_provision(blob);
6216 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6217 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID,
6218 opts_local.esnap_id, opts_local.esnap_id_len, true);
6219 if (rc != 0) {
6220 goto error;
6221 }
6222 }
6223
6224 rc = blob_resize(blob, opts_local.num_clusters);
6225 if (rc < 0) {
6226 goto error;
6227 }
6228 cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6229 cpl.u.blobid.cb_fn = cb_fn;
6230 cpl.u.blobid.cb_arg = cb_arg;
6231 cpl.u.blobid.blobid = blob->id;
6232
6233 seq = bs_sequence_start_bs(bs->md_channel, &cpl);
6234 if (!seq) {
6235 rc = -ENOMEM;
6236 goto error;
6237 }
6238
6239 blob_persist(seq, blob, bs_create_blob_cpl, blob);
6240 return;
6241
6242 error:
6243 SPDK_ERRLOG("Failed to create blob: %s, size in clusters: %" PRIu64 "\n",
6244 spdk_strerror(rc), opts_local.num_clusters);
6245 if (blob != NULL) {
6246 blob_free(blob);
6247 }
6248 spdk_spin_lock(&bs->used_lock);
6249 spdk_bit_array_clear(bs->used_blobids, page_idx);
6250 bs_release_md_page(bs, page_idx);
6251 spdk_spin_unlock(&bs->used_lock);
6252 cb_fn(cb_arg, 0, rc);
6253 }
6254
6255 void
6256 spdk_bs_create_blob(struct spdk_blob_store *bs,
6257 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6258 {
6259 bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
6260 }
6261
6262 void
6263 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
6264 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6265 {
6266 bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
6267 }
6268
6269 /* END spdk_bs_create_blob */
6270
6271 /* START blob_cleanup */
6272
6273 struct spdk_clone_snapshot_ctx {
6274 struct spdk_bs_cpl cpl;
6275 int bserrno;
6276 bool frozen;
6277
6278 struct spdk_io_channel *channel;
6279
6280 /* Current cluster for inflate operation */
6281 uint64_t cluster;
6282
6283 /* For inflation, force allocation of all unallocated clusters and remove
6284 * thin-provisioning. Otherwise, only decouple the parent and keep the clone thin. */
6285 bool allocate_all;
6286
6287 struct {
6288 spdk_blob_id id;
6289 struct spdk_blob *blob;
6290 bool md_ro;
6291 } original;
6292 struct {
6293 spdk_blob_id id;
6294 struct spdk_blob *blob;
6295 } new;
6296
6297 /* xattrs specified for snapshot/clones only. They have no impact on
6298 * the original blob's xattrs.
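 * They are supplied through the snapshot_xattrs/clone_xattrs arguments of
 * spdk_bs_create_snapshot() and spdk_bs_create_clone() and apply only to
 * the newly created blob.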
*/ 6299 const struct spdk_blob_xattr_opts *xattrs; 6300 }; 6301 6302 static void 6303 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 6304 { 6305 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 6306 struct spdk_bs_cpl *cpl = &ctx->cpl; 6307 6308 if (bserrno != 0) { 6309 if (ctx->bserrno != 0) { 6310 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6311 } else { 6312 ctx->bserrno = bserrno; 6313 } 6314 } 6315 6316 switch (cpl->type) { 6317 case SPDK_BS_CPL_TYPE_BLOBID: 6318 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 6319 break; 6320 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 6321 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 6322 break; 6323 default: 6324 SPDK_UNREACHABLE(); 6325 break; 6326 } 6327 6328 free(ctx); 6329 } 6330 6331 static void 6332 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6333 { 6334 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6335 struct spdk_blob *origblob = ctx->original.blob; 6336 6337 if (bserrno != 0) { 6338 if (ctx->bserrno != 0) { 6339 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 6340 } else { 6341 ctx->bserrno = bserrno; 6342 } 6343 } 6344 6345 ctx->original.id = origblob->id; 6346 origblob->locked_operation_in_progress = false; 6347 6348 /* Revert md_ro to original state */ 6349 origblob->md_ro = ctx->original.md_ro; 6350 6351 spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx); 6352 } 6353 6354 static void 6355 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 6356 { 6357 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6358 struct spdk_blob *origblob = ctx->original.blob; 6359 6360 if (bserrno != 0) { 6361 if (ctx->bserrno != 0) { 6362 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6363 } else { 6364 ctx->bserrno = bserrno; 6365 } 6366 } 6367 6368 if (ctx->frozen) { 6369 /* Unfreeze any outstanding I/O */ 6370 blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx); 6371 } else { 6372 bs_snapshot_unfreeze_cpl(ctx, 0); 6373 } 6374 6375 } 6376 6377 static void 6378 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno) 6379 { 6380 struct spdk_blob *newblob = ctx->new.blob; 6381 6382 if (bserrno != 0) { 6383 if (ctx->bserrno != 0) { 6384 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6385 } else { 6386 ctx->bserrno = bserrno; 6387 } 6388 } 6389 6390 ctx->new.id = newblob->id; 6391 spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6392 } 6393 6394 /* END blob_cleanup */ 6395 6396 /* START spdk_bs_create_snapshot */ 6397 6398 static void 6399 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 6400 { 6401 uint64_t *cluster_temp; 6402 uint32_t *extent_page_temp; 6403 6404 cluster_temp = blob1->active.clusters; 6405 blob1->active.clusters = blob2->active.clusters; 6406 blob2->active.clusters = cluster_temp; 6407 6408 extent_page_temp = blob1->active.extent_pages; 6409 blob1->active.extent_pages = blob2->active.extent_pages; 6410 blob2->active.extent_pages = extent_page_temp; 6411 } 6412 6413 /* Copies an internal xattr */ 6414 static int 6415 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name) 6416 { 6417 const void *val = NULL; 6418 size_t len; 6419 int bserrno; 6420 6421 bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true); 6422 if (bserrno != 0) { 6423 SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name); 6424 return bserrno; 6425 } 6426 6427 bserrno = 
blob_set_xattr(toblob, name, val, len, true);
6428 if (bserrno != 0) {
6429 SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n",
6430 name, toblob->id);
6431 return bserrno;
6432 }
6433 return 0;
6434 }
6435
6436 static void
6437 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
6438 {
6439 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6440 struct spdk_blob *origblob = ctx->original.blob;
6441 struct spdk_blob *newblob = ctx->new.blob;
6442
6443 if (bserrno != 0) {
6444 bs_snapshot_swap_cluster_maps(newblob, origblob);
6445 if (blob_is_esnap_clone(newblob)) {
6446 bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6447 origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6448 }
6449 bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6450 return;
6451 }
6452
6453 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
6454 bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
6455 if (bserrno != 0) {
6456 bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6457 return;
6458 }
6459
6460 bs_blob_list_add(ctx->original.blob);
6461
6462 spdk_blob_set_read_only(newblob);
6463
6464 /* sync snapshot metadata */
6465 spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6466 }
6467
6468 static void
6469 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
6470 {
6471 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6472 struct spdk_blob *origblob = ctx->original.blob;
6473 struct spdk_blob *newblob = ctx->new.blob;
6474
6475 if (bserrno != 0) {
6476 /* return cluster map back to original */
6477 bs_snapshot_swap_cluster_maps(newblob, origblob);
6478
6479 /* Newblob md sync failed. Valid clusters are only present in origblob.
6480 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred.
6481 * Newblob needs to be reverted to the thin-provisioned state it had at creation to close properly.
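 * After the swap above, newblob once again holds the all-zero maps it was
 * created with, which is exactly what the asserts below check.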
*/ 6482 blob_set_thin_provision(newblob); 6483 assert(spdk_mem_all_zero(newblob->active.clusters, 6484 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6485 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6486 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6487 6488 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6489 return; 6490 } 6491 6492 /* Set internal xattr for snapshot id */ 6493 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 6494 if (bserrno != 0) { 6495 /* return cluster map back to original */ 6496 bs_snapshot_swap_cluster_maps(newblob, origblob); 6497 blob_set_thin_provision(newblob); 6498 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6499 return; 6500 } 6501 6502 /* Create new back_bs_dev for snapshot */ 6503 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 6504 if (origblob->back_bs_dev == NULL) { 6505 /* return cluster map back to original */ 6506 bs_snapshot_swap_cluster_maps(newblob, origblob); 6507 blob_set_thin_provision(newblob); 6508 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 6509 return; 6510 } 6511 6512 /* Remove the xattr that references an external snapshot */ 6513 if (blob_is_esnap_clone(origblob)) { 6514 origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6515 bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6516 if (bserrno != 0) { 6517 if (bserrno == -ENOENT) { 6518 SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID 6519 " xattr to remove\n", origblob->id); 6520 assert(false); 6521 } else { 6522 /* return cluster map back to original */ 6523 bs_snapshot_swap_cluster_maps(newblob, origblob); 6524 blob_set_thin_provision(newblob); 6525 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6526 return; 6527 } 6528 } 6529 } 6530 6531 bs_blob_list_remove(origblob); 6532 origblob->parent_id = newblob->id; 6533 /* set clone blob as thin provisioned */ 6534 blob_set_thin_provision(origblob); 6535 6536 bs_blob_list_add(newblob); 6537 6538 /* sync clone metadata */ 6539 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 6540 } 6541 6542 static void 6543 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 6544 { 6545 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6546 struct spdk_blob *origblob = ctx->original.blob; 6547 struct spdk_blob *newblob = ctx->new.blob; 6548 int bserrno; 6549 6550 if (rc != 0) { 6551 bs_clone_snapshot_newblob_cleanup(ctx, rc); 6552 return; 6553 } 6554 6555 ctx->frozen = true; 6556 6557 if (blob_is_esnap_clone(origblob)) { 6558 /* Clean up any channels associated with the original blob id because future IO will 6559 * perform IO using the snapshot blob_id. 
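 * Those channels are cached per blob id; once the back_bs_dev is handed
 * over to the snapshot (below), entries cached under the original id
 * would be stale.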
6560 */ 6561 blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL); 6562 } 6563 if (newblob->back_bs_dev) { 6564 blob_back_bs_destroy(newblob); 6565 } 6566 /* set new back_bs_dev for snapshot */ 6567 newblob->back_bs_dev = origblob->back_bs_dev; 6568 /* Set invalid flags from origblob */ 6569 newblob->invalid_flags = origblob->invalid_flags; 6570 6571 /* inherit parent from original blob if set */ 6572 newblob->parent_id = origblob->parent_id; 6573 switch (origblob->parent_id) { 6574 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 6575 bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6576 if (bserrno != 0) { 6577 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6578 return; 6579 } 6580 break; 6581 case SPDK_BLOBID_INVALID: 6582 break; 6583 default: 6584 /* Set internal xattr for snapshot id */ 6585 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6586 &origblob->parent_id, sizeof(spdk_blob_id), true); 6587 if (bserrno != 0) { 6588 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6589 return; 6590 } 6591 } 6592 6593 /* swap cluster maps */ 6594 bs_snapshot_swap_cluster_maps(newblob, origblob); 6595 6596 /* Set the clear method on the new blob to match the original. */ 6597 blob_set_clear_method(newblob, origblob->clear_method); 6598 6599 /* sync snapshot metadata */ 6600 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6601 } 6602 6603 static void 6604 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6605 { 6606 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6607 struct spdk_blob *origblob = ctx->original.blob; 6608 struct spdk_blob *newblob = _blob; 6609 6610 if (bserrno != 0) { 6611 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6612 return; 6613 } 6614 6615 ctx->new.blob = newblob; 6616 assert(spdk_blob_is_thin_provisioned(newblob)); 6617 assert(spdk_mem_all_zero(newblob->active.clusters, 6618 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6619 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6620 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6621 6622 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6623 } 6624 6625 static void 6626 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6627 { 6628 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6629 struct spdk_blob *origblob = ctx->original.blob; 6630 6631 if (bserrno != 0) { 6632 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6633 return; 6634 } 6635 6636 ctx->new.id = blobid; 6637 ctx->cpl.u.blobid.blobid = blobid; 6638 6639 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6640 } 6641 6642 6643 static void 6644 bs_xattr_snapshot(void *arg, const char *name, 6645 const void **value, size_t *value_len) 6646 { 6647 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6648 6649 struct spdk_blob *blob = (struct spdk_blob *)arg; 6650 *value = &blob->id; 6651 *value_len = sizeof(blob->id); 6652 } 6653 6654 static void 6655 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6656 { 6657 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6658 struct spdk_blob_opts opts; 6659 struct spdk_blob_xattr_opts internal_xattrs; 6660 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6661 6662 if (bserrno != 0) { 6663 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6664 return; 6665 } 6666 6667 
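/*
 * This completion begins the snapshot sequence: create a thin blob of the
 * same size, open it, freeze I/O on the origin, swap the cluster maps, and
 * sync both blobs' metadata. A minimal caller-side sketch (the callback
 * name below is hypothetical, not part of this file):
 *
 *     static void
 *     snap_done(void *cb_arg, spdk_blob_id snap_id, int bserrno)
 *     {
 *             if (bserrno == 0) {
 *                     SPDK_NOTICELOG("created snapshot 0x%" PRIx64 "\n", snap_id);
 *             }
 *     }
 *
 *     spdk_bs_create_snapshot(bs, blob_id, NULL, snap_done, NULL);
 */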
ctx->original.blob = _blob; 6668 6669 if (_blob->data_ro || _blob->md_ro) { 6670 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id 0x%" 6671 PRIx64 "\n", _blob->id); 6672 ctx->bserrno = -EINVAL; 6673 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6674 return; 6675 } 6676 6677 if (_blob->locked_operation_in_progress) { 6678 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6679 ctx->bserrno = -EBUSY; 6680 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6681 return; 6682 } 6683 6684 _blob->locked_operation_in_progress = true; 6685 6686 spdk_blob_opts_init(&opts, sizeof(opts)); 6687 blob_xattrs_init(&internal_xattrs); 6688 6689 /* Change the size of new blob to the same as in original blob, 6690 * but do not allocate clusters */ 6691 opts.thin_provision = true; 6692 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6693 opts.use_extent_table = _blob->use_extent_table; 6694 6695 /* If there are any xattrs specified for snapshot, set them now */ 6696 if (ctx->xattrs) { 6697 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6698 } 6699 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6700 internal_xattrs.count = 1; 6701 internal_xattrs.ctx = _blob; 6702 internal_xattrs.names = xattrs_names; 6703 internal_xattrs.get_value = bs_xattr_snapshot; 6704 6705 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6706 bs_snapshot_newblob_create_cpl, ctx); 6707 } 6708 6709 void 6710 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 6711 const struct spdk_blob_xattr_opts *snapshot_xattrs, 6712 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6713 { 6714 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6715 6716 if (!ctx) { 6717 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6718 return; 6719 } 6720 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6721 ctx->cpl.u.blobid.cb_fn = cb_fn; 6722 ctx->cpl.u.blobid.cb_arg = cb_arg; 6723 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6724 ctx->bserrno = 0; 6725 ctx->frozen = false; 6726 ctx->original.id = blobid; 6727 ctx->xattrs = snapshot_xattrs; 6728 6729 spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx); 6730 } 6731 /* END spdk_bs_create_snapshot */ 6732 6733 /* START spdk_bs_create_clone */ 6734 6735 static void 6736 bs_xattr_clone(void *arg, const char *name, 6737 const void **value, size_t *value_len) 6738 { 6739 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 6740 6741 struct spdk_blob *blob = (struct spdk_blob *)arg; 6742 *value = &blob->id; 6743 *value_len = sizeof(blob->id); 6744 } 6745 6746 static void 6747 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6748 { 6749 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6750 struct spdk_blob *clone = _blob; 6751 6752 ctx->new.blob = clone; 6753 bs_blob_list_add(clone); 6754 6755 spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx); 6756 } 6757 6758 static void 6759 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6760 { 6761 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6762 6763 ctx->cpl.u.blobid.blobid = blobid; 6764 spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx); 6765 } 6766 6767 static void 6768 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6769 { 6770 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6771 struct 
spdk_blob_opts opts; 6772 struct spdk_blob_xattr_opts internal_xattrs; 6773 char *xattr_names[] = { BLOB_SNAPSHOT }; 6774 6775 if (bserrno != 0) { 6776 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6777 return; 6778 } 6779 6780 ctx->original.blob = _blob; 6781 ctx->original.md_ro = _blob->md_ro; 6782 6783 if (!_blob->data_ro || !_blob->md_ro) { 6784 SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n"); 6785 ctx->bserrno = -EINVAL; 6786 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6787 return; 6788 } 6789 6790 if (_blob->locked_operation_in_progress) { 6791 SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n"); 6792 ctx->bserrno = -EBUSY; 6793 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6794 return; 6795 } 6796 6797 _blob->locked_operation_in_progress = true; 6798 6799 spdk_blob_opts_init(&opts, sizeof(opts)); 6800 blob_xattrs_init(&internal_xattrs); 6801 6802 opts.thin_provision = true; 6803 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6804 opts.use_extent_table = _blob->use_extent_table; 6805 if (ctx->xattrs) { 6806 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6807 } 6808 6809 /* Set internal xattr BLOB_SNAPSHOT */ 6810 internal_xattrs.count = 1; 6811 internal_xattrs.ctx = _blob; 6812 internal_xattrs.names = xattr_names; 6813 internal_xattrs.get_value = bs_xattr_clone; 6814 6815 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6816 bs_clone_newblob_create_cpl, ctx); 6817 } 6818 6819 void 6820 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6821 const struct spdk_blob_xattr_opts *clone_xattrs, 6822 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6823 { 6824 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6825 6826 if (!ctx) { 6827 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6828 return; 6829 } 6830 6831 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6832 ctx->cpl.u.blobid.cb_fn = cb_fn; 6833 ctx->cpl.u.blobid.cb_arg = cb_arg; 6834 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6835 ctx->bserrno = 0; 6836 ctx->xattrs = clone_xattrs; 6837 ctx->original.id = blobid; 6838 6839 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6840 } 6841 6842 /* END spdk_bs_create_clone */ 6843 6844 /* START spdk_bs_inflate_blob */ 6845 6846 static void 6847 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6848 { 6849 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6850 struct spdk_blob *_blob = ctx->original.blob; 6851 6852 if (bserrno != 0) { 6853 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6854 return; 6855 } 6856 6857 /* Temporarily override md_ro flag for MD modification */ 6858 _blob->md_ro = false; 6859 6860 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6861 if (bserrno != 0) { 6862 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6863 return; 6864 } 6865 6866 assert(_parent != NULL); 6867 6868 bs_blob_list_remove(_blob); 6869 _blob->parent_id = _parent->id; 6870 6871 blob_back_bs_destroy(_blob); 6872 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6873 bs_blob_list_add(_blob); 6874 6875 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6876 } 6877 6878 static void 6879 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 6880 { 6881 struct spdk_blob *_blob = ctx->original.blob; 6882 struct spdk_blob *_parent; 6883 6884 if (ctx->allocate_all) { 6885 /* remove thin provisioning */ 6886 bs_blob_list_remove(_blob); 
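/* Every cluster was just allocated by the touch pass, so the parent
 * reference can be dropped entirely: remove the esnap or BLOB_SNAPSHOT
 * xattr, destroy the backing dev, and clear the thin-provision flag. */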
6887 if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6888 blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
6889 _blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
6890 } else {
6891 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6892 }
6893 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
6894 blob_back_bs_destroy(_blob);
6895 _blob->parent_id = SPDK_BLOBID_INVALID;
6896 } else {
6897 /* For now, esnap clones always have allocate_all set. */
6898 assert(!blob_is_esnap_clone(_blob));
6899
6900 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
6901 if (_parent->parent_id != SPDK_BLOBID_INVALID) {
6902 /* We must change the parent of the inflated blob */
6903 spdk_bs_open_blob(_blob->bs, _parent->parent_id,
6904 bs_inflate_blob_set_parent_cpl, ctx);
6905 return;
6906 }
6907
6908 bs_blob_list_remove(_blob);
6909 _blob->parent_id = SPDK_BLOBID_INVALID;
6910 blob_back_bs_destroy(_blob);
6911 _blob->back_bs_dev = bs_create_zeroes_dev();
6912 }
6913
6914 /* Temporarily override md_ro flag for MD modification */
6915 _blob->md_ro = false;
6916 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6917 _blob->state = SPDK_BLOB_STATE_DIRTY;
6918
6919 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
6920 }
6921
6922 /* Check if cluster needs allocation */
6923 static inline bool
6924 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
6925 {
6926 struct spdk_blob_bs_dev *b;
6927
6928 assert(blob != NULL);
6929
6930 if (blob->active.clusters[cluster] != 0) {
6931 /* Cluster is already allocated */
6932 return false;
6933 }
6934
6935 if (blob->parent_id == SPDK_BLOBID_INVALID) {
6936 /* Blob has no parent blob */
6937 return allocate_all;
6938 }
6939
6940 if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6941 return true;
6942 }
6943
6944 b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
6945 return (allocate_all || b->blob->active.clusters[cluster] != 0);
6946 }
6947
6948 static void
6949 bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
6950 {
6951 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6952 struct spdk_blob *_blob = ctx->original.blob;
6953 struct spdk_bs_cpl cpl;
6954 spdk_bs_user_op_t *op;
6955 uint64_t offset;
6956
6957 if (bserrno != 0) {
6958 bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6959 return;
6960 }
6961
6962 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
6963 if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
6964 break;
6965 }
6966 }
6967
6968 if (ctx->cluster < _blob->active.num_clusters) {
6969 offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);
6970
6971 /* We may safely increment the cluster index before copying */
6972 ctx->cluster++;
6973
6974 /* Use a dummy 0B read as a context for cluster copy */
6975 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6976 cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
6977 cpl.u.blob_basic.cb_arg = ctx;
6978
6979 op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
6980 NULL, 0, offset, 0);
6981 if (!op) {
6982 bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
6983 return;
6984 }
6985
6986 bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
6987 } else {
6988 bs_inflate_blob_done(ctx);
6989 }
6990 }
6991
6992 static void
6993 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6994 {
6995 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6996 uint64_t clusters_needed;
6997
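/* allocate_all selects between the two public entry points defined below:
 * spdk_bs_inflate_blob() allocates every unallocated cluster, while
 * spdk_bs_blob_decouple_parent() only copies clusters backed by the
 * immediate parent and keeps the blob thin. */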
uint64_t i;
6998
6999 if (bserrno != 0) {
7000 bs_clone_snapshot_cleanup_finish(ctx, bserrno);
7001 return;
7002 }
7003
7004 ctx->original.blob = _blob;
7005 ctx->original.md_ro = _blob->md_ro;
7006
7007 if (_blob->locked_operation_in_progress) {
7008 SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
7009 ctx->bserrno = -EBUSY;
7010 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
7011 return;
7012 }
7013
7014 _blob->locked_operation_in_progress = true;
7015
7016 switch (_blob->parent_id) {
7017 case SPDK_BLOBID_INVALID:
7018 if (!ctx->allocate_all) {
7019 /* This blob has no parent, so we cannot decouple it. */
7020 SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
7021 bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
7022 return;
7023 }
7024 break;
7025 case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
7026 /*
7027 * It would be better to rely on back_bs_dev->is_zeroes() to determine which
7028 * clusters require allocation. Until there is a blobstore consumer that
7029 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes(), it is not
7030 * worth the effort.
7031 */
7032 ctx->allocate_all = true;
7033 break;
7034 default:
7035 break;
7036 }
7037
7038 if (spdk_blob_is_thin_provisioned(_blob) == false) {
7039 /* This is not a thin-provisioned blob. No need to inflate. */
7040 bs_clone_snapshot_origblob_cleanup(ctx, 0);
7041 return;
7042 }
7043
7044 /* Do two passes - one to verify that we can obtain enough clusters
7045 * and another to actually claim them.
7046 */
7047 clusters_needed = 0;
7048 for (i = 0; i < _blob->active.num_clusters; i++) {
7049 if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
7050 clusters_needed++;
7051 }
7052 }
7053
7054 if (clusters_needed > _blob->bs->num_free_clusters) {
7055 /* Not enough free clusters. Cannot satisfy the request.
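 * The pass above is only a pre-flight check; clusters are claimed one at
 * a time by the copy loop, so failing with -ENOSPC here avoids starting
 * work that could not complete.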
*/ 7056 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 7057 return; 7058 } 7059 7060 ctx->cluster = 0; 7061 bs_inflate_blob_touch_next(ctx, 0); 7062 } 7063 7064 static void 7065 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7066 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 7067 { 7068 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 7069 7070 if (!ctx) { 7071 cb_fn(cb_arg, -ENOMEM); 7072 return; 7073 } 7074 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7075 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7076 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7077 ctx->bserrno = 0; 7078 ctx->original.id = blobid; 7079 ctx->channel = channel; 7080 ctx->allocate_all = allocate_all; 7081 7082 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 7083 } 7084 7085 void 7086 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7087 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7088 { 7089 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 7090 } 7091 7092 void 7093 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7094 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7095 { 7096 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 7097 } 7098 /* END spdk_bs_inflate_blob */ 7099 7100 /* START spdk_blob_resize */ 7101 struct spdk_bs_resize_ctx { 7102 spdk_blob_op_complete cb_fn; 7103 void *cb_arg; 7104 struct spdk_blob *blob; 7105 uint64_t sz; 7106 int rc; 7107 }; 7108 7109 static void 7110 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 7111 { 7112 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7113 7114 if (rc != 0) { 7115 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 7116 } 7117 7118 if (ctx->rc != 0) { 7119 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 7120 rc = ctx->rc; 7121 } 7122 7123 ctx->blob->locked_operation_in_progress = false; 7124 7125 ctx->cb_fn(ctx->cb_arg, rc); 7126 free(ctx); 7127 } 7128 7129 static void 7130 bs_resize_freeze_cpl(void *cb_arg, int rc) 7131 { 7132 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7133 7134 if (rc != 0) { 7135 ctx->blob->locked_operation_in_progress = false; 7136 ctx->cb_fn(ctx->cb_arg, rc); 7137 free(ctx); 7138 return; 7139 } 7140 7141 ctx->rc = blob_resize(ctx->blob, ctx->sz); 7142 7143 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 7144 } 7145 7146 void 7147 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 7148 { 7149 struct spdk_bs_resize_ctx *ctx; 7150 7151 blob_verify_md_op(blob); 7152 7153 SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz); 7154 7155 if (blob->md_ro) { 7156 cb_fn(cb_arg, -EPERM); 7157 return; 7158 } 7159 7160 if (sz == blob->active.num_clusters) { 7161 cb_fn(cb_arg, 0); 7162 return; 7163 } 7164 7165 if (blob->locked_operation_in_progress) { 7166 cb_fn(cb_arg, -EBUSY); 7167 return; 7168 } 7169 7170 ctx = calloc(1, sizeof(*ctx)); 7171 if (!ctx) { 7172 cb_fn(cb_arg, -ENOMEM); 7173 return; 7174 } 7175 7176 blob->locked_operation_in_progress = true; 7177 ctx->cb_fn = cb_fn; 7178 ctx->cb_arg = cb_arg; 7179 ctx->blob = blob; 7180 ctx->sz = sz; 7181 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 7182 } 7183 7184 /* END spdk_blob_resize */ 7185 7186 7187 /* START spdk_bs_delete_blob */ 7188 7189 static void 7190 bs_delete_close_cpl(void *cb_arg, int bserrno) 7191 { 7192 spdk_bs_sequence_t *seq = 
cb_arg; 7193 7194 bs_sequence_finish(seq, bserrno); 7195 } 7196 7197 static void 7198 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7199 { 7200 struct spdk_blob *blob = cb_arg; 7201 7202 if (bserrno != 0) { 7203 /* 7204 * We already removed this blob from the blobstore tailq, so 7205 * we need to free it here since this is the last reference 7206 * to it. 7207 */ 7208 blob_free(blob); 7209 bs_delete_close_cpl(seq, bserrno); 7210 return; 7211 } 7212 7213 /* 7214 * This will immediately decrement the ref_count and call 7215 * the completion routine since the metadata state is clean. 7216 * By calling spdk_blob_close, we reduce the number of call 7217 * points into code that touches the blob->open_ref count 7218 * and the blobstore's blob list. 7219 */ 7220 spdk_blob_close(blob, bs_delete_close_cpl, seq); 7221 } 7222 7223 struct delete_snapshot_ctx { 7224 struct spdk_blob_list *parent_snapshot_entry; 7225 struct spdk_blob *snapshot; 7226 struct spdk_blob_md_page *page; 7227 bool snapshot_md_ro; 7228 struct spdk_blob *clone; 7229 bool clone_md_ro; 7230 spdk_blob_op_with_handle_complete cb_fn; 7231 void *cb_arg; 7232 int bserrno; 7233 uint32_t next_extent_page; 7234 }; 7235 7236 static void 7237 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 7238 { 7239 struct delete_snapshot_ctx *ctx = cb_arg; 7240 7241 if (bserrno != 0) { 7242 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 7243 } 7244 7245 assert(ctx != NULL); 7246 7247 if (bserrno != 0 && ctx->bserrno == 0) { 7248 ctx->bserrno = bserrno; 7249 } 7250 7251 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 7252 spdk_free(ctx->page); 7253 free(ctx); 7254 } 7255 7256 static void 7257 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 7258 { 7259 struct delete_snapshot_ctx *ctx = cb_arg; 7260 7261 if (bserrno != 0) { 7262 ctx->bserrno = bserrno; 7263 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 7264 } 7265 7266 if (ctx->bserrno != 0) { 7267 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 7268 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 7269 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 7270 } 7271 7272 ctx->snapshot->locked_operation_in_progress = false; 7273 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 7274 7275 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 7276 } 7277 7278 static void 7279 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 7280 { 7281 struct delete_snapshot_ctx *ctx = cb_arg; 7282 7283 ctx->clone->locked_operation_in_progress = false; 7284 ctx->clone->md_ro = ctx->clone_md_ro; 7285 7286 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 7287 } 7288 7289 static void 7290 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 7291 { 7292 struct delete_snapshot_ctx *ctx = cb_arg; 7293 7294 if (bserrno) { 7295 ctx->bserrno = bserrno; 7296 delete_snapshot_cleanup_clone(ctx, 0); 7297 return; 7298 } 7299 7300 ctx->clone->locked_operation_in_progress = false; 7301 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 7302 } 7303 7304 static void 7305 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 7306 { 7307 struct delete_snapshot_ctx *ctx = cb_arg; 7308 struct spdk_blob_list *parent_snapshot_entry = NULL; 7309 struct spdk_blob_list *snapshot_entry = NULL; 7310 struct spdk_blob_list *clone_entry = NULL; 7311 struct spdk_blob_list *snapshot_clone_entry = NULL; 7312 7313 if (bserrno) { 7314 SPDK_ERRLOG("Failed to sync MD on blob\n"); 7315 
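/* Record the error and unwind through the clone cleanup path, which
 * restores the md_ro flags and closes both the clone and the snapshot. */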
ctx->bserrno = bserrno; 7316 delete_snapshot_cleanup_clone(ctx, 0); 7317 return; 7318 } 7319 7320 /* Get snapshot entry for the snapshot we want to remove */ 7321 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 7322 7323 assert(snapshot_entry != NULL); 7324 7325 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 7326 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7327 assert(clone_entry != NULL); 7328 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 7329 snapshot_entry->clone_count--; 7330 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 7331 7332 switch (ctx->snapshot->parent_id) { 7333 case SPDK_BLOBID_INVALID: 7334 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 7335 /* No parent snapshot - just remove clone entry */ 7336 free(clone_entry); 7337 break; 7338 default: 7339 /* This snapshot is at the same time a clone of another snapshot - we need to 7340 * update parent snapshot (remove current clone, add new one inherited from 7341 * the snapshot that is being removed) */ 7342 7343 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 7344 * snapshot that we are removing */ 7345 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 7346 &snapshot_clone_entry); 7347 7348 /* Switch clone entry in parent snapshot */ 7349 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 7350 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 7351 free(snapshot_clone_entry); 7352 } 7353 7354 /* Restore md_ro flags */ 7355 ctx->clone->md_ro = ctx->clone_md_ro; 7356 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 7357 7358 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 7359 } 7360 7361 static void 7362 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 7363 { 7364 struct delete_snapshot_ctx *ctx = cb_arg; 7365 uint64_t i; 7366 7367 ctx->snapshot->md_ro = false; 7368 7369 if (bserrno) { 7370 SPDK_ERRLOG("Failed to sync MD on clone\n"); 7371 ctx->bserrno = bserrno; 7372 7373 /* Restore snapshot to previous state */ 7374 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 7375 if (bserrno != 0) { 7376 delete_snapshot_cleanup_clone(ctx, bserrno); 7377 return; 7378 } 7379 7380 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 7381 return; 7382 } 7383 7384 /* Clear cluster map entries for snapshot */ 7385 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 7386 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 7387 ctx->snapshot->active.clusters[i] = 0; 7388 } 7389 } 7390 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 7391 i < ctx->clone->active.num_extent_pages; i++) { 7392 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 7393 ctx->snapshot->active.extent_pages[i] = 0; 7394 } 7395 } 7396 7397 blob_set_thin_provision(ctx->snapshot); 7398 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 7399 7400 if (ctx->parent_snapshot_entry != NULL) { 7401 ctx->snapshot->back_bs_dev = NULL; 7402 } 7403 7404 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 7405 } 7406 7407 static void 7408 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 7409 { 7410 int bserrno; 7411 7412 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 7413 blob_back_bs_destroy(ctx->clone); 7414 7415 /* Set/remove snapshot xattr and switch parent ID 
and backing bs_dev on clone... */ 7416 if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 7417 bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot, 7418 BLOB_EXTERNAL_SNAPSHOT_ID); 7419 if (bserrno != 0) { 7420 ctx->bserrno = bserrno; 7421 7422 /* Restore snapshot to previous state */ 7423 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 7424 if (bserrno != 0) { 7425 delete_snapshot_cleanup_clone(ctx, bserrno); 7426 return; 7427 } 7428 7429 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 7430 return; 7431 } 7432 ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 7433 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 7434 /* Do not delete the external snapshot along with this snapshot */ 7435 ctx->snapshot->back_bs_dev = NULL; 7436 ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 7437 } else if (ctx->parent_snapshot_entry != NULL) { 7438 /* ...to parent snapshot */ 7439 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 7440 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 7441 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 7442 sizeof(spdk_blob_id), 7443 true); 7444 } else { 7445 /* ...to blobid invalid and zeroes dev */ 7446 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 7447 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 7448 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 7449 } 7450 7451 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 7452 } 7453 7454 static void 7455 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 7456 { 7457 struct delete_snapshot_ctx *ctx = cb_arg; 7458 uint32_t *extent_page; 7459 uint64_t i; 7460 7461 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 7462 i < ctx->clone->active.num_extent_pages; i++) { 7463 if (ctx->snapshot->active.extent_pages[i] == 0) { 7464 /* No extent page to use from snapshot */ 7465 continue; 7466 } 7467 7468 extent_page = &ctx->clone->active.extent_pages[i]; 7469 if (*extent_page == 0) { 7470 /* Copy extent page from snapshot when clone did not have a matching one */ 7471 *extent_page = ctx->snapshot->active.extent_pages[i]; 7472 continue; 7473 } 7474 7475 /* Clone and snapshot both contain partially filled matching extent pages. 7476 * Update the clone extent page in place with cluster map containing the mix of both. 
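 * The clone's cluster map was already merged with the snapshot's in
 * delete_snapshot_sync_snapshot_xattr_cpl(), so the page is rewritten from
 * the merged map; next_extent_page lets the loop resume here once the
 * asynchronous write completes.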
*/ 7477 ctx->next_extent_page = i + 1; 7478 memset(ctx->page, 0, SPDK_BS_PAGE_SIZE); 7479 7480 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page, 7481 delete_snapshot_update_extent_pages, ctx); 7482 return; 7483 } 7484 delete_snapshot_update_extent_pages_cpl(ctx); 7485 } 7486 7487 static void 7488 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 7489 { 7490 struct delete_snapshot_ctx *ctx = cb_arg; 7491 uint64_t i; 7492 7493 /* Temporarily override md_ro flag for clone for MD modification */ 7494 ctx->clone_md_ro = ctx->clone->md_ro; 7495 ctx->clone->md_ro = false; 7496 7497 if (bserrno) { 7498 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 7499 ctx->bserrno = bserrno; 7500 delete_snapshot_cleanup_clone(ctx, 0); 7501 return; 7502 } 7503 7504 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 7505 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 7506 if (ctx->clone->active.clusters[i] == 0) { 7507 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 7508 } 7509 } 7510 ctx->next_extent_page = 0; 7511 delete_snapshot_update_extent_pages(ctx, 0); 7512 } 7513 7514 static void 7515 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno) 7516 { 7517 struct delete_snapshot_ctx *ctx = cb_arg; 7518 7519 if (bserrno != 0) { 7520 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n", 7521 blob->id, bserrno); 7522 /* That error should not stop us from syncing metadata. */ 7523 } 7524 7525 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 7526 } 7527 7528 static void 7529 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 7530 { 7531 struct delete_snapshot_ctx *ctx = cb_arg; 7532 7533 if (bserrno) { 7534 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 7535 ctx->bserrno = bserrno; 7536 delete_snapshot_cleanup_clone(ctx, 0); 7537 return; 7538 } 7539 7540 /* Temporarily override md_ro flag for snapshot for MD modification */ 7541 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 7542 ctx->snapshot->md_ro = false; 7543 7544 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 7545 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 7546 sizeof(spdk_blob_id), true); 7547 if (ctx->bserrno != 0) { 7548 delete_snapshot_cleanup_clone(ctx, 0); 7549 return; 7550 } 7551 7552 if (blob_is_esnap_clone(ctx->snapshot)) { 7553 blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false, 7554 delete_snapshot_esnap_channels_destroyed_cb, 7555 ctx); 7556 return; 7557 } 7558 7559 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 7560 } 7561 7562 static void 7563 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 7564 { 7565 struct delete_snapshot_ctx *ctx = cb_arg; 7566 7567 if (bserrno) { 7568 SPDK_ERRLOG("Failed to open clone\n"); 7569 ctx->bserrno = bserrno; 7570 delete_snapshot_cleanup_snapshot(ctx, 0); 7571 return; 7572 } 7573 7574 ctx->clone = clone; 7575 7576 if (clone->locked_operation_in_progress) { 7577 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 7578 ctx->bserrno = -EBUSY; 7579 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 7580 return; 7581 } 7582 7583 clone->locked_operation_in_progress = true; 7584 7585 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 7586 } 7587 7588 static 
void 7589 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 7590 { 7591 struct spdk_blob_list *snapshot_entry = NULL; 7592 struct spdk_blob_list *clone_entry = NULL; 7593 struct spdk_blob_list *snapshot_clone_entry = NULL; 7594 7595 /* Get snapshot entry for the snapshot we want to remove */ 7596 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 7597 7598 assert(snapshot_entry != NULL); 7599 7600 /* Get clone of the snapshot (at this point there can be only one clone) */ 7601 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7602 assert(snapshot_entry->clone_count == 1); 7603 assert(clone_entry != NULL); 7604 7605 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 7606 * snapshot that we are removing */ 7607 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 7608 &snapshot_clone_entry); 7609 7610 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 7611 } 7612 7613 static void 7614 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 7615 { 7616 spdk_bs_sequence_t *seq = cb_arg; 7617 struct spdk_blob_list *snapshot_entry = NULL; 7618 uint32_t page_num; 7619 7620 if (bserrno) { 7621 SPDK_ERRLOG("Failed to remove blob\n"); 7622 bs_sequence_finish(seq, bserrno); 7623 return; 7624 } 7625 7626 /* Remove snapshot from the list */ 7627 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 7628 if (snapshot_entry != NULL) { 7629 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 7630 free(snapshot_entry); 7631 } 7632 7633 page_num = bs_blobid_to_page(blob->id); 7634 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 7635 blob->state = SPDK_BLOB_STATE_DIRTY; 7636 blob->active.num_pages = 0; 7637 blob_resize(blob, 0); 7638 7639 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 7640 } 7641 7642 static int 7643 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 7644 { 7645 struct spdk_blob_list *snapshot_entry = NULL; 7646 struct spdk_blob_list *clone_entry = NULL; 7647 struct spdk_blob *clone = NULL; 7648 bool has_one_clone = false; 7649 7650 /* Check if this is a snapshot with clones */ 7651 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 7652 if (snapshot_entry != NULL) { 7653 if (snapshot_entry->clone_count > 1) { 7654 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 7655 return -EBUSY; 7656 } else if (snapshot_entry->clone_count == 1) { 7657 has_one_clone = true; 7658 } 7659 } 7660 7661 /* Check if someone has this blob open (besides this delete context): 7662 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 7663 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 7664 * and that is ok, because we will update it accordingly */ 7665 if (blob->open_ref <= 2 && has_one_clone) { 7666 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7667 assert(clone_entry != NULL); 7668 clone = blob_lookup(blob->bs, clone_entry->id); 7669 7670 if (blob->open_ref == 2 && clone == NULL) { 7671 /* Clone is closed and someone else opened this blob */ 7672 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7673 return -EBUSY; 7674 } 7675 7676 *update_clone = true; 7677 return 0; 7678 } 7679 7680 if (blob->open_ref > 1) { 7681 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7682 return -EBUSY; 7683 } 7684 7685 assert(has_one_clone == false); 7686 *update_clone = false; 7687 return 0; 7688 } 7689 7690 static void 7691 
bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 7692 { 7693 spdk_bs_sequence_t *seq = cb_arg; 7694 7695 bs_sequence_finish(seq, -ENOMEM); 7696 } 7697 7698 static void 7699 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7700 { 7701 spdk_bs_sequence_t *seq = cb_arg; 7702 struct delete_snapshot_ctx *ctx; 7703 bool update_clone = false; 7704 7705 if (bserrno != 0) { 7706 bs_sequence_finish(seq, bserrno); 7707 return; 7708 } 7709 7710 blob_verify_md_op(blob); 7711 7712 ctx = calloc(1, sizeof(*ctx)); 7713 if (ctx == NULL) { 7714 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 7715 return; 7716 } 7717 7718 ctx->snapshot = blob; 7719 ctx->cb_fn = bs_delete_blob_finish; 7720 ctx->cb_arg = seq; 7721 7722 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 7723 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 7724 if (ctx->bserrno) { 7725 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7726 return; 7727 } 7728 7729 if (blob->locked_operation_in_progress) { 7730 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 7731 ctx->bserrno = -EBUSY; 7732 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7733 return; 7734 } 7735 7736 blob->locked_operation_in_progress = true; 7737 7738 /* 7739 * Remove the blob from the blob_store list now, to ensure it does not 7740 * get returned after this point by blob_lookup(). 7741 */ 7742 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 7743 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 7744 7745 if (update_clone) { 7746 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 7747 if (!ctx->page) { 7748 ctx->bserrno = -ENOMEM; 7749 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7750 return; 7751 } 7752 /* This blob is a snapshot with active clone - update clone first */ 7753 update_clone_on_snapshot_deletion(blob, ctx); 7754 } else { 7755 /* This blob does not have any clones - just remove it */ 7756 bs_blob_list_remove(blob); 7757 bs_delete_blob_finish(seq, blob, 0); 7758 free(ctx); 7759 } 7760 } 7761 7762 void 7763 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 7764 spdk_blob_op_complete cb_fn, void *cb_arg) 7765 { 7766 struct spdk_bs_cpl cpl; 7767 spdk_bs_sequence_t *seq; 7768 7769 SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid); 7770 7771 assert(spdk_get_thread() == bs->md_thread); 7772 7773 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7774 cpl.u.blob_basic.cb_fn = cb_fn; 7775 cpl.u.blob_basic.cb_arg = cb_arg; 7776 7777 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 7778 if (!seq) { 7779 cb_fn(cb_arg, -ENOMEM); 7780 return; 7781 } 7782 7783 spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq); 7784 } 7785 7786 /* END spdk_bs_delete_blob */ 7787 7788 /* START spdk_bs_open_blob */ 7789 7790 static void 7791 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7792 { 7793 struct spdk_blob *blob = cb_arg; 7794 struct spdk_blob *existing; 7795 7796 if (bserrno != 0) { 7797 blob_free(blob); 7798 seq->cpl.u.blob_handle.blob = NULL; 7799 bs_sequence_finish(seq, bserrno); 7800 return; 7801 } 7802 7803 existing = blob_lookup(blob->bs, blob->id); 7804 if (existing) { 7805 blob_free(blob); 7806 existing->open_ref++; 7807 seq->cpl.u.blob_handle.blob = existing; 7808 bs_sequence_finish(seq, 0); 7809 return; 7810 } 7811 7812 blob->open_ref++; 7813 7814 spdk_bit_array_set(blob->bs->open_blobids, blob->id); 7815 RB_INSERT(spdk_blob_tree, 
&blob->bs->open_blobs, blob); 7816 7817 bs_sequence_finish(seq, bserrno); 7818 } 7819 7820 static inline void 7821 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst) 7822 { 7823 #define FIELD_OK(field) \ 7824 offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size 7825 7826 #define SET_FIELD(field) \ 7827 if (FIELD_OK(field)) { \ 7828 dst->field = src->field; \ 7829 } \ 7830 7831 SET_FIELD(clear_method); 7832 SET_FIELD(esnap_ctx); 7833 7834 dst->opts_size = src->opts_size; 7835 7836 /* You should not remove this statement, but need to update the assert statement 7837 * if you add a new field, and also add a corresponding SET_FIELD statement */ 7838 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size"); 7839 7840 #undef FIELD_OK 7841 #undef SET_FIELD 7842 } 7843 7844 static void 7845 bs_open_blob(struct spdk_blob_store *bs, 7846 spdk_blob_id blobid, 7847 struct spdk_blob_open_opts *opts, 7848 spdk_blob_op_with_handle_complete cb_fn, 7849 void *cb_arg) 7850 { 7851 struct spdk_blob *blob; 7852 struct spdk_bs_cpl cpl; 7853 struct spdk_blob_open_opts opts_local; 7854 spdk_bs_sequence_t *seq; 7855 uint32_t page_num; 7856 7857 SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid); 7858 assert(spdk_get_thread() == bs->md_thread); 7859 7860 page_num = bs_blobid_to_page(blobid); 7861 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 7862 /* Invalid blobid */ 7863 cb_fn(cb_arg, NULL, -ENOENT); 7864 return; 7865 } 7866 7867 blob = blob_lookup(bs, blobid); 7868 if (blob) { 7869 blob->open_ref++; 7870 cb_fn(cb_arg, blob, 0); 7871 return; 7872 } 7873 7874 blob = blob_alloc(bs, blobid); 7875 if (!blob) { 7876 cb_fn(cb_arg, NULL, -ENOMEM); 7877 return; 7878 } 7879 7880 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 7881 if (opts) { 7882 blob_open_opts_copy(opts, &opts_local); 7883 } 7884 7885 blob->clear_method = opts_local.clear_method; 7886 7887 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 7888 cpl.u.blob_handle.cb_fn = cb_fn; 7889 cpl.u.blob_handle.cb_arg = cb_arg; 7890 cpl.u.blob_handle.blob = blob; 7891 cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx; 7892 7893 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 7894 if (!seq) { 7895 blob_free(blob); 7896 cb_fn(cb_arg, NULL, -ENOMEM); 7897 return; 7898 } 7899 7900 blob_load(seq, blob, bs_open_blob_cpl, blob); 7901 } 7902 7903 void 7904 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 7905 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7906 { 7907 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 7908 } 7909 7910 void 7911 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 7912 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7913 { 7914 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 7915 } 7916 7917 /* END spdk_bs_open_blob */ 7918 7919 /* START spdk_blob_set_read_only */ 7920 int 7921 spdk_blob_set_read_only(struct spdk_blob *blob) 7922 { 7923 blob_verify_md_op(blob); 7924 7925 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 7926 7927 blob->state = SPDK_BLOB_STATE_DIRTY; 7928 return 0; 7929 } 7930 /* END spdk_blob_set_read_only */ 7931 7932 /* START spdk_blob_sync_md */ 7933 7934 static void 7935 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7936 { 7937 struct spdk_blob *blob = cb_arg; 7938 7939 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 7940 blob->data_ro = true; 7941 blob->md_ro = true; 7942 } 7943 7944 
bs_sequence_finish(seq, bserrno); 7945 } 7946 7947 static void 7948 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7949 { 7950 struct spdk_bs_cpl cpl; 7951 spdk_bs_sequence_t *seq; 7952 7953 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7954 cpl.u.blob_basic.cb_fn = cb_fn; 7955 cpl.u.blob_basic.cb_arg = cb_arg; 7956 7957 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 7958 if (!seq) { 7959 cb_fn(cb_arg, -ENOMEM); 7960 return; 7961 } 7962 7963 blob_persist(seq, blob, blob_sync_md_cpl, blob); 7964 } 7965 7966 void 7967 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7968 { 7969 blob_verify_md_op(blob); 7970 7971 SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id); 7972 7973 if (blob->md_ro) { 7974 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 7975 cb_fn(cb_arg, 0); 7976 return; 7977 } 7978 7979 blob_sync_md(blob, cb_fn, cb_arg); 7980 } 7981 7982 /* END spdk_blob_sync_md */ 7983 7984 struct spdk_blob_cluster_op_ctx { 7985 struct spdk_thread *thread; 7986 struct spdk_blob *blob; 7987 uint32_t cluster_num; /* cluster index in blob */ 7988 uint32_t cluster; /* cluster on disk */ 7989 uint32_t extent_page; /* extent page on disk */ 7990 struct spdk_blob_md_page *page; /* preallocated extent page */ 7991 int rc; 7992 spdk_blob_op_complete cb_fn; 7993 void *cb_arg; 7994 }; 7995 7996 static void 7997 blob_op_cluster_msg_cpl(void *arg) 7998 { 7999 struct spdk_blob_cluster_op_ctx *ctx = arg; 8000 8001 ctx->cb_fn(ctx->cb_arg, ctx->rc); 8002 free(ctx); 8003 } 8004 8005 static void 8006 blob_op_cluster_msg_cb(void *arg, int bserrno) 8007 { 8008 struct spdk_blob_cluster_op_ctx *ctx = arg; 8009 8010 ctx->rc = bserrno; 8011 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8012 } 8013 8014 static void 8015 blob_insert_new_ep_cb(void *arg, int bserrno) 8016 { 8017 struct spdk_blob_cluster_op_ctx *ctx = arg; 8018 uint32_t *extent_page; 8019 8020 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8021 *extent_page = ctx->extent_page; 8022 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8023 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8024 } 8025 8026 struct spdk_blob_write_extent_page_ctx { 8027 struct spdk_blob_store *bs; 8028 8029 uint32_t extent; 8030 struct spdk_blob_md_page *page; 8031 }; 8032 8033 static void 8034 blob_free_cluster_msg_cb(void *arg, int bserrno) 8035 { 8036 struct spdk_blob_cluster_op_ctx *ctx = arg; 8037 8038 spdk_spin_lock(&ctx->blob->bs->used_lock); 8039 bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster)); 8040 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8041 8042 ctx->rc = bserrno; 8043 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8044 } 8045 8046 static void 8047 blob_free_cluster_update_ep_cb(void *arg, int bserrno) 8048 { 8049 struct spdk_blob_cluster_op_ctx *ctx = arg; 8050 8051 if (bserrno != 0 || ctx->blob->bs->clean == 0) { 8052 blob_free_cluster_msg_cb(ctx, bserrno); 8053 return; 8054 } 8055 8056 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8057 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8058 } 8059 8060 static void 8061 blob_free_cluster_free_ep_cb(void *arg, int bserrno) 8062 { 8063 struct spdk_blob_cluster_op_ctx *ctx = arg; 8064 8065 spdk_spin_lock(&ctx->blob->bs->used_lock); 8066 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8067 bs_release_md_page(ctx->blob->bs, ctx->extent_page); 8068 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8069 ctx->blob->state = 
SPDK_BLOB_STATE_DIRTY;
	blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx);
}

static void
blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;

	free(ctx);
	bs_sequence_finish(seq, bserrno);
}

static void
blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		blob_persist_extent_page_cpl(seq, ctx, bserrno);
		return;
	}
	bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent),
			      bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
			      blob_persist_extent_page_cpl, ctx);
}

static void
blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
		       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_write_extent_page_ctx *ctx;
	spdk_bs_sequence_t *seq;
	struct spdk_bs_cpl cpl;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->bs = blob->bs;
	ctx->extent = extent;
	ctx->page = page;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
	if (!seq) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	assert(page);
	page->next = SPDK_INVALID_MD_PAGE;
	page->id = blob->id;
	page->sequence_num = 0;

	blob_serialize_extent_page(blob, cluster_num, page);

	page->crc = blob_md_page_calc_crc(page);

	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);

	bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx);
}

static void
blob_insert_cluster_msg(void *arg)
{
	struct spdk_blob_cluster_op_ctx *ctx = arg;
	uint32_t *extent_page;

	ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
	if (ctx->rc != 0) {
		spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
		return;
	}

	if (ctx->blob->use_extent_table == false) {
		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
		return;
	}

	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
	if (*extent_page == 0) {
		/* Extent page requires allocation.
		 * It was already claimed in the used_md_pages map and placed in ctx. */
		assert(ctx->extent_page != 0);
		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
				       blob_insert_new_ep_cb, ctx);
	} else {
		/* It is possible for the original thread to have allocated an extent page
		 * for a different cluster in the same extent page. In such a case, proceed
		 * with updating the existing extent page, but release the additional one.
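		 *
		 * For example (hypothetical layout, assuming SPDK_EXTENTS_PER_EP were
		 * 32): clusters 0 and 5 would share extent page index 0, so two I/O
		 * threads allocating those clusters may each claim a spare md page
		 * for it. The insert message processed first on the md thread wins;
		 * the later one lands here and releases its spare page.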
		 */
		if (ctx->extent_page != 0) {
			spdk_spin_lock(&ctx->blob->bs->used_lock);
			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
			bs_release_md_page(ctx->blob->bs, ctx->extent_page);
			spdk_spin_unlock(&ctx->blob->bs->used_lock);
			ctx->extent_page = 0;
		}
		/* Extent page already allocated.
		 * Every cluster allocation requires just an update of a single extent page. */
		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
				       blob_op_cluster_msg_cb, ctx);
	}
}

static void
blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
				 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page,
				 spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_cluster_op_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->thread = spdk_get_thread();
	ctx->blob = blob;
	ctx->cluster_num = cluster_num;
	ctx->cluster = cluster;
	ctx->extent_page = extent_page;
	ctx->page = page;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx);
}

static void
blob_free_cluster_msg(void *arg)
{
	struct spdk_blob_cluster_op_ctx *ctx = arg;
	uint32_t *extent_page;
	uint32_t start_cluster_idx;
	bool free_extent_page = true;
	size_t i;

	ctx->cluster = ctx->blob->active.clusters[ctx->cluster_num];
	ctx->blob->active.clusters[ctx->cluster_num] = 0;

	if (ctx->blob->use_extent_table == false) {
		/* Extent table is not used, proceed with sync of md that will only use extents_rle.
		 */
		spdk_spin_lock(&ctx->blob->bs->used_lock);
		bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster));
		spdk_spin_unlock(&ctx->blob->bs->used_lock);
		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
		return;
	}

	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);

	/* There shouldn't be parallel release operations on the same cluster */
	assert(*extent_page == ctx->extent_page);

	start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
	for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) {
		if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) {
			free_extent_page = false;
			break;
		}
	}

	if (free_extent_page) {
		assert(ctx->extent_page != 0);
		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
		ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0;
		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
				       blob_free_cluster_free_ep_cb, ctx);
	} else {
		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
				       blob_free_cluster_update_ep_cb, ctx);
	}
}


static void
blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page,
			       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_cluster_op_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->thread = spdk_get_thread();
	ctx->blob = blob;
	ctx->cluster_num = cluster_num;
	ctx->extent_page = extent_page;
	ctx->page = page;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx);
}

/* START spdk_blob_close */

static void
blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno == 0) {
		blob->open_ref--;
		if (blob->open_ref == 0) {
			/*
			 * Blobs with active.num_pages == 0 are deleted blobs.
			 * These blobs are removed from the blob_store list
			 * when the deletion process starts - so don't try to
			 * remove them again.
8294 */ 8295 if (blob->active.num_pages > 0) { 8296 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 8297 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 8298 } 8299 blob_free(blob); 8300 } 8301 } 8302 8303 bs_sequence_finish(seq, bserrno); 8304 } 8305 8306 static void 8307 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno) 8308 { 8309 spdk_bs_sequence_t *seq = cb_arg; 8310 8311 if (bserrno != 0) { 8312 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n", 8313 blob->id, bserrno); 8314 bs_sequence_finish(seq, bserrno); 8315 return; 8316 } 8317 8318 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n", 8319 blob->id, spdk_thread_get_name(spdk_get_thread())); 8320 8321 /* Sync metadata */ 8322 blob_persist(seq, blob, blob_close_cpl, blob); 8323 } 8324 8325 void 8326 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8327 { 8328 struct spdk_bs_cpl cpl; 8329 spdk_bs_sequence_t *seq; 8330 8331 blob_verify_md_op(blob); 8332 8333 SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id); 8334 8335 if (blob->open_ref == 0) { 8336 cb_fn(cb_arg, -EBADF); 8337 return; 8338 } 8339 8340 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8341 cpl.u.blob_basic.cb_fn = cb_fn; 8342 cpl.u.blob_basic.cb_arg = cb_arg; 8343 8344 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8345 if (!seq) { 8346 cb_fn(cb_arg, -ENOMEM); 8347 return; 8348 } 8349 8350 if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) { 8351 blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq); 8352 return; 8353 } 8354 8355 /* Sync metadata */ 8356 blob_persist(seq, blob, blob_close_cpl, blob); 8357 } 8358 8359 /* END spdk_blob_close */ 8360 8361 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 8362 { 8363 return spdk_get_io_channel(bs); 8364 } 8365 8366 void 8367 spdk_bs_free_io_channel(struct spdk_io_channel *channel) 8368 { 8369 blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel)); 8370 spdk_put_io_channel(channel); 8371 } 8372 8373 void 8374 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 8375 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 8376 { 8377 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 8378 SPDK_BLOB_UNMAP); 8379 } 8380 8381 void 8382 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 8383 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 8384 { 8385 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 8386 SPDK_BLOB_WRITE_ZEROES); 8387 } 8388 8389 void 8390 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 8391 void *payload, uint64_t offset, uint64_t length, 8392 spdk_blob_op_complete cb_fn, void *cb_arg) 8393 { 8394 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 8395 SPDK_BLOB_WRITE); 8396 } 8397 8398 void 8399 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 8400 void *payload, uint64_t offset, uint64_t length, 8401 spdk_blob_op_complete cb_fn, void *cb_arg) 8402 { 8403 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 8404 SPDK_BLOB_READ); 8405 } 8406 8407 void 8408 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 8409 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8410 spdk_blob_op_complete cb_fn, void *cb_arg) 
8411 { 8412 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 8413 } 8414 8415 void 8416 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 8417 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8418 spdk_blob_op_complete cb_fn, void *cb_arg) 8419 { 8420 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 8421 } 8422 8423 void 8424 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 8425 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8426 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 8427 { 8428 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 8429 io_opts); 8430 } 8431 8432 void 8433 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 8434 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8435 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 8436 { 8437 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 8438 io_opts); 8439 } 8440 8441 struct spdk_bs_iter_ctx { 8442 int64_t page_num; 8443 struct spdk_blob_store *bs; 8444 8445 spdk_blob_op_with_handle_complete cb_fn; 8446 void *cb_arg; 8447 }; 8448 8449 static void 8450 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 8451 { 8452 struct spdk_bs_iter_ctx *ctx = cb_arg; 8453 struct spdk_blob_store *bs = ctx->bs; 8454 spdk_blob_id id; 8455 8456 if (bserrno == 0) { 8457 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 8458 free(ctx); 8459 return; 8460 } 8461 8462 ctx->page_num++; 8463 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 8464 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 8465 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 8466 free(ctx); 8467 return; 8468 } 8469 8470 id = bs_page_to_blobid(ctx->page_num); 8471 8472 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 8473 } 8474 8475 void 8476 spdk_bs_iter_first(struct spdk_blob_store *bs, 8477 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8478 { 8479 struct spdk_bs_iter_ctx *ctx; 8480 8481 ctx = calloc(1, sizeof(*ctx)); 8482 if (!ctx) { 8483 cb_fn(cb_arg, NULL, -ENOMEM); 8484 return; 8485 } 8486 8487 ctx->page_num = -1; 8488 ctx->bs = bs; 8489 ctx->cb_fn = cb_fn; 8490 ctx->cb_arg = cb_arg; 8491 8492 bs_iter_cpl(ctx, NULL, -1); 8493 } 8494 8495 static void 8496 bs_iter_close_cpl(void *cb_arg, int bserrno) 8497 { 8498 struct spdk_bs_iter_ctx *ctx = cb_arg; 8499 8500 bs_iter_cpl(ctx, NULL, -1); 8501 } 8502 8503 void 8504 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 8505 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8506 { 8507 struct spdk_bs_iter_ctx *ctx; 8508 8509 assert(blob != NULL); 8510 8511 ctx = calloc(1, sizeof(*ctx)); 8512 if (!ctx) { 8513 cb_fn(cb_arg, NULL, -ENOMEM); 8514 return; 8515 } 8516 8517 ctx->page_num = bs_blobid_to_page(blob->id); 8518 ctx->bs = bs; 8519 ctx->cb_fn = cb_fn; 8520 ctx->cb_arg = cb_arg; 8521 8522 /* Close the existing blob */ 8523 spdk_blob_close(blob, bs_iter_close_cpl, ctx); 8524 } 8525 8526 static int 8527 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 8528 uint16_t value_len, bool internal) 8529 { 8530 struct spdk_xattr_tailq *xattrs; 8531 struct spdk_xattr *xattr; 8532 size_t desc_size; 8533 void *tmp; 8534 8535 blob_verify_md_op(blob); 8536 8537 if 
(blob->md_ro) {
		return -EPERM;
	}

	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
		SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page %zu\n", name,
			      desc_size, SPDK_BS_MAX_DESC_SIZE);
		return -ENOMEM;
	}

	if (internal) {
		xattrs = &blob->xattrs_internal;
		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
	} else {
		xattrs = &blob->xattrs;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			tmp = malloc(value_len);
			if (!tmp) {
				return -ENOMEM;
			}

			free(xattr->value);
			xattr->value_len = value_len;
			xattr->value = tmp;
			memcpy(xattr->value, value, value_len);

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}

	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
	TAILQ_INSERT_TAIL(xattrs, xattr, link);

	blob->state = SPDK_BLOB_STATE_DIRTY;

	return 0;
}

int
spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		    uint16_t value_len)
{
	return blob_set_xattr(blob, name, value, value_len, false);
}

static int
blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;

	blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}
	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			TAILQ_REMOVE(xattrs, xattr, link);
			free(xattr->value);
			free(xattr->name);
			free(xattr);

			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
			}
			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	return -ENOENT;
}

int
spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
{
	return blob_remove_xattr(blob, name, false);
}

static int
blob_get_xattr_value(struct spdk_blob *blob, const char *name,
		     const void **value, size_t *value_len, bool internal)
{
	struct spdk_xattr *xattr;
	struct spdk_xattr_tailq *xattrs;

	xattrs = internal ?
&blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			*value = xattr->value;
			*value_len = xattr->value_len;
			return 0;
		}
	}
	return -ENOENT;
}

int
spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
			  const void **value, size_t *value_len)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_value(blob, name, value, value_len, false);
}

struct spdk_xattr_names {
	uint32_t count;
	const char *names[0];
};

static int
blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
{
	struct spdk_xattr *xattr;
	int count = 0;

	TAILQ_FOREACH(xattr, xattrs, link) {
		count++;
	}

	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
	if (*names == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		(*names)->names[(*names)->count++] = xattr->name;
	}

	return 0;
}

int
spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_names(&blob->xattrs, names);
}

uint32_t
spdk_xattr_names_get_count(struct spdk_xattr_names *names)
{
	assert(names != NULL);

	return names->count;
}

const char *
spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
{
	if (index >= names->count) {
		return NULL;
	}

	return names->names[index];
}

void
spdk_xattr_names_free(struct spdk_xattr_names *names)
{
	free(names);
}

struct spdk_bs_type
spdk_bs_get_bstype(struct spdk_blob_store *bs)
{
	return bs->bstype;
}

void
spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
{
	memcpy(&bs->bstype, &bstype, sizeof(bstype));
}

bool
spdk_blob_is_read_only(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return (blob->data_ro || blob->md_ro);
}

bool
spdk_blob_is_snapshot(struct spdk_blob *blob)
{
	struct spdk_blob_list *snapshot_entry;

	assert(blob != NULL);

	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry == NULL) {
		return false;
	}

	return true;
}

bool
spdk_blob_is_clone(struct spdk_blob *blob)
{
	assert(blob != NULL);

	if (blob->parent_id != SPDK_BLOBID_INVALID &&
	    blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return true;
	}

	return false;
}

bool
spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
}

bool
spdk_blob_is_esnap_clone(const struct spdk_blob *blob)
{
	return blob_is_esnap_clone(blob);
}

static void
blob_update_clear_method(struct spdk_blob *blob)
{
	enum blob_clear_method stored_cm;

	assert(blob != NULL);

	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
	 * in metadata previously. If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
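	 *
	 * For example (values illustrative):
	 *
	 *   passed in                      stored     result
	 *   BLOB_CLEAR_WITH_DEFAULT        UNMAP      UNMAP (stored value wins)
	 *   BLOB_CLEAR_WITH_WRITE_ZEROES   UNMAP      WRITE_ZEROES (passed in wins, with a warning)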
	 */
	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);

	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
		blob->clear_method = stored_cm;
	} else if (blob->clear_method != stored_cm) {
		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
			     blob->clear_method, stored_cm);
	}
}

spdk_blob_id
spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
			if (clone_entry->id == blob_id) {
				return snapshot_entry->id;
			}
		}
	}

	return SPDK_BLOBID_INVALID;
}

int
spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
		     size_t *count)
{
	struct spdk_blob_list *snapshot_entry, *clone_entry;
	size_t n;

	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
	if (snapshot_entry == NULL) {
		*count = 0;
		return 0;
	}

	if (ids == NULL || *count < snapshot_entry->clone_count) {
		*count = snapshot_entry->clone_count;
		return -ENOMEM;
	}
	*count = snapshot_entry->clone_count;

	n = 0;
	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
		ids[n++] = clone_entry->id;
	}

	return 0;
}

static void
bs_load_grow_continue(struct spdk_bs_load_ctx *ctx)
{
	int rc;

	if (ctx->super->size == 0) {
		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	}

	if (ctx->super->io_unit_size == 0) {
		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
	}

	/* Parse the super block */
	ctx->bs->clean = 1;
	ctx->bs->cluster_sz = ctx->super->cluster_size;
	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
	}
	ctx->bs->io_unit_size = ctx->super->io_unit_size;
	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
	if (rc < 0) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}
	ctx->bs->md_start = ctx->super->md_start;
	ctx->bs->md_len = ctx->super->md_len;
	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
	if (rc < 0) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
	ctx->bs->super_blob = ctx->super->super_blob;
	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));

	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
		SPDK_ERRLOG("Can not grow an unclean blobstore, please load it normally to clean it.\n");
		bs_load_ctx_fail(ctx, -EIO);
		return;
	} else {
		bs_load_read_used_pages(ctx);
	}
}

static void
bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}
	bs_load_grow_continue(ctx);
}

static void
bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	spdk_free(ctx->mask);

	bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
			      bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
			      bs_load_grow_super_write_cpl, ctx);
}

static void
bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint64_t lba, lba_count;
	uint64_t dev_size;
	uint64_t total_clusters;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	/* The type must be correct */
	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
					     struct spdk_blob_md_page) * 8));
	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	total_clusters = dev_size / ctx->super->cluster_size;
	ctx->mask->length = total_clusters;

	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
	bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count,
			      bs_load_grow_used_clusters_write_cpl, ctx);
}

static void
bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx)
{
	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
	uint64_t lba, lba_count, mask_size;

	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	total_clusters = dev_size / ctx->super->cluster_size;
	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
				spdk_divide_round_up(total_clusters, 8),
				SPDK_BS_PAGE_SIZE);
	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
	/* No need to grow, or no space to grow */
	if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) {
		SPDK_DEBUGLOG(blob, "No grow\n");
		bs_load_grow_continue(ctx);
		return;
	}

	SPDK_DEBUGLOG(blob, "Resize blobstore\n");

	ctx->super->size = dev_size;
	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
	ctx->super->crc = blob_md_page_calc_crc(ctx->super);

	mask_size = used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
				 SPDK_MALLOC_DMA);
	if (!ctx->mask) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}
	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
			     bs_load_grow_used_clusters_read_cpl, ctx);
}

static void
bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	int rc;

	rc = bs_super_validate(ctx->super, ctx->bs);
	if (rc != 0) {
		bs_load_ctx_fail(ctx, rc);
		return;
	}

	bs_load_try_to_grow(ctx);
}

struct
spdk_bs_grow_ctx {
	struct spdk_blob_store *bs;
	struct spdk_bs_super_block *super;

	struct spdk_bit_pool *new_used_clusters;
	struct spdk_bs_md_mask *new_used_clusters_mask;

	spdk_bs_sequence_t *seq;
};

static void
bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno)
{
	if (bserrno != 0) {
		spdk_bit_pool_free(&ctx->new_used_clusters);
	}

	bs_sequence_finish(ctx->seq, bserrno);
	free(ctx->new_used_clusters_mask);
	spdk_free(ctx->super);
	free(ctx);
}

static void
bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_grow_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;
	uint64_t total_clusters;

	if (bserrno != 0) {
		bs_grow_live_done(ctx, bserrno);
		return;
	}

	/*
	 * Blobstore is not clean until unload, for now only the super block is up to date.
	 * This is similar to state right after blobstore init, when bs_write_used_md() didn't
	 * yet execute.
	 * When cleanly unloaded, the used md pages will be written out.
	 * In case of unclean shutdown, loading blobstore will go through recovery path correctly
	 * filling out the used_clusters with new size and writing it out.
	 */
	bs->clean = 0;

	/* Reverting the super->size past this point is complex, avoid any error paths
	 * that would require doing so. */
	spdk_spin_lock(&bs->used_lock);

	total_clusters = ctx->super->size / ctx->super->cluster_size;

	assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters));
	spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask);

	assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters));
	spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask);

	spdk_bit_pool_free(&bs->used_clusters);
	bs->used_clusters = ctx->new_used_clusters;

	bs->total_clusters = total_clusters;
	bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up(
					  bs->md_start + bs->md_len, bs->pages_per_cluster);

	bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters);
	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
	spdk_spin_unlock(&bs->used_lock);

	bs_grow_live_done(ctx, 0);
}

static void
bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_grow_ctx *ctx = cb_arg;
	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
	int rc;

	if (bserrno != 0) {
		bs_grow_live_done(ctx, bserrno);
		return;
	}

	rc = bs_super_validate(ctx->super, ctx->bs);
	if (rc != 0) {
		bs_grow_live_done(ctx, rc);
		return;
	}

	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	total_clusters = dev_size / ctx->super->cluster_size;
	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
				spdk_divide_round_up(total_clusters, 8),
				SPDK_BS_PAGE_SIZE);
	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
	/* Only check dev_size here, since it can change while total_clusters stays the same.
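	 *
	 * For example (illustrative numbers): with a 1 MiB cluster_size, a device
	 * grown from 10 GiB to 20 GiB gives total_clusters = 20480, and
	 * used_cluster_mask_len is recomputed from those 20480 bits before being
	 * checked against the metadata region reserved between
	 * used_cluster_mask_start and used_blobid_mask_start.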
	 */
	if (dev_size == ctx->super->size) {
		SPDK_DEBUGLOG(blob, "No need to grow blobstore\n");
		bs_grow_live_done(ctx, 0);
		return;
	}
	/*
	 * Blobstore cannot be shrunk, so fail the grow if:
	 * - the new size of the device is smaller than the size in the super block
	 * - the new total number of clusters is smaller than the used_clusters bit_pool
	 * - there is not enough space in metadata for the used_cluster_mask to be written out
	 */
	if (dev_size < ctx->super->size ||
	    total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) ||
	    used_cluster_mask_len > max_used_cluster_mask) {
		SPDK_DEBUGLOG(blob, "No space to grow blobstore\n");
		bs_grow_live_done(ctx, -ENOSPC);
		return;
	}

	SPDK_DEBUGLOG(blob, "Resizing blobstore\n");

	ctx->new_used_clusters_mask = calloc(1, total_clusters);
	if (!ctx->new_used_clusters_mask) {
		bs_grow_live_done(ctx, -ENOMEM);
		return;
	}
	ctx->new_used_clusters = spdk_bit_pool_create(total_clusters);
	if (!ctx->new_used_clusters) {
		bs_grow_live_done(ctx, -ENOMEM);
		return;
	}

	ctx->super->clean = 0;
	ctx->super->size = dev_size;
	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
	bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx);
}

void
spdk_bs_grow_live(struct spdk_blob_store *bs,
		  spdk_bs_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	struct spdk_bs_grow_ctx *ctx;

	assert(spdk_get_thread() == bs->md_thread);

	SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev);

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = cb_fn;
	cpl.u.bs_basic.cb_arg = cb_arg;

	ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->bs = bs;

	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->super) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!ctx->seq) {
		spdk_free(ctx->super);
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Read the super block */
	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_grow_live_load_super_cpl, ctx);
}

void
spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_cpl cpl;
	struct spdk_bs_load_ctx *ctx;
	struct spdk_bs_opts opts = {};
	int err;

	SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);

	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
		SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen);
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, -EINVAL);
		return;
	}

	spdk_bs_opts_init(&opts, sizeof(opts));
	if (o) {
		if (bs_opts_copy(o, &opts)) {
			return;
		}
	}

	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, -EINVAL);
		return;
	}

	err = bs_alloc(dev, &opts, &bs, &ctx);
	if (err) {
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, err);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
	cpl.u.bs_handle.cb_fn = cb_fn;
	cpl.u.bs_handle.cb_arg = cb_arg;
	cpl.u.bs_handle.bs = bs;

	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!ctx->seq) {
		spdk_free(ctx->super);
		free(ctx);
		bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	/* Read the super block */
	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_grow_load_super_cpl, ctx);
}

int
spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len)
{
	if (!blob_is_esnap_clone(blob)) {
		return -EINVAL;
	}

	return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true);
}

struct spdk_io_channel *
blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob)
{
	struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(ch);
	struct spdk_bs_dev *bs_dev = blob->back_bs_dev;
	struct blob_esnap_channel find = {};
	struct blob_esnap_channel *esnap_channel, *existing;

	find.blob_id = blob->id;
	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
	if (spdk_likely(esnap_channel != NULL)) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n",
			      blob->id, spdk_thread_get_name(spdk_get_thread()));
		return esnap_channel->channel;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n",
		      blob->id, spdk_thread_get_name(spdk_get_thread()));

	esnap_channel = calloc(1, sizeof(*esnap_channel));
	if (esnap_channel == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n",
			       find.blob_id);
		return NULL;
	}
	esnap_channel->channel = bs_dev->create_channel(bs_dev);
	if (esnap_channel->channel == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id);
		free(esnap_channel);
		return NULL;
	}
	esnap_channel->blob_id = find.blob_id;
	existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
	if (spdk_unlikely(existing != NULL)) {
		/*
		 * This should be unreachable: all modifications to this tree happen on this thread.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 " lost race to allocate a channel\n", find.blob_id);
		assert(false);

		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
		free(esnap_channel);

		return existing->channel;
	}

	return esnap_channel->channel;
}

static int
blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2)
{
	return (c1->blob_id < c2->blob_id ?
-1 : c1->blob_id > c2->blob_id); 9316 } 9317 9318 struct blob_esnap_destroy_ctx { 9319 spdk_blob_op_with_handle_complete cb_fn; 9320 void *cb_arg; 9321 struct spdk_blob *blob; 9322 struct spdk_bs_dev *back_bs_dev; 9323 bool abort_io; 9324 }; 9325 9326 static void 9327 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status) 9328 { 9329 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 9330 struct spdk_blob *blob = ctx->blob; 9331 struct spdk_blob_store *bs = blob->bs; 9332 9333 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n", 9334 blob->id); 9335 9336 if (ctx->cb_fn != NULL) { 9337 ctx->cb_fn(ctx->cb_arg, blob, status); 9338 } 9339 free(ctx); 9340 9341 bs->esnap_channels_unloading--; 9342 if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) { 9343 spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg); 9344 } 9345 } 9346 9347 static void 9348 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i) 9349 { 9350 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 9351 struct spdk_blob *blob = ctx->blob; 9352 struct spdk_bs_dev *bs_dev = ctx->back_bs_dev; 9353 struct spdk_io_channel *channel = spdk_io_channel_iter_get_channel(i); 9354 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(channel); 9355 struct blob_esnap_channel *esnap_channel; 9356 struct blob_esnap_channel find = {}; 9357 9358 assert(spdk_get_thread() == spdk_io_channel_get_thread(channel)); 9359 9360 find.blob_id = blob->id; 9361 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 9362 if (esnap_channel != NULL) { 9363 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n", 9364 blob->id, spdk_thread_get_name(spdk_get_thread())); 9365 RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 9366 9367 if (ctx->abort_io) { 9368 spdk_bs_user_op_t *op, *tmp; 9369 9370 TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) { 9371 if (op->back_channel == esnap_channel->channel) { 9372 TAILQ_REMOVE(&bs_channel->queued_io, op, link); 9373 bs_user_op_abort(op, -EIO); 9374 } 9375 } 9376 } 9377 9378 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 9379 free(esnap_channel); 9380 } 9381 9382 spdk_for_each_channel_continue(i, 0); 9383 } 9384 9385 /* 9386 * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be 9387 * used when closing an esnap clone blob and after decoupling from the parent. 
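 *
 * E.g., spdk_blob_close() takes this path when the last reference to an esnap
 * clone is dropped (abort_io == false), and spdk_blob_set_esnap_bs_dev() uses
 * it with abort_io == true while the blob's I/O is frozen, before swapping in
 * the new back_bs_dev.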
 */
static void
blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
				   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct blob_esnap_destroy_ctx *ctx;

	if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) {
		if (cb_fn != NULL) {
			cb_fn(cb_arg, blob, 0);
		}
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		if (cb_fn != NULL) {
			cb_fn(cb_arg, blob, -ENOMEM);
		}
		return;
	}
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->blob = blob;
	ctx->back_bs_dev = blob->back_bs_dev;
	ctx->abort_io = abort_io;

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n",
		      blob->id);

	blob->bs->esnap_channels_unloading++;
	spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx,
			      blob_esnap_destroy_channels_done);
}

/*
 * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a
 * bs_channel is destroyed.
 */
static void
blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch)
{
	struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp;

	assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch)));

	SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n",
		      spdk_thread_get_name(spdk_get_thread()));
	RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels,
			esnap_channel_tmp) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64
			      ": destroying one channel in thread %s\n",
			      esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread()));
		RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel);
		spdk_put_io_channel(esnap_channel->channel);
		free(esnap_channel);
	}
	SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n",
		      spdk_thread_get_name(spdk_get_thread()));
}

struct set_bs_dev_ctx {
	struct spdk_blob *blob;
	struct spdk_bs_dev *back_bs_dev;
	spdk_blob_op_complete cb_fn;
	void *cb_arg;
	int bserrno;
};

static void
blob_set_back_bs_dev_done(void *_ctx, int bserrno)
{
	struct set_bs_dev_ctx *ctx = _ctx;

	if (bserrno != 0) {
		/* Even though the unfreeze failed, the update may have succeeded.
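		 * The callback below therefore reports the original operation status
		 * (ctx->bserrno) rather than the unfreeze error.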
*/ 9464 SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id, 9465 bserrno); 9466 } 9467 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 9468 free(ctx); 9469 } 9470 9471 static void 9472 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno) 9473 { 9474 struct set_bs_dev_ctx *ctx = _ctx; 9475 9476 if (bserrno != 0) { 9477 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n", 9478 blob->id, bserrno); 9479 ctx->bserrno = bserrno; 9480 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 9481 return; 9482 } 9483 9484 if (blob->back_bs_dev != NULL) { 9485 blob->back_bs_dev->destroy(blob->back_bs_dev); 9486 } 9487 9488 SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id); 9489 blob->back_bs_dev = ctx->back_bs_dev; 9490 ctx->bserrno = 0; 9491 9492 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 9493 } 9494 9495 static void 9496 blob_frozen_destroy_esnap_channels(void *_ctx, int bserrno) 9497 { 9498 struct set_bs_dev_ctx *ctx = _ctx; 9499 struct spdk_blob *blob = ctx->blob; 9500 9501 if (bserrno != 0) { 9502 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id, 9503 bserrno); 9504 ctx->cb_fn(ctx->cb_arg, bserrno); 9505 free(ctx); 9506 return; 9507 } 9508 9509 /* 9510 * This does not prevent future reads from the esnap device because any future IO will 9511 * lazily create a new esnap IO channel. 9512 */ 9513 blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx); 9514 } 9515 9516 void 9517 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev, 9518 spdk_blob_op_complete cb_fn, void *cb_arg) 9519 { 9520 struct set_bs_dev_ctx *ctx; 9521 9522 if (!blob_is_esnap_clone(blob)) { 9523 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 9524 cb_fn(cb_arg, -EINVAL); 9525 return; 9526 } 9527 9528 ctx = calloc(1, sizeof(*ctx)); 9529 if (ctx == NULL) { 9530 SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n", 9531 blob->id); 9532 cb_fn(cb_arg, -ENOMEM); 9533 return; 9534 } 9535 ctx->cb_fn = cb_fn; 9536 ctx->cb_arg = cb_arg; 9537 ctx->back_bs_dev = back_bs_dev; 9538 ctx->blob = blob; 9539 blob_freeze_io(blob, blob_frozen_destroy_esnap_channels, ctx); 9540 } 9541 9542 struct spdk_bs_dev * 9543 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob) 9544 { 9545 if (!blob_is_esnap_clone(blob)) { 9546 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 9547 return NULL; 9548 } 9549 9550 return blob->back_bs_dev; 9551 } 9552 9553 bool 9554 spdk_blob_is_degraded(const struct spdk_blob *blob) 9555 { 9556 if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) { 9557 return true; 9558 } 9559 if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) { 9560 return false; 9561 } 9562 9563 return blob->back_bs_dev->is_degraded(blob->back_bs_dev); 9564 } 9565 9566 SPDK_LOG_REGISTER_COMPONENT(blob) 9567 SPDK_LOG_REGISTER_COMPONENT(blob_esnap) 9568
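
/*
 * Usage sketch (illustrative only, not part of the library API): hotplugging a
 * replacement external snapshot device on an esnap clone with
 * spdk_blob_set_esnap_bs_dev(). Here `new_dev` is assumed to be a fully
 * initialized struct spdk_bs_dev owned by the caller.
 *
 *	static void
 *	esnap_hotplug_done(void *cb_arg, int bserrno)
 *	{
 *		struct spdk_blob *blob = cb_arg;
 *
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("blob 0x%" PRIx64 ": hotplug failed: %d\n",
 *				    spdk_blob_get_id(blob), bserrno);
 *			return;
 *		}
 *		printf("degraded: %d\n", spdk_blob_is_degraded(blob));
 *	}
 *
 *	spdk_blob_set_esnap_bs_dev(blob, new_dev, esnap_hotplug_done, blob);
 */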