/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn,
		void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg);

static void bs_shallow_copy_cluster_find_next(void *cb_arg);

/*
 * External snapshots require a channel per thread per esnap bdev. The tree
 * is populated lazily as blob IOs are handled by the back_bs_dev. When this
 * channel is destroyed, all the channels in the tree are destroyed.
 */

struct blob_esnap_channel {
	RB_ENTRY(blob_esnap_channel) node;
	spdk_blob_id blob_id;
	struct spdk_io_channel *channel;
};

static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
static void blob_set_back_bs_dev_frozen(void *_ctx, int bserrno);
RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)

static inline bool
blob_is_esnap_clone(const struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
}

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	assert(blob1 != NULL && blob2 != NULL);
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}
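/* Per-blobstore tree of open blobs, keyed and ordered by blob ID. */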
RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	assert(spdk_spin_held(&bs->used_lock));

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	blob->active.num_allocated_clusters++;

	return 0;
}
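/*
 * Claim a free cluster for the given cluster index. When the extent table is
 * in use and the owning extent page has not been allocated yet, also claim a
 * free metadata page for it; the cluster claim is rolled back if no metadata
 * page is available.
 */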
static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = NULL;

	assert(spdk_spin_held(&blob->bs->used_lock));

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* Extent page shall never occupy md_page so start the search from 1 */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
		      blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}
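/*
 * The opts_size-based FIELD_OK/SET_FIELD pattern above lets newer library
 * versions add fields without breaking callers built against older structs:
 * a field is only touched when the caller's struct is large enough to hold
 * it. Illustrative caller sketch (not part of this file):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 16;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_cb, cb_arg);
 */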
static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

static void
blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_dev *bs_dev = ctx;

	if (bserrno != 0) {
		/*
		 * This is probably due to a memory allocation failure when creating the
		 * blob_esnap_destroy_ctx before iterating threads.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
			    blob->id, bserrno);
		assert(false);
	}

	if (bs_dev == NULL) {
		/*
		 * This check exists to make scan-build happy.
		 *
		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
		 * the blobstore is being loaded. It could also be NULL if there was an error
		 * opening the esnap device. In each of these cases, no channels could have been
		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
		 * deref.
		 */
		assert(false);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
	bs_dev->destroy(bs_dev);
}

static void
blob_back_bs_destroy(struct spdk_blob *blob)
{
	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
		      blob->id);

	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
					   blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

struct blob_parent {
	union {
		struct {
			spdk_blob_id id;
			struct spdk_blob *blob;
		} snapshot;

		struct {
			void *id;
			uint32_t id_len;
			struct spdk_bs_dev *back_bs_dev;
		} esnap;
	} u;
};

typedef int (*set_parent_refs_cb)(struct spdk_blob *blob, struct blob_parent *parent);
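/*
 * Context for replacing a blob's back_bs_dev. IO on the blob is frozen while
 * the parent references are updated, then unfrozen once the new device is in
 * place.
 */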
struct set_bs_dev_ctx {
	struct spdk_blob *blob;
	struct spdk_bs_dev *back_bs_dev;

	/*
	 * This callback is used during a set parent operation to change the references
	 * to the parent of the blob.
	 */
	set_parent_refs_cb parent_refs_cb_fn;
	struct blob_parent *parent_refs_cb_arg;

	spdk_blob_op_complete cb_fn;
	void *cb_arg;
	int bserrno;
};

static void
blob_set_back_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
		     set_parent_refs_cb parent_refs_cb_fn, struct blob_parent *parent_refs_cb_arg,
		     spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct set_bs_dev_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n",
			    blob->id);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->parent_refs_cb_fn = parent_refs_cb_fn;
	ctx->parent_refs_cb_arg = parent_refs_cb_arg;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->back_bs_dev = back_bs_dev;
	ctx->blob = blob;

	blob_freeze_io(blob, blob_set_back_bs_dev_frozen, ctx);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
}
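/*
 * Transfer the current active metadata arrays to the blob's clean state and
 * give 'active' fresh copies, so the clean arrays remain stable while the
 * active ones continue to be modified.
 */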
static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_allocated_clusters = blob->active.num_allocated_clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}
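/*
 * Walk the descriptors in a metadata page and apply each one to the
 * in-memory blob. Flags, extent (RLE, table, and page), and xattr
 * descriptors are recognized; unknown descriptor types are skipped.
 */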
static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* Extent Table already present in the md;
				 * both descriptors should never be present at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
						blob->active.num_allocated_clusters++;
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD;
				 * both should never be present at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match number
				 * from previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			unsigned int i;
			unsigned int cluster_count = 0;
			size_t cluster_idx_length;

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD;
				 * both should never be present at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially the starting cluster idx should match
			 * the current size of the blob.
			 * If changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
					blob->active.num_allocated_clusters++;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD, this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}
/* Transform the in-memory representation 'xattr' into an on-disk xattr
 * descriptor. required_sz is updated on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr *desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}
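/*
 * Serialize the extent table, adding new metadata pages to the chain
 * whenever the running descriptor no longer fits in the current page.
 */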
static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted.
	 * Such a case occurs with num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
	/* Assert for scan-build false positive */
	assert(lba_per_cluster > 0);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}
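/*
 * For illustration: with clusters[] = { L, L+c, L+2c, 0, 0, M }, where c is
 * lba_per_cluster and L, M are non-zero starting LBAs, blob_serialize_extent_rle
 * above emits three extents: { L/c, 3 }, { 0, 2 }, { M/c, 1 }. Both allocated
 * runs and runs of unallocated (zero) clusters are run-length encoded.
 */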
static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr *xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}
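/*
 * Serialize a dirty blob's metadata into a newly allocated chain of pages:
 * flags first, then user and internal xattrs, then either the extent table
 * or the RLE extents, depending on blob->use_extent_table.
 */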
static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob *blob;

	struct spdk_blob_md_page *pages;
	uint32_t num_pages;
	uint32_t next_extent_page;
	spdk_bs_sequence_t *seq;

	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
};

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Failed to load snapshot\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);
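/*
 * Resolve the blob's BLOB_EXTERNAL_SNAPSHOT_ID xattr into a bs_dev via the
 * consumer-registered esnap_bs_dev_create callback. A minimal sketch of such
 * a callback, assuming the id is a null-terminated bdev name (illustrative
 * only, not part of this file):
 *
 *	static int
 *	my_esnap_create(void *bs_ctx, void *blob_ctx, struct spdk_blob *blob,
 *			const void *id, uint32_t id_len, struct spdk_bs_dev **dev)
 *	{
 *		return spdk_bdev_create_bs_dev_ext((const char *)id,
 *						   my_bdev_event_cb, NULL, dev);
 *	}
 */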
static int
blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
{
	struct spdk_blob_store *bs = blob->bs;
	struct spdk_bs_dev *bs_dev = NULL;
	const void *esnap_id = NULL;
	size_t id_len = 0;
	int rc;

	if (bs->esnap_bs_dev_create == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
			       "without support for esnap clones\n", blob->id);
		return -ENOTSUP;
	}
	assert(blob->back_bs_dev == NULL);

	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
	if (rc != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
		return -EINVAL;
	}
	assert(id_len > 0 && id_len < UINT32_MAX);

	SPDK_INFOLOG(blob, "Creating external snapshot device\n");

	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
				     &bs_dev);
	if (rc != 0) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
			      "with error %d\n", blob->id, rc);
		return rc;
	}

	/*
	 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot.
	 * This especially might happen during spdk_bs_load() iteration.
	 */
	if (bs_dev != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
				       "is not compatible with blobstore block size %u\n",
				       blob->id, bs_dev->blocklen, bs->io_unit_size);
			bs_dev->destroy(bs_dev);
			return -EINVAL;
		}
	}

	blob->back_bs_dev = bs_dev;
	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;

	return 0;
}

static void
blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	const void *value;
	size_t len;
	int rc;

	if (blob_is_esnap_clone(blob)) {
		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
		blob_load_final(ctx, rc);
		return;
	}

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}
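/*
 * Read and parse the blob's allocated extent pages one at a time, re-entering
 * this completion for each page. Runs of unallocated extent pages (valid only
 * for thin-provisioned blobs) grow the cluster array with zeroes instead.
 */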
static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	uint64_t i;
	uint32_t crc;
	uint64_t lba;
	void *tmp;
	uint64_t sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case the blob size should be increased by up to the amount
			 * left in remaining_clusters_in_et. */
			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(seq, ctx);
}

static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	int rc;
	uint32_t crc;
	uint32_t current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support
		 * for the extent table. No extent_* descriptors means that the blob
		 * has a length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(seq, ctx);
	}
}
/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob *blob;

	struct spdk_blob_md_page *pages;
	uint32_t next_extent_page;
	struct spdk_blob_md_page *extent_page;

	spdk_bs_sequence_t *seq;
	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

static void
bs_batch_clear_dev(struct spdk_blob *blob, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}
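/*
 * Validate a super block read from disk: version range, signature, CRC,
 * bstype (exact match or wildcard), and that the recorded blobstore size
 * does not exceed the size of the underlying device.
 */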
static int
bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
{
	uint32_t crc;
	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];

	if (super->version > SPDK_BS_VERSION ||
	    super->version < SPDK_BS_INITIAL_VERSION) {
		return -EILSEQ;
	}

	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(super->signature)) != 0) {
		return -EILSEQ;
	}

	crc = blob_md_page_calc_crc(super);
	if (crc != super->crc) {
		return -EILSEQ;
	}

	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless of bstype\n");
	} else {
		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		return -ENXIO;
	}

	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
		return -EILSEQ;
	}

	return 0;
}

static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx *next_persist, *tmp;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
}
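/*
 * Persist cleanup runs as a sequence: zero out truncated metadata pages,
 * clear truncated clusters, clear truncated extent pages, then complete the
 * persist along with any persists that were queued behind it.
 */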
spdk_blob_store *bs = blob->bs; 1950 size_t i; 1951 uint64_t lba; 1952 uint64_t lba_count; 1953 spdk_bs_batch_t *batch; 1954 1955 batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx); 1956 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 1957 1958 /* Clear all extent_pages that were truncated */ 1959 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1960 /* Nothing to clear if it was not allocated */ 1961 if (blob->active.extent_pages[i] != 0) { 1962 lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]); 1963 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1964 } 1965 } 1966 1967 bs_batch_close(batch); 1968 } 1969 1970 static void 1971 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1972 { 1973 struct spdk_blob_persist_ctx *ctx = cb_arg; 1974 struct spdk_blob *blob = ctx->blob; 1975 struct spdk_blob_store *bs = blob->bs; 1976 size_t i; 1977 1978 if (bserrno != 0) { 1979 blob_persist_complete(seq, ctx, bserrno); 1980 return; 1981 } 1982 1983 spdk_spin_lock(&bs->used_lock); 1984 /* Release all clusters that were truncated */ 1985 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1986 uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]); 1987 1988 /* Nothing to release if it was not allocated */ 1989 if (blob->active.clusters[i] != 0) { 1990 bs_release_cluster(bs, cluster_num); 1991 } 1992 } 1993 spdk_spin_unlock(&bs->used_lock); 1994 1995 if (blob->active.num_clusters == 0) { 1996 free(blob->active.clusters); 1997 blob->active.clusters = NULL; 1998 blob->active.cluster_array_size = 0; 1999 } else if (blob->active.num_clusters != blob->active.cluster_array_size) { 2000 #ifndef __clang_analyzer__ 2001 void *tmp; 2002 2003 /* scan-build really can't figure reallocs, workaround it */ 2004 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters); 2005 assert(tmp != NULL); 2006 blob->active.clusters = tmp; 2007 2008 #endif 2009 blob->active.cluster_array_size = blob->active.num_clusters; 2010 } 2011 2012 /* Move on to clearing extent pages */ 2013 blob_persist_clear_extents(seq, ctx); 2014 } 2015 2016 static void 2017 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2018 { 2019 struct spdk_blob *blob = ctx->blob; 2020 struct spdk_blob_store *bs = blob->bs; 2021 spdk_bs_batch_t *batch; 2022 size_t i; 2023 uint64_t lba; 2024 uint64_t lba_count; 2025 2026 /* Clusters don't move around in blobs. The list shrinks or grows 2027 * at the end, but no changes ever occur in the middle of the list. 2028 */ 2029 2030 batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx); 2031 2032 /* Clear all clusters that were truncated */ 2033 lba = 0; 2034 lba_count = 0; 2035 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 2036 uint64_t next_lba = blob->active.clusters[i]; 2037 uint64_t next_lba_count = bs_cluster_to_lba(bs, 1); 2038 2039 if (next_lba > 0 && (lba + lba_count) == next_lba) { 2040 /* This cluster is contiguous with the previous one. */ 2041 lba_count += next_lba_count; 2042 continue; 2043 } else if (next_lba == 0) { 2044 continue; 2045 } 2046 2047 /* This cluster is not contiguous with the previous one. 
*/ 2048 2049 /* If a run of LBAs previously existed, clear them now */ 2050 if (lba_count > 0) { 2051 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2052 } 2053 2054 /* Start building the next batch */ 2055 lba = next_lba; 2056 if (next_lba > 0) { 2057 lba_count = next_lba_count; 2058 } else { 2059 lba_count = 0; 2060 } 2061 } 2062 2063 /* If we ended with a contiguous set of LBAs, clear them now */ 2064 if (lba_count > 0) { 2065 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2066 } 2067 2068 bs_batch_close(batch); 2069 } 2070 2071 static void 2072 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2073 { 2074 struct spdk_blob_persist_ctx *ctx = cb_arg; 2075 struct spdk_blob *blob = ctx->blob; 2076 struct spdk_blob_store *bs = blob->bs; 2077 size_t i; 2078 2079 if (bserrno != 0) { 2080 blob_persist_complete(seq, ctx, bserrno); 2081 return; 2082 } 2083 2084 spdk_spin_lock(&bs->used_lock); 2085 2086 /* This loop starts at 1 because the first page is special and handled 2087 * below. The pages (except the first) are never written in place, 2088 * so any pages in the clean list can now be released. 2089 */ 2090 for (i = 1; i < blob->clean.num_pages; i++) { 2091 bs_release_md_page(bs, blob->clean.pages[i]); 2092 } 2093 2094 if (blob->active.num_pages == 0) { 2095 uint32_t page_num; 2096 2097 page_num = bs_blobid_to_page(blob->id); 2098 bs_release_md_page(bs, page_num); 2099 } 2100 2101 spdk_spin_unlock(&bs->used_lock); 2102 2103 /* Move on to clearing clusters */ 2104 blob_persist_clear_clusters(seq, ctx); 2105 } 2106 2107 static void 2108 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2109 { 2110 struct spdk_blob_persist_ctx *ctx = cb_arg; 2111 struct spdk_blob *blob = ctx->blob; 2112 struct spdk_blob_store *bs = blob->bs; 2113 uint64_t lba; 2114 uint64_t lba_count; 2115 spdk_bs_batch_t *batch; 2116 size_t i; 2117 2118 if (bserrno != 0) { 2119 blob_persist_complete(seq, ctx, bserrno); 2120 return; 2121 } 2122 2123 batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx); 2124 2125 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 2126 2127 /* This loop starts at 1 because the first page is special and handled 2128 * below. The pages (except the first) are never written in place, 2129 * so any pages in the clean list must be zeroed. 2130 */ 2131 for (i = 1; i < blob->clean.num_pages; i++) { 2132 lba = bs_md_page_to_lba(bs, blob->clean.pages[i]); 2133 2134 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2135 } 2136 2137 /* The first page will only be zeroed if this is a delete. 
*/ 2138 if (blob->active.num_pages == 0) { 2139 uint32_t page_num; 2140 2141 /* The first page in the metadata goes where the blobid indicates */ 2142 page_num = bs_blobid_to_page(blob->id); 2143 lba = bs_md_page_to_lba(bs, page_num); 2144 2145 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2146 } 2147 2148 bs_batch_close(batch); 2149 } 2150 2151 static void 2152 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2153 { 2154 struct spdk_blob_persist_ctx *ctx = cb_arg; 2155 struct spdk_blob *blob = ctx->blob; 2156 struct spdk_blob_store *bs = blob->bs; 2157 uint64_t lba; 2158 uint32_t lba_count; 2159 struct spdk_blob_md_page *page; 2160 2161 if (bserrno != 0) { 2162 blob_persist_complete(seq, ctx, bserrno); 2163 return; 2164 } 2165 2166 if (blob->active.num_pages == 0) { 2167 /* Move on to the next step */ 2168 blob_persist_zero_pages(seq, ctx, 0); 2169 return; 2170 } 2171 2172 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2173 2174 page = &ctx->pages[0]; 2175 /* The first page in the metadata goes where the blobid indicates */ 2176 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 2177 2178 bs_sequence_write_dev(seq, page, lba, lba_count, 2179 blob_persist_zero_pages, ctx); 2180 } 2181 2182 static void 2183 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2184 { 2185 struct spdk_blob *blob = ctx->blob; 2186 struct spdk_blob_store *bs = blob->bs; 2187 uint64_t lba; 2188 uint32_t lba_count; 2189 struct spdk_blob_md_page *page; 2190 spdk_bs_batch_t *batch; 2191 size_t i; 2192 2193 /* Clusters don't move around in blobs. The list shrinks or grows 2194 * at the end, but no changes ever occur in the middle of the list. 2195 */ 2196 2197 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2198 2199 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 2200 2201 /* This starts at 1. The root page is not written until 2202 * all of the others are finished 2203 */ 2204 for (i = 1; i < blob->active.num_pages; i++) { 2205 page = &ctx->pages[i]; 2206 assert(page->sequence_num == i); 2207 2208 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 2209 2210 bs_batch_write_dev(batch, page, lba, lba_count); 2211 } 2212 2213 bs_batch_close(batch); 2214 } 2215 2216 static int 2217 blob_resize(struct spdk_blob *blob, uint64_t sz) 2218 { 2219 uint64_t i; 2220 uint64_t *tmp; 2221 uint64_t cluster; 2222 uint32_t lfmd; /* lowest free md page */ 2223 uint64_t num_clusters; 2224 uint32_t *ep_tmp; 2225 uint64_t new_num_ep = 0, current_num_ep = 0; 2226 struct spdk_blob_store *bs; 2227 int rc; 2228 2229 bs = blob->bs; 2230 2231 blob_verify_md_op(blob); 2232 2233 if (blob->active.num_clusters == sz) { 2234 return 0; 2235 } 2236 2237 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2238 /* If this blob was resized to be larger, then smaller, then 2239 * larger without syncing, then the cluster array already 2240 * contains spare assigned clusters we can use. 2241 */ 2242 num_clusters = spdk_min(blob->active.cluster_array_size, 2243 sz); 2244 } else { 2245 num_clusters = blob->active.num_clusters; 2246 } 2247 2248 if (blob->use_extent_table) { 2249 /* Round up since every cluster beyond current Extent Table size, 2250 * requires new extent page. 
*/ 2251 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2252 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2253 } 2254 2255 assert(!spdk_spin_held(&bs->used_lock)); 2256 2257 /* Check first that we have enough clusters and md pages before we start claiming them. 2258 * bs->used_lock is held to ensure that clusters we think are free are still free when we go 2259 * to claim them later in this function. 2260 */ 2261 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2262 spdk_spin_lock(&bs->used_lock); 2263 if ((sz - num_clusters) > bs->num_free_clusters) { 2264 rc = -ENOSPC; 2265 goto out; 2266 } 2267 lfmd = 0; 2268 for (i = current_num_ep; i < new_num_ep ; i++) { 2269 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2270 if (lfmd == UINT32_MAX) { 2271 /* No more free md pages. Cannot satisfy the request */ 2272 rc = -ENOSPC; 2273 goto out; 2274 } 2275 } 2276 } 2277 2278 if (sz > num_clusters) { 2279 /* Expand the cluster array if necessary. 2280 * We only shrink the array when persisting. 2281 */ 2282 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2283 if (sz > 0 && tmp == NULL) { 2284 rc = -ENOMEM; 2285 goto out; 2286 } 2287 memset(tmp + blob->active.cluster_array_size, 0, 2288 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2289 blob->active.clusters = tmp; 2290 blob->active.cluster_array_size = sz; 2291 2292 /* Expand the extents table, only if enough clusters were added */ 2293 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2294 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2295 if (new_num_ep > 0 && ep_tmp == NULL) { 2296 rc = -ENOMEM; 2297 goto out; 2298 } 2299 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2300 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2301 blob->active.extent_pages = ep_tmp; 2302 blob->active.extent_pages_array_size = new_num_ep; 2303 } 2304 } 2305 2306 blob->state = SPDK_BLOB_STATE_DIRTY; 2307 2308 if (spdk_blob_is_thin_provisioned(blob) == false) { 2309 cluster = 0; 2310 lfmd = 0; 2311 for (i = num_clusters; i < sz; i++) { 2312 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2313 /* Do not increment lfmd here. lfmd will get updated 2314 * to the md_page allocated (if any) when a new extent 2315 * page is needed. Just pass that value again, 2316 * bs_allocate_cluster will just start at that index 2317 * to find the next free md_page when needed. 
2318 */ 2319 } 2320 } 2321 2322 /* If we are shrinking the blob, we must adjust num_allocated_clusters */ 2323 for (i = sz; i < num_clusters; i++) { 2324 if (blob->active.clusters[i] != 0) { 2325 blob->active.num_allocated_clusters--; 2326 } 2327 } 2328 2329 blob->active.num_clusters = sz; 2330 blob->active.num_extent_pages = new_num_ep; 2331 2332 rc = 0; 2333 out: 2334 if (spdk_spin_held(&bs->used_lock)) { 2335 spdk_spin_unlock(&bs->used_lock); 2336 } 2337 2338 return rc; 2339 } 2340 2341 static void 2342 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2343 { 2344 spdk_bs_sequence_t *seq = ctx->seq; 2345 struct spdk_blob *blob = ctx->blob; 2346 struct spdk_blob_store *bs = blob->bs; 2347 uint64_t i; 2348 uint32_t page_num; 2349 void *tmp; 2350 int rc; 2351 2352 /* Generate the new metadata */ 2353 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2354 if (rc < 0) { 2355 blob_persist_complete(seq, ctx, rc); 2356 return; 2357 } 2358 2359 assert(blob->active.num_pages >= 1); 2360 2361 /* Resize the cache of page indices */ 2362 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2363 if (!tmp) { 2364 blob_persist_complete(seq, ctx, -ENOMEM); 2365 return; 2366 } 2367 blob->active.pages = tmp; 2368 2369 /* Assign this metadata to pages. This requires two passes - one to verify that there are 2370 * enough pages and a second to actually claim them. The used_lock is held across 2371 * both passes to ensure things don't change in the middle. 2372 */ 2373 spdk_spin_lock(&bs->used_lock); 2374 page_num = 0; 2375 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 2376 for (i = 1; i < blob->active.num_pages; i++) { 2377 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2378 if (page_num == UINT32_MAX) { 2379 spdk_spin_unlock(&bs->used_lock); 2380 blob_persist_complete(seq, ctx, -ENOMEM); 2381 return; 2382 } 2383 page_num++; 2384 } 2385 2386 page_num = 0; 2387 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2388 for (i = 1; i < blob->active.num_pages; i++) { 2389 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2390 ctx->pages[i - 1].next = page_num; 2391 /* Now that previous metadata page is complete, calculate the crc for it. */ 2392 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2393 blob->active.pages[i] = page_num; 2394 bs_claim_md_page(bs, page_num); 2395 SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num, 2396 blob->id); 2397 page_num++; 2398 } 2399 spdk_spin_unlock(&bs->used_lock); 2400 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2401 /* Start writing the metadata from last page to first */ 2402 blob->state = SPDK_BLOB_STATE_CLEAN; 2403 blob_persist_write_page_chain(seq, ctx); 2404 } 2405 2406 static void 2407 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2408 { 2409 struct spdk_blob_persist_ctx *ctx = cb_arg; 2410 struct spdk_blob *blob = ctx->blob; 2411 size_t i; 2412 uint32_t extent_page_id; 2413 uint32_t page_count = 0; 2414 int rc; 2415 2416 if (ctx->extent_page != NULL) { 2417 spdk_free(ctx->extent_page); 2418 ctx->extent_page = NULL; 2419 } 2420 2421 if (bserrno != 0) { 2422 blob_persist_complete(seq, ctx, bserrno); 2423 return; 2424 } 2425 2426 /* Only write out Extent Pages when blob was resized. 
*/ 2427 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2428 extent_page_id = blob->active.extent_pages[i]; 2429 if (extent_page_id == 0) { 2430 /* No Extent Page to persist */ 2431 assert(spdk_blob_is_thin_provisioned(blob)); 2432 continue; 2433 } 2434 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2435 ctx->next_extent_page = i + 1; 2436 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2437 if (rc < 0) { 2438 blob_persist_complete(seq, ctx, rc); 2439 return; 2440 } 2441 2442 blob->state = SPDK_BLOB_STATE_DIRTY; 2443 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2444 2445 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2446 2447 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2448 bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 2449 blob_persist_write_extent_pages, ctx); 2450 return; 2451 } 2452 2453 blob_persist_generate_new_md(ctx); 2454 } 2455 2456 static void 2457 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2458 { 2459 struct spdk_blob_persist_ctx *ctx = cb_arg; 2460 struct spdk_blob *blob = ctx->blob; 2461 2462 if (bserrno != 0) { 2463 blob_persist_complete(seq, ctx, bserrno); 2464 return; 2465 } 2466 2467 if (blob->active.num_pages == 0) { 2468 /* This is the signal that the blob should be deleted. 2469 * Immediately jump to the clean up routine. */ 2470 assert(blob->clean.num_pages > 0); 2471 blob->state = SPDK_BLOB_STATE_CLEAN; 2472 blob_persist_zero_pages(seq, ctx, 0); 2473 return; 2474 2475 } 2476 2477 if (blob->clean.num_clusters < blob->active.num_clusters) { 2478 /* Blob was resized up */ 2479 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2480 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2481 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2482 /* Blob was resized down */ 2483 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2484 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2485 } else { 2486 /* No change in size occurred */ 2487 blob_persist_generate_new_md(ctx); 2488 return; 2489 } 2490 2491 blob_persist_write_extent_pages(seq, ctx, 0); 2492 } 2493 2494 struct spdk_bs_mark_dirty { 2495 struct spdk_blob_store *bs; 2496 struct spdk_bs_super_block *super; 2497 spdk_bs_sequence_cpl cb_fn; 2498 void *cb_arg; 2499 }; 2500 2501 static void 2502 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2503 { 2504 struct spdk_bs_mark_dirty *ctx = cb_arg; 2505 2506 if (bserrno == 0) { 2507 ctx->bs->clean = 0; 2508 } 2509 2510 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 2511 2512 spdk_free(ctx->super); 2513 free(ctx); 2514 } 2515 2516 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2517 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2518 2519 2520 static void 2521 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2522 { 2523 struct spdk_bs_mark_dirty *ctx = cb_arg; 2524 int rc; 2525 2526 if (bserrno != 0) { 2527 bs_mark_dirty_write_cpl(seq, ctx, bserrno); 2528 return; 2529 } 2530 2531 rc = bs_super_validate(ctx->super, ctx->bs); 2532 if (rc != 0) { 2533 bs_mark_dirty_write_cpl(seq, ctx, rc); 2534 return; 2535 } 2536 2537 ctx->super->clean = 0; 2538 if (ctx->super->size == 0) { 2539 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 2540 } 
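/* A super->size of 0 (backfilled above from the device geometry) is presumably left by an older super block written before the size field existed. The rewrite below stores clean = 0 before any blob metadata mutation reaches the disk, so if a crash happens later, the next load sees clean == 0 and runs mask recovery. */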
2541 2542 bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx); 2543 } 2544 2545 static void 2546 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2547 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2548 { 2549 struct spdk_bs_mark_dirty *ctx; 2550 2551 /* Blobstore is already marked dirty */ 2552 if (bs->clean == 0) { 2553 cb_fn(seq, cb_arg, 0); 2554 return; 2555 } 2556 2557 ctx = calloc(1, sizeof(*ctx)); 2558 if (!ctx) { 2559 cb_fn(seq, cb_arg, -ENOMEM); 2560 return; 2561 } 2562 ctx->bs = bs; 2563 ctx->cb_fn = cb_fn; 2564 ctx->cb_arg = cb_arg; 2565 2566 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2567 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2568 if (!ctx->super) { 2569 free(ctx); 2570 cb_fn(seq, cb_arg, -ENOMEM); 2571 return; 2572 } 2573 2574 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 2575 bs_byte_to_lba(bs, sizeof(*ctx->super)), 2576 bs_mark_dirty_write, ctx); 2577 } 2578 2579 /* Write a blob to disk */ 2580 static void 2581 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2582 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2583 { 2584 struct spdk_blob_persist_ctx *ctx; 2585 2586 blob_verify_md_op(blob); 2587 2588 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2589 cb_fn(seq, cb_arg, 0); 2590 return; 2591 } 2592 2593 ctx = calloc(1, sizeof(*ctx)); 2594 if (!ctx) { 2595 cb_fn(seq, cb_arg, -ENOMEM); 2596 return; 2597 } 2598 ctx->blob = blob; 2599 ctx->seq = seq; 2600 ctx->cb_fn = cb_fn; 2601 ctx->cb_arg = cb_arg; 2602 2603 /* Multiple blob persists can affect one another, via blob->state or 2604 * blob mutable data changes. To prevent it, queue up the persists. */ 2605 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2606 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2607 return; 2608 } 2609 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2610 2611 bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx); 2612 } 2613 2614 struct spdk_blob_copy_cluster_ctx { 2615 struct spdk_blob *blob; 2616 uint8_t *buf; 2617 uint64_t page; 2618 uint64_t new_cluster; 2619 uint32_t new_extent_page; 2620 spdk_bs_sequence_t *seq; 2621 struct spdk_blob_md_page *new_cluster_page; 2622 }; 2623 2624 struct spdk_blob_free_cluster_ctx { 2625 struct spdk_blob *blob; 2626 uint64_t page; 2627 struct spdk_blob_md_page *md_page; 2628 uint64_t cluster_num; 2629 uint32_t extent_page; 2630 spdk_bs_sequence_t *seq; 2631 }; 2632 2633 static void 2634 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2635 { 2636 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2637 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2638 TAILQ_HEAD(, spdk_bs_request_set) requests; 2639 spdk_bs_user_op_t *op; 2640 2641 TAILQ_INIT(&requests); 2642 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2643 2644 while (!TAILQ_EMPTY(&requests)) { 2645 op = TAILQ_FIRST(&requests); 2646 TAILQ_REMOVE(&requests, op, link); 2647 if (bserrno == 0) { 2648 bs_user_op_execute(op); 2649 } else { 2650 bs_user_op_abort(op, bserrno); 2651 } 2652 } 2653 2654 spdk_free(ctx->buf); 2655 free(ctx); 2656 } 2657 2658 static void 2659 blob_free_cluster_cpl(void *cb_arg, int bserrno) 2660 { 2661 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 2662 spdk_bs_sequence_t *seq = ctx->seq; 2663 2664 bs_sequence_finish(seq, bserrno); 2665 2666 free(ctx); 2667 } 2668 2669 static void 2670 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx) 2671 { 2672 
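/* Undoes the speculative allocation performed in bs_allocate_and_copy_cluster(): the cluster is returned to the bit pool and, if an extent page was reserved for it, that md page is released as well. This runs when the metadata insert fails, including the benign -EEXIST case where another thread allocated the cluster first. */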
spdk_spin_lock(&ctx->blob->bs->used_lock); 2673 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2674 if (ctx->new_extent_page != 0) { 2675 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2676 } 2677 spdk_spin_unlock(&ctx->blob->bs->used_lock); 2678 } 2679 2680 static void 2681 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno) 2682 { 2683 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2684 2685 if (bserrno) { 2686 SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno); 2687 } 2688 2689 blob_insert_cluster_revert(ctx); 2690 bs_sequence_finish(ctx->seq, bserrno); 2691 } 2692 2693 static void 2694 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx) 2695 { 2696 struct spdk_bs_cpl cpl; 2697 spdk_bs_batch_t *batch; 2698 struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel); 2699 2700 /* 2701 * We allocated a cluster and we copied data to it. But now, we realized that we don't need 2702 * this cluster and we want to release it. We must ensure that we clear the data on this 2703 * cluster. 2704 * The cluster may later be re-allocated by a thick-provisioned blob for example. When 2705 * reading from this thick-provisioned blob before writing data, we should read zeroes. 2706 */ 2707 2708 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2709 cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl; 2710 cpl.u.blob_basic.cb_arg = ctx; 2711 2712 batch = bs_batch_open(ch, &cpl, ctx->blob); 2713 if (!batch) { 2714 blob_insert_cluster_clear_cpl(ctx, -ENOMEM); 2715 return; 2716 } 2717 2718 bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2719 bs_cluster_to_lba(ctx->blob->bs, 1)); 2720 bs_batch_close(batch); 2721 } 2722 2723 static void 2724 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2725 { 2726 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2727 2728 if (bserrno) { 2729 if (bserrno == -EEXIST) { 2730 /* The metadata insert failed because another thread 2731 * allocated the cluster first. Clear and free our cluster 2732 * but continue without error. 
*/ 2733 blob_insert_cluster_clear(ctx); 2734 return; 2735 } 2736 2737 blob_insert_cluster_revert(ctx); 2738 } 2739 2740 bs_sequence_finish(ctx->seq, bserrno); 2741 } 2742 2743 static void 2744 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2745 { 2746 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2747 uint32_t cluster_number; 2748 2749 if (bserrno) { 2750 /* The write failed, so jump to the final completion handler */ 2751 bs_sequence_finish(seq, bserrno); 2752 return; 2753 } 2754 2755 cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page); 2756 2757 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2758 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2759 } 2760 2761 static void 2762 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2763 { 2764 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2765 2766 if (bserrno != 0) { 2767 /* The read failed, so jump to the final completion handler */ 2768 bs_sequence_finish(seq, bserrno); 2769 return; 2770 } 2771 2772 /* Write whole cluster */ 2773 bs_sequence_write_dev(seq, ctx->buf, 2774 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2775 bs_cluster_to_lba(ctx->blob->bs, 1), 2776 blob_write_copy_cpl, ctx); 2777 } 2778 2779 static bool 2780 blob_can_copy(struct spdk_blob *blob, uint32_t cluster_start_page, uint64_t *base_lba) 2781 { 2782 uint64_t lba = bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page); 2783 2784 return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) && 2785 blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba); 2786 } 2787 2788 static void 2789 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba) 2790 { 2791 struct spdk_blob *blob = ctx->blob; 2792 uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz); 2793 2794 bs_sequence_copy_dev(ctx->seq, 2795 bs_cluster_to_lba(blob->bs, ctx->new_cluster), 2796 src_lba, 2797 lba_count, 2798 blob_write_copy_cpl, ctx); 2799 } 2800 2801 static void 2802 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2803 struct spdk_io_channel *_ch, 2804 uint64_t io_unit, spdk_bs_user_op_t *op) 2805 { 2806 struct spdk_bs_cpl cpl; 2807 struct spdk_bs_channel *ch; 2808 struct spdk_blob_copy_cluster_ctx *ctx; 2809 uint32_t cluster_start_page; 2810 uint32_t cluster_number; 2811 bool is_zeroes; 2812 bool can_copy; 2813 bool is_valid_range; 2814 uint64_t copy_src_lba; 2815 int rc; 2816 2817 ch = spdk_io_channel_get_ctx(_ch); 2818 2819 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2820 /* There are already operations pending. Queue this user op 2821 * and return because it will be re-executed when the outstanding 2822 * cluster allocation completes. */ 2823 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2824 return; 2825 } 2826 2827 /* Round the io_unit offset down to the first page in the cluster */ 2828 cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit); 2829 2830 /* Calculate which index in the metadata cluster array the corresponding 2831 * cluster is supposed to be at. 
*/ 2832 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2833 2834 ctx = calloc(1, sizeof(*ctx)); 2835 if (!ctx) { 2836 bs_user_op_abort(op, -ENOMEM); 2837 return; 2838 } 2839 2840 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2841 2842 ctx->blob = blob; 2843 ctx->page = cluster_start_page; 2844 ctx->new_cluster_page = ch->new_cluster_page; 2845 memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE); 2846 2847 /* Check if the cluster that we intend to do CoW for is valid for 2848 * the backing dev. For zeroes backing dev, it'll be always valid. 2849 * For other backing dev e.g. a snapshot, it could be invalid if 2850 * the blob has been resized after snapshot was taken. */ 2851 is_valid_range = blob->back_bs_dev->is_range_valid(blob->back_bs_dev, 2852 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2853 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2854 2855 can_copy = is_valid_range && blob_can_copy(blob, cluster_start_page, ©_src_lba); 2856 2857 is_zeroes = is_valid_range && blob->back_bs_dev->is_zeroes(blob->back_bs_dev, 2858 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2859 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2860 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) { 2861 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2862 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2863 if (!ctx->buf) { 2864 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2865 blob->bs->cluster_sz); 2866 free(ctx); 2867 bs_user_op_abort(op, -ENOMEM); 2868 return; 2869 } 2870 } 2871 2872 spdk_spin_lock(&blob->bs->used_lock); 2873 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2874 false); 2875 spdk_spin_unlock(&blob->bs->used_lock); 2876 if (rc != 0) { 2877 spdk_free(ctx->buf); 2878 free(ctx); 2879 bs_user_op_abort(op, rc); 2880 return; 2881 } 2882 2883 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2884 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2885 cpl.u.blob_basic.cb_arg = ctx; 2886 2887 ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob); 2888 if (!ctx->seq) { 2889 spdk_spin_lock(&blob->bs->used_lock); 2890 bs_release_cluster(blob->bs, ctx->new_cluster); 2891 spdk_spin_unlock(&blob->bs->used_lock); 2892 spdk_free(ctx->buf); 2893 free(ctx); 2894 bs_user_op_abort(op, -ENOMEM); 2895 return; 2896 } 2897 2898 /* Queue the user op to block other incoming operations */ 2899 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2900 2901 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) { 2902 if (can_copy) { 2903 blob_copy(ctx, op, copy_src_lba); 2904 } else { 2905 /* Read cluster from backing device */ 2906 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2907 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2908 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2909 blob_write_copy, ctx); 2910 } 2911 2912 } else { 2913 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2914 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2915 } 2916 } 2917 2918 static inline bool 2919 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2920 uint64_t *lba, uint64_t *lba_count) 2921 { 2922 *lba_count = length; 2923 2924 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2925 assert(blob->back_bs_dev != NULL); 2926 *lba = bs_io_unit_to_back_dev_lba(blob, io_unit); 2927 *lba_count = 
bs_io_unit_to_back_dev_lba(blob, *lba_count); 2928 return false; 2929 } else { 2930 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2931 return true; 2932 } 2933 } 2934 2935 struct op_split_ctx { 2936 struct spdk_blob *blob; 2937 struct spdk_io_channel *channel; 2938 uint64_t io_unit_offset; 2939 uint64_t io_units_remaining; 2940 void *curr_payload; 2941 enum spdk_blob_op_type op_type; 2942 spdk_bs_sequence_t *seq; 2943 bool in_submit_ctx; 2944 bool completed_in_submit_ctx; 2945 bool done; 2946 }; 2947 2948 static void 2949 blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2950 { 2951 struct op_split_ctx *ctx = cb_arg; 2952 struct spdk_blob *blob = ctx->blob; 2953 struct spdk_io_channel *ch = ctx->channel; 2954 enum spdk_blob_op_type op_type = ctx->op_type; 2955 uint8_t *buf; 2956 uint64_t offset; 2957 uint64_t length; 2958 uint64_t op_length; 2959 2960 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2961 bs_sequence_finish(ctx->seq, bserrno); 2962 if (ctx->in_submit_ctx) { 2963 /* Defer freeing of the ctx object, since it will be 2964 * accessed when this unwinds back to the submission 2965 * context. 2966 */ 2967 ctx->done = true; 2968 } else { 2969 free(ctx); 2970 } 2971 return; 2972 } 2973 2974 if (ctx->in_submit_ctx) { 2975 /* If this split operation completed in the context 2976 * of its submission, mark the flag and return immediately 2977 * to avoid recursion. 2978 */ 2979 ctx->completed_in_submit_ctx = true; 2980 return; 2981 } 2982 2983 while (true) { 2984 ctx->completed_in_submit_ctx = false; 2985 2986 offset = ctx->io_unit_offset; 2987 length = ctx->io_units_remaining; 2988 buf = ctx->curr_payload; 2989 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob, 2990 offset)); 2991 2992 /* Update length and payload for next operation */ 2993 ctx->io_units_remaining -= op_length; 2994 ctx->io_unit_offset += op_length; 2995 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 2996 ctx->curr_payload += op_length * blob->bs->io_unit_size; 2997 } 2998 2999 assert(!ctx->in_submit_ctx); 3000 ctx->in_submit_ctx = true; 3001 3002 switch (op_type) { 3003 case SPDK_BLOB_READ: 3004 spdk_blob_io_read(blob, ch, buf, offset, op_length, 3005 blob_request_submit_op_split_next, ctx); 3006 break; 3007 case SPDK_BLOB_WRITE: 3008 spdk_blob_io_write(blob, ch, buf, offset, op_length, 3009 blob_request_submit_op_split_next, ctx); 3010 break; 3011 case SPDK_BLOB_UNMAP: 3012 spdk_blob_io_unmap(blob, ch, offset, op_length, 3013 blob_request_submit_op_split_next, ctx); 3014 break; 3015 case SPDK_BLOB_WRITE_ZEROES: 3016 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 3017 blob_request_submit_op_split_next, ctx); 3018 break; 3019 case SPDK_BLOB_READV: 3020 case SPDK_BLOB_WRITEV: 3021 SPDK_ERRLOG("readv/writev not valid\n"); 3022 bs_sequence_finish(ctx->seq, -EINVAL); 3023 free(ctx); 3024 return; 3025 } 3026 3027 #ifndef __clang_analyzer__ 3028 /* scan-build reports a false positive around accessing the ctx here. It 3029 * forms a path that recursively calls this function, but then says 3030 * "assuming ctx->in_submit_ctx is false", when that isn't possible. 3031 * This path does free(ctx), returns to here, and reports a use-after-free 3032 * bug. Wrapping this bit of code so that scan-build doesn't see it 3033 * works around the scan-build bug. 3034 */ 3035 assert(ctx->in_submit_ctx); 3036 ctx->in_submit_ctx = false; 3037 3038 /* If the operation completed immediately, loop back and submit the 3039 * next operation. 
Otherwise we can return and the next split 3040 * operation will get submitted when this current operation is 3041 * later completed asynchronously. 3042 */ 3043 if (ctx->completed_in_submit_ctx) { 3044 continue; 3045 } else if (ctx->done) { 3046 free(ctx); 3047 } 3048 #endif 3049 break; 3050 } 3051 } 3052 3053 static void 3054 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 3055 void *payload, uint64_t offset, uint64_t length, 3056 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3057 { 3058 struct op_split_ctx *ctx; 3059 spdk_bs_sequence_t *seq; 3060 struct spdk_bs_cpl cpl; 3061 3062 assert(blob != NULL); 3063 3064 ctx = calloc(1, sizeof(struct op_split_ctx)); 3065 if (ctx == NULL) { 3066 cb_fn(cb_arg, -ENOMEM); 3067 return; 3068 } 3069 3070 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3071 cpl.u.blob_basic.cb_fn = cb_fn; 3072 cpl.u.blob_basic.cb_arg = cb_arg; 3073 3074 seq = bs_sequence_start_blob(ch, &cpl, blob); 3075 if (!seq) { 3076 free(ctx); 3077 cb_fn(cb_arg, -ENOMEM); 3078 return; 3079 } 3080 3081 ctx->blob = blob; 3082 ctx->channel = ch; 3083 ctx->curr_payload = payload; 3084 ctx->io_unit_offset = offset; 3085 ctx->io_units_remaining = length; 3086 ctx->op_type = op_type; 3087 ctx->seq = seq; 3088 3089 blob_request_submit_op_split_next(ctx, 0); 3090 } 3091 3092 static void 3093 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno) 3094 { 3095 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 3096 3097 if (bserrno) { 3098 bs_sequence_finish(ctx->seq, bserrno); 3099 free(ctx); 3100 return; 3101 } 3102 3103 blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num, 3104 ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx); 3105 } 3106 3107 static void 3108 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 3109 void *payload, uint64_t offset, uint64_t length, 3110 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3111 { 3112 struct spdk_bs_cpl cpl; 3113 uint64_t lba; 3114 uint64_t lba_count; 3115 bool is_allocated; 3116 3117 assert(blob != NULL); 3118 3119 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3120 cpl.u.blob_basic.cb_fn = cb_fn; 3121 cpl.u.blob_basic.cb_arg = cb_arg; 3122 3123 if (blob->frozen_refcnt) { 3124 /* This blob I/O is frozen */ 3125 spdk_bs_user_op_t *op; 3126 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3127 3128 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3129 if (!op) { 3130 cb_fn(cb_arg, -ENOMEM); 3131 return; 3132 } 3133 3134 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3135 3136 return; 3137 } 3138 3139 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3140 3141 switch (op_type) { 3142 case SPDK_BLOB_READ: { 3143 spdk_bs_batch_t *batch; 3144 3145 batch = bs_batch_open(_ch, &cpl, blob); 3146 if (!batch) { 3147 cb_fn(cb_arg, -ENOMEM); 3148 return; 3149 } 3150 3151 if (is_allocated) { 3152 /* Read from the blob */ 3153 bs_batch_read_dev(batch, payload, lba, lba_count); 3154 } else { 3155 /* Read from the backing block device */ 3156 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 3157 } 3158 3159 bs_batch_close(batch); 3160 break; 3161 } 3162 case SPDK_BLOB_WRITE: 3163 case SPDK_BLOB_WRITE_ZEROES: { 3164 if (is_allocated) { 3165 /* Write to the blob */ 3166 spdk_bs_batch_t *batch; 3167 3168 if (lba_count == 0) { 3169 cb_fn(cb_arg, 0); 3170 return; 3171 } 3172 3173 batch = bs_batch_open(_ch, &cpl, blob); 3174 if (!batch) 
{ 3175 cb_fn(cb_arg, -ENOMEM); 3176 return; 3177 } 3178 3179 if (op_type == SPDK_BLOB_WRITE) { 3180 bs_batch_write_dev(batch, payload, lba, lba_count); 3181 } else { 3182 bs_batch_write_zeroes_dev(batch, lba, lba_count); 3183 } 3184 3185 bs_batch_close(batch); 3186 } else { 3187 /* Queue this operation and allocate the cluster */ 3188 spdk_bs_user_op_t *op; 3189 3190 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3191 if (!op) { 3192 cb_fn(cb_arg, -ENOMEM); 3193 return; 3194 } 3195 3196 bs_allocate_and_copy_cluster(blob, _ch, offset, op); 3197 } 3198 break; 3199 } 3200 case SPDK_BLOB_UNMAP: { 3201 struct spdk_blob_free_cluster_ctx *ctx = NULL; 3202 spdk_bs_batch_t *batch; 3203 3204 /* If the unmap is cluster-aligned and covers a whole cluster, release the cluster */ 3205 if (spdk_blob_is_thin_provisioned(blob) && is_allocated && 3206 bs_io_units_per_cluster(blob) == length) { 3207 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3208 uint32_t cluster_start_page; 3209 uint32_t cluster_number; 3210 3211 assert(offset % bs_io_units_per_cluster(blob) == 0); 3212 3213 /* Round the io_unit offset down to the first page in the cluster */ 3214 cluster_start_page = bs_io_unit_to_cluster_start(blob, offset); 3215 3216 /* Calculate which index in the metadata cluster array the corresponding 3217 * cluster is supposed to be at. */ 3218 cluster_number = bs_io_unit_to_cluster_number(blob, offset); 3219 3220 ctx = calloc(1, sizeof(*ctx)); 3221 if (!ctx) { 3222 cb_fn(cb_arg, -ENOMEM); 3223 return; 3224 } 3225 /* When freeing a cluster the flow should be (in order): 3226 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak 3227 * old data) 3228 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the 3229 * cluster), update and sync metadata freeing the cluster 3230 * 3. Once metadata update is done, complete the user unmap request 3231 */
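/* Note on the completion chaining set up below: bs_sequence_start_bs() takes its own copy of cpl while cb_fn still points at the caller's completion, so the sequence reports to the user only via bs_sequence_finish(). cpl is then redirected to spdk_free_cluster_unmap_complete before the batch is opened, so the unmap batch completion first drives the metadata update on the md thread and only blob_free_cluster_cpl() finishes the sequence. */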
3232 ctx->blob = blob; 3233 ctx->page = cluster_start_page; 3234 ctx->cluster_num = cluster_number; 3235 ctx->md_page = bs_channel->new_cluster_page; 3236 ctx->seq = bs_sequence_start_bs(_ch, &cpl); 3237 if (!ctx->seq) { 3238 free(ctx); 3239 cb_fn(cb_arg, -ENOMEM); 3240 return; 3241 } 3242 3243 if (blob->use_extent_table) { 3244 ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number); 3245 } 3246 3247 cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete; 3248 cpl.u.blob_basic.cb_arg = ctx; 3249 } 3250 3251 batch = bs_batch_open(_ch, &cpl, blob); 3252 if (!batch) { 3253 free(ctx); 3254 cb_fn(cb_arg, -ENOMEM); 3255 return; 3256 } 3257 3258 if (is_allocated) { 3259 bs_batch_unmap_dev(batch, lba, lba_count); 3260 } 3261 3262 bs_batch_close(batch); 3263 break; 3264 } 3265 case SPDK_BLOB_READV: 3266 case SPDK_BLOB_WRITEV: 3267 SPDK_ERRLOG("readv/writev not valid\n"); 3268 cb_fn(cb_arg, -EINVAL); 3269 break; 3270 } 3271 } 3272 3273 static void 3274 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3275 void *payload, uint64_t offset, uint64_t length, 3276 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3277 { 3278 assert(blob != NULL); 3279 3280 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 3281 cb_fn(cb_arg, -EPERM); 3282 return; 3283 } 3284 3285 if (length == 0) { 3286 cb_fn(cb_arg, 0); 3287 return; 3288 } 3289 3290 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3291 cb_fn(cb_arg, -EINVAL); 3292 return; 3293 } 3294 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) { 3295 blob_request_submit_op_single(_channel, blob, payload, offset, length, 3296 cb_fn, cb_arg, op_type); 3297 } else { 3298 blob_request_submit_op_split(_channel, blob, payload, offset, length, 3299 cb_fn, cb_arg, op_type); 3300 } 3301 } 3302 3303 struct rw_iov_ctx { 3304 struct spdk_blob *blob; 3305 struct spdk_io_channel *channel; 3306 spdk_blob_op_complete cb_fn; 3307 void *cb_arg; 3308 bool read; 3309 int iovcnt; 3310 struct iovec *orig_iov; 3311 uint64_t io_unit_offset; 3312 uint64_t io_units_remaining; 3313 uint64_t io_units_done; 3314 struct spdk_blob_ext_io_opts *ext_io_opts; 3315 struct iovec iov[0]; 3316 }; 3317 3318 static void 3319 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3320 { 3321 assert(cb_arg == NULL); 3322 bs_sequence_finish(seq, bserrno); 3323 } 3324 3325 static void 3326 rw_iov_split_next(void *cb_arg, int bserrno) 3327 { 3328 struct rw_iov_ctx *ctx = cb_arg; 3329 struct spdk_blob *blob = ctx->blob; 3330 struct iovec *iov, *orig_iov; 3331 int iovcnt; 3332 size_t orig_iovoff; 3333 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 3334 uint64_t byte_count; 3335 3336 if (bserrno != 0 || ctx->io_units_remaining == 0) { 3337 ctx->cb_fn(ctx->cb_arg, bserrno); 3338 free(ctx); 3339 return; 3340 } 3341 3342 io_unit_offset = ctx->io_unit_offset; 3343 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 3344 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 3345 /* 3346 * Get index and offset into the original iov array for our current position in the I/O sequence. 3347 * byte_count keeps track of how many bytes remain until orig_iov and orig_iovoff 3348 * point to the current position in the I/O sequence. 
3349 */ 3350 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 3351 orig_iov = &ctx->orig_iov[0]; 3352 orig_iovoff = 0; 3353 while (byte_count > 0) { 3354 if (byte_count >= orig_iov->iov_len) { 3355 byte_count -= orig_iov->iov_len; 3356 orig_iov++; 3357 } else { 3358 orig_iovoff = byte_count; 3359 byte_count = 0; 3360 } 3361 } 3362 3363 /* 3364 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 3365 * bytes of this next I/O remain to be accounted for in the new iov array. 3366 */ 3367 byte_count = io_units_count * blob->bs->io_unit_size; 3368 iov = &ctx->iov[0]; 3369 iovcnt = 0; 3370 while (byte_count > 0) { 3371 assert(iovcnt < ctx->iovcnt); 3372 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 3373 iov->iov_base = orig_iov->iov_base + orig_iovoff; 3374 byte_count -= iov->iov_len; 3375 orig_iovoff = 0; 3376 orig_iov++; 3377 iov++; 3378 iovcnt++; 3379 } 3380 3381 ctx->io_unit_offset += io_units_count; 3382 ctx->io_units_remaining -= io_units_count; 3383 ctx->io_units_done += io_units_count; 3384 iov = &ctx->iov[0]; 3385 3386 if (ctx->read) { 3387 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3388 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3389 } else { 3390 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3391 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3392 } 3393 } 3394 3395 static void 3396 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3397 struct iovec *iov, int iovcnt, 3398 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 3399 struct spdk_blob_ext_io_opts *ext_io_opts) 3400 { 3401 struct spdk_bs_cpl cpl; 3402 3403 assert(blob != NULL); 3404 3405 if (!read && blob->data_ro) { 3406 cb_fn(cb_arg, -EPERM); 3407 return; 3408 } 3409 3410 if (length == 0) { 3411 cb_fn(cb_arg, 0); 3412 return; 3413 } 3414 3415 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3416 cb_fn(cb_arg, -EINVAL); 3417 return; 3418 } 3419 3420 /* 3421 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 3422 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 3423 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 3424 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 3425 * to allocate a separate iov array and split the I/O such that none of the resulting 3426 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 3427 * but since this case happens very infrequently, any performance impact will be negligible. 3428 * 3429 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 3430 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 3431 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 3432 * when the batch was completed, to allow for freeing the memory for the iov arrays. 
3433 */ 3434 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 3435 uint64_t lba_count; 3436 uint64_t lba; 3437 bool is_allocated; 3438 3439 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3440 cpl.u.blob_basic.cb_fn = cb_fn; 3441 cpl.u.blob_basic.cb_arg = cb_arg; 3442 3443 if (blob->frozen_refcnt) { 3444 /* This blob I/O is frozen */ 3445 enum spdk_blob_op_type op_type; 3446 spdk_bs_user_op_t *op; 3447 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 3448 3449 op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 3450 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 3451 if (!op) { 3452 cb_fn(cb_arg, -ENOMEM); 3453 return; 3454 } 3455 3456 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3457 3458 return; 3459 } 3460 3461 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3462 3463 if (read) { 3464 spdk_bs_sequence_t *seq; 3465 3466 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3467 if (!seq) { 3468 cb_fn(cb_arg, -ENOMEM); 3469 return; 3470 } 3471 3472 seq->ext_io_opts = ext_io_opts; 3473 3474 if (is_allocated) { 3475 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3476 } else { 3477 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3478 rw_iov_done, NULL); 3479 } 3480 } else { 3481 if (is_allocated) { 3482 spdk_bs_sequence_t *seq; 3483 3484 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3485 if (!seq) { 3486 cb_fn(cb_arg, -ENOMEM); 3487 return; 3488 } 3489 3490 seq->ext_io_opts = ext_io_opts; 3491 3492 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3493 } else { 3494 /* Queue this operation and allocate the cluster */ 3495 spdk_bs_user_op_t *op; 3496 3497 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3498 length); 3499 if (!op) { 3500 cb_fn(cb_arg, -ENOMEM); 3501 return; 3502 } 3503 3504 op->ext_io_opts = ext_io_opts; 3505 3506 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3507 } 3508 } 3509 } else { 3510 struct rw_iov_ctx *ctx; 3511 3512 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3513 if (ctx == NULL) { 3514 cb_fn(cb_arg, -ENOMEM); 3515 return; 3516 } 3517 3518 ctx->blob = blob; 3519 ctx->channel = _channel; 3520 ctx->cb_fn = cb_fn; 3521 ctx->cb_arg = cb_arg; 3522 ctx->read = read; 3523 ctx->orig_iov = iov; 3524 ctx->iovcnt = iovcnt; 3525 ctx->io_unit_offset = offset; 3526 ctx->io_units_remaining = length; 3527 ctx->io_units_done = 0; 3528 ctx->ext_io_opts = ext_io_opts; 3529 3530 rw_iov_split_next(ctx, 0); 3531 } 3532 } 3533 3534 static struct spdk_blob * 3535 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3536 { 3537 struct spdk_blob find; 3538 3539 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3540 return NULL; 3541 } 3542 3543 find.id = blobid; 3544 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3545 } 3546 3547 static void 3548 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3549 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3550 { 3551 assert(blob != NULL); 3552 *snapshot_entry = NULL; 3553 *clone_entry = NULL; 3554 3555 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3556 return; 3557 } 3558 3559 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3560 if ((*snapshot_entry)->id == blob->parent_id) { 3561 break; 3562 } 3563 } 3564 3565 if (*snapshot_entry != NULL) { 3566 TAILQ_FOREACH(*clone_entry, 
&(*snapshot_entry)->clones, link) { 3567 if ((*clone_entry)->id == blob->id) { 3568 break; 3569 } 3570 } 3571 3572 assert(*clone_entry != NULL); 3573 } 3574 } 3575 3576 static int 3577 bs_channel_create(void *io_device, void *ctx_buf) 3578 { 3579 struct spdk_blob_store *bs = io_device; 3580 struct spdk_bs_channel *channel = ctx_buf; 3581 struct spdk_bs_dev *dev; 3582 uint32_t max_ops = bs->max_channel_ops; 3583 uint32_t i; 3584 3585 dev = bs->dev; 3586 3587 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3588 if (!channel->req_mem) { 3589 return -1; 3590 } 3591 3592 TAILQ_INIT(&channel->reqs); 3593 3594 for (i = 0; i < max_ops; i++) { 3595 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3596 } 3597 3598 channel->bs = bs; 3599 channel->dev = dev; 3600 channel->dev_channel = dev->create_channel(dev); 3601 3602 if (!channel->dev_channel) { 3603 SPDK_ERRLOG("Failed to create device channel.\n"); 3604 free(channel->req_mem); 3605 return -1; 3606 } 3607 3608 channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, 3609 SPDK_MALLOC_DMA); 3610 if (!channel->new_cluster_page) { 3611 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3612 free(channel->req_mem); 3613 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3614 return -1; 3615 } 3616 3617 TAILQ_INIT(&channel->need_cluster_alloc); 3618 TAILQ_INIT(&channel->queued_io); 3619 RB_INIT(&channel->esnap_channels); 3620 3621 return 0; 3622 } 3623 3624 static void 3625 bs_channel_destroy(void *io_device, void *ctx_buf) 3626 { 3627 struct spdk_bs_channel *channel = ctx_buf; 3628 spdk_bs_user_op_t *op; 3629 3630 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3631 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3632 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3633 bs_user_op_abort(op, -EIO); 3634 } 3635 3636 while (!TAILQ_EMPTY(&channel->queued_io)) { 3637 op = TAILQ_FIRST(&channel->queued_io); 3638 TAILQ_REMOVE(&channel->queued_io, op, link); 3639 bs_user_op_abort(op, -EIO); 3640 } 3641 3642 blob_esnap_destroy_bs_channel(channel); 3643 3644 free(channel->req_mem); 3645 spdk_free(channel->new_cluster_page); 3646 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3647 } 3648 3649 static void 3650 bs_dev_destroy(void *io_device) 3651 { 3652 struct spdk_blob_store *bs = io_device; 3653 struct spdk_blob *blob, *blob_tmp; 3654 3655 bs->dev->destroy(bs->dev); 3656 3657 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3658 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3659 spdk_bit_array_clear(bs->open_blobids, blob->id); 3660 blob_free(blob); 3661 } 3662 3663 spdk_spin_destroy(&bs->used_lock); 3664 3665 spdk_bit_array_free(&bs->open_blobids); 3666 spdk_bit_array_free(&bs->used_blobids); 3667 spdk_bit_array_free(&bs->used_md_pages); 3668 spdk_bit_pool_free(&bs->used_clusters); 3669 /* 3670 * If this function is called for any reason except a successful unload, 3671 * the unload_cpl type will be NONE and this will be a nop. 
3672 */ 3673 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3674 3675 free(bs); 3676 } 3677 3678 static int 3679 bs_blob_list_add(struct spdk_blob *blob) 3680 { 3681 spdk_blob_id snapshot_id; 3682 struct spdk_blob_list *snapshot_entry = NULL; 3683 struct spdk_blob_list *clone_entry = NULL; 3684 3685 assert(blob != NULL); 3686 3687 snapshot_id = blob->parent_id; 3688 if (snapshot_id == SPDK_BLOBID_INVALID || 3689 snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 3690 return 0; 3691 } 3692 3693 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3694 if (snapshot_entry == NULL) { 3695 /* Snapshot not found */ 3696 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3697 if (snapshot_entry == NULL) { 3698 return -ENOMEM; 3699 } 3700 snapshot_entry->id = snapshot_id; 3701 TAILQ_INIT(&snapshot_entry->clones); 3702 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3703 } else { 3704 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3705 if (clone_entry->id == blob->id) { 3706 break; 3707 } 3708 } 3709 } 3710 3711 if (clone_entry == NULL) { 3712 /* Clone not found */ 3713 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3714 if (clone_entry == NULL) { 3715 return -ENOMEM; 3716 } 3717 clone_entry->id = blob->id; 3718 TAILQ_INIT(&clone_entry->clones); 3719 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3720 snapshot_entry->clone_count++; 3721 } 3722 3723 return 0; 3724 } 3725 3726 static void 3727 bs_blob_list_remove(struct spdk_blob *blob) 3728 { 3729 struct spdk_blob_list *snapshot_entry = NULL; 3730 struct spdk_blob_list *clone_entry = NULL; 3731 3732 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3733 3734 if (snapshot_entry == NULL) { 3735 return; 3736 } 3737 3738 blob->parent_id = SPDK_BLOBID_INVALID; 3739 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3740 free(clone_entry); 3741 3742 snapshot_entry->clone_count--; 3743 } 3744 3745 static int 3746 bs_blob_list_free(struct spdk_blob_store *bs) 3747 { 3748 struct spdk_blob_list *snapshot_entry; 3749 struct spdk_blob_list *snapshot_entry_tmp; 3750 struct spdk_blob_list *clone_entry; 3751 struct spdk_blob_list *clone_entry_tmp; 3752 3753 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3754 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3755 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3756 free(clone_entry); 3757 } 3758 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3759 free(snapshot_entry); 3760 } 3761 3762 return 0; 3763 } 3764 3765 static void 3766 bs_free(struct spdk_blob_store *bs) 3767 { 3768 bs_blob_list_free(bs); 3769 3770 bs_unregister_md_thread(bs); 3771 spdk_io_device_unregister(bs, bs_dev_destroy); 3772 } 3773 3774 void 3775 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3776 { 3777 3778 if (!opts) { 3779 SPDK_ERRLOG("opts should not be NULL\n"); 3780 return; 3781 } 3782 3783 if (!opts_size) { 3784 SPDK_ERRLOG("opts_size should not be zero value\n"); 3785 return; 3786 } 3787 3788 memset(opts, 0, opts_size); 3789 opts->opts_size = opts_size; 3790 3791 #define FIELD_OK(field) \ 3792 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3793 3794 #define SET_FIELD(field, value) \ 3795 if (FIELD_OK(field)) { \ 3796 opts->field = value; \ 3797 } \ 3798 3799 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3800 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3801 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3802 
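/* SET_FIELD assigns a default only when FIELD_OK shows the caller's structure is large enough to contain that member, keeping spdk_bs_opts_init() safe for binaries compiled against older, shorter versions of struct spdk_bs_opts. */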
SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3803 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3804 3805 if (FIELD_OK(bstype)) { 3806 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3807 } 3808 3809 SET_FIELD(iter_cb_fn, NULL); 3810 SET_FIELD(iter_cb_arg, NULL); 3811 SET_FIELD(force_recover, false); 3812 SET_FIELD(esnap_bs_dev_create, NULL); 3813 SET_FIELD(esnap_ctx, NULL); 3814 3815 #undef FIELD_OK 3816 #undef SET_FIELD 3817 } 3818 3819 static int 3820 bs_opts_verify(struct spdk_bs_opts *opts) 3821 { 3822 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3823 opts->max_channel_ops == 0) { 3824 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3825 return -1; 3826 } 3827 3828 return 0; 3829 } 3830 3831 /* START spdk_bs_load */ 3832 3833 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3834 3835 struct spdk_bs_load_ctx { 3836 struct spdk_blob_store *bs; 3837 struct spdk_bs_super_block *super; 3838 3839 struct spdk_bs_md_mask *mask; 3840 bool in_page_chain; 3841 uint32_t page_index; 3842 uint32_t cur_page; 3843 struct spdk_blob_md_page *page; 3844 3845 uint64_t num_extent_pages; 3846 uint32_t *extent_page_num; 3847 struct spdk_blob_md_page *extent_pages; 3848 struct spdk_bit_array *used_clusters; 3849 3850 spdk_bs_sequence_t *seq; 3851 spdk_blob_op_with_handle_complete iter_cb_fn; 3852 void *iter_cb_arg; 3853 struct spdk_blob *blob; 3854 spdk_blob_id blobid; 3855 3856 bool force_recover; 3857 3858 /* These fields are used in the spdk_bs_dump path. */ 3859 bool dumping; 3860 FILE *fp; 3861 spdk_bs_dump_print_xattr print_xattr_fn; 3862 char xattr_name[4096]; 3863 }; 3864 3865 static int 3866 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3867 struct spdk_bs_load_ctx **_ctx) 3868 { 3869 struct spdk_blob_store *bs; 3870 struct spdk_bs_load_ctx *ctx; 3871 uint64_t dev_size; 3872 int rc; 3873 3874 dev_size = dev->blocklen * dev->blockcnt; 3875 if (dev_size < opts->cluster_sz) { 3876 /* Device size cannot be smaller than cluster size of blobstore */ 3877 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3878 dev_size, opts->cluster_sz); 3879 return -ENOSPC; 3880 } 3881 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 3882 /* Cluster size cannot be smaller than page size */ 3883 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3884 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3885 return -EINVAL; 3886 } 3887 bs = calloc(1, sizeof(struct spdk_blob_store)); 3888 if (!bs) { 3889 return -ENOMEM; 3890 } 3891 3892 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3893 if (!ctx) { 3894 free(bs); 3895 return -ENOMEM; 3896 } 3897 3898 ctx->bs = bs; 3899 ctx->iter_cb_fn = opts->iter_cb_fn; 3900 ctx->iter_cb_arg = opts->iter_cb_arg; 3901 ctx->force_recover = opts->force_recover; 3902 3903 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3904 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3905 if (!ctx->super) { 3906 free(ctx); 3907 free(bs); 3908 return -ENOMEM; 3909 } 3910 3911 RB_INIT(&bs->open_blobs); 3912 TAILQ_INIT(&bs->snapshots); 3913 bs->dev = dev; 3914 bs->md_thread = spdk_get_thread(); 3915 assert(bs->md_thread != NULL); 3916 3917 /* 3918 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3919 * even multiple of the cluster size. 
3920 */ 3921 bs->cluster_sz = opts->cluster_sz; 3922 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3923 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3924 if (!ctx->used_clusters) { 3925 spdk_free(ctx->super); 3926 free(ctx); 3927 free(bs); 3928 return -ENOMEM; 3929 } 3930 3931 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3932 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3933 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3934 } 3935 bs->num_free_clusters = bs->total_clusters; 3936 bs->io_unit_size = dev->blocklen; 3937 3938 bs->max_channel_ops = opts->max_channel_ops; 3939 bs->super_blob = SPDK_BLOBID_INVALID; 3940 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3941 bs->esnap_bs_dev_create = opts->esnap_bs_dev_create; 3942 bs->esnap_ctx = opts->esnap_ctx; 3943 3944 /* The metadata is assumed to be at least 1 page */ 3945 bs->used_md_pages = spdk_bit_array_create(1); 3946 bs->used_blobids = spdk_bit_array_create(0); 3947 bs->open_blobids = spdk_bit_array_create(0); 3948 3949 spdk_spin_init(&bs->used_lock); 3950 3951 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3952 sizeof(struct spdk_bs_channel), "blobstore"); 3953 rc = bs_register_md_thread(bs); 3954 if (rc == -1) { 3955 spdk_io_device_unregister(bs, NULL); 3956 spdk_spin_destroy(&bs->used_lock); 3957 spdk_bit_array_free(&bs->open_blobids); 3958 spdk_bit_array_free(&bs->used_blobids); 3959 spdk_bit_array_free(&bs->used_md_pages); 3960 spdk_bit_array_free(&ctx->used_clusters); 3961 spdk_free(ctx->super); 3962 free(ctx); 3963 free(bs); 3964 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3965 return -ENOMEM; 3966 } 3967 3968 *_ctx = ctx; 3969 *_bs = bs; 3970 return 0; 3971 } 3972 3973 static void 3974 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3975 { 3976 assert(bserrno != 0); 3977 3978 spdk_free(ctx->super); 3979 bs_sequence_finish(ctx->seq, bserrno); 3980 bs_free(ctx->bs); 3981 spdk_bit_array_free(&ctx->used_clusters); 3982 free(ctx); 3983 } 3984 3985 static void 3986 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3987 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3988 { 3989 /* Update the values in the super block */ 3990 super->super_blob = bs->super_blob; 3991 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 3992 super->crc = blob_md_page_calc_crc(super); 3993 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 3994 bs_byte_to_lba(bs, sizeof(*super)), 3995 cb_fn, cb_arg); 3996 } 3997 3998 static void 3999 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4000 { 4001 struct spdk_bs_load_ctx *ctx = arg; 4002 uint64_t mask_size, lba, lba_count; 4003 4004 /* Write out the used clusters mask */ 4005 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 4006 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4007 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4008 if (!ctx->mask) { 4009 bs_load_ctx_fail(ctx, -ENOMEM); 4010 return; 4011 } 4012 4013 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 4014 ctx->mask->length = ctx->bs->total_clusters; 4015 /* We could get here through the normal unload path, or through dirty 4016 * shutdown recovery. For the normal unload path, we use the mask from 4017 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 4018 * only the bit array from the load ctx. 
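	 * Either source serializes into the same on-disk mask layout, so the
	 * reader does not need to know which path wrote it.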
4019 */ 4020 if (ctx->bs->used_clusters) { 4021 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 4022 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 4023 } else { 4024 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 4025 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 4026 } 4027 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4028 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4029 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4030 } 4031 4032 static void 4033 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4034 { 4035 struct spdk_bs_load_ctx *ctx = arg; 4036 uint64_t mask_size, lba, lba_count; 4037 4038 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 4039 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4040 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4041 if (!ctx->mask) { 4042 bs_load_ctx_fail(ctx, -ENOMEM); 4043 return; 4044 } 4045 4046 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 4047 ctx->mask->length = ctx->super->md_len; 4048 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 4049 4050 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4051 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4052 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4053 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4054 } 4055 4056 static void 4057 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4058 { 4059 struct spdk_bs_load_ctx *ctx = arg; 4060 uint64_t mask_size, lba, lba_count; 4061 4062 if (ctx->super->used_blobid_mask_len == 0) { 4063 /* 4064 * This is a pre-v3 on-disk format where the blobid mask does not get 4065 * written to disk. 
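		 * There is nothing to persist in that case. Loading such a
		 * blobstore takes the recovery path (see bs_load_super_cpl()),
		 * which rebuilds the blobid mask from the raw metadata.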
4066 */ 4067 cb_fn(seq, arg, 0); 4068 return; 4069 } 4070 4071 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 4072 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 4073 SPDK_MALLOC_DMA); 4074 if (!ctx->mask) { 4075 bs_load_ctx_fail(ctx, -ENOMEM); 4076 return; 4077 } 4078 4079 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 4080 ctx->mask->length = ctx->super->md_len; 4081 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 4082 4083 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 4084 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4085 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4086 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4087 } 4088 4089 static void 4090 blob_set_thin_provision(struct spdk_blob *blob) 4091 { 4092 blob_verify_md_op(blob); 4093 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4094 blob->state = SPDK_BLOB_STATE_DIRTY; 4095 } 4096 4097 static void 4098 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 4099 { 4100 blob_verify_md_op(blob); 4101 blob->clear_method = clear_method; 4102 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 4103 blob->state = SPDK_BLOB_STATE_DIRTY; 4104 } 4105 4106 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 4107 4108 static void 4109 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 4110 { 4111 struct spdk_bs_load_ctx *ctx = cb_arg; 4112 spdk_blob_id id; 4113 int64_t page_num; 4114 4115 /* Iterate to next blob (we can't use spdk_bs_iter_next function as our 4116 * last blob has been removed */ 4117 page_num = bs_blobid_to_page(ctx->blobid); 4118 page_num++; 4119 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 4120 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 4121 bs_load_iter(ctx, NULL, -ENOENT); 4122 return; 4123 } 4124 4125 id = bs_page_to_blobid(page_num); 4126 4127 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 4128 } 4129 4130 static void 4131 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 4132 { 4133 struct spdk_bs_load_ctx *ctx = cb_arg; 4134 4135 if (bserrno != 0) { 4136 SPDK_ERRLOG("Failed to close corrupted blob\n"); 4137 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4138 return; 4139 } 4140 4141 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 4142 } 4143 4144 static void 4145 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 4146 { 4147 struct spdk_bs_load_ctx *ctx = cb_arg; 4148 uint64_t i; 4149 4150 if (bserrno != 0) { 4151 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4152 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4153 return; 4154 } 4155 4156 /* Snapshot and clone have the same copy of cluster map and extent pages 4157 * at this point. Let's clear both for snapshot now, 4158 * so that it won't be cleared for clone later when we remove snapshot. 
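	 * The clusters remain referenced by the clone's own map, so deleting
	 * the snapshot neither strands nor double-frees them.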
4159 * Also set thin provision to pass data corruption check */ 4160 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 4161 ctx->blob->active.clusters[i] = 0; 4162 } 4163 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 4164 ctx->blob->active.extent_pages[i] = 0; 4165 } 4166 4167 ctx->blob->active.num_allocated_clusters = 0; 4168 4169 ctx->blob->md_ro = false; 4170 4171 blob_set_thin_provision(ctx->blob); 4172 4173 ctx->blobid = ctx->blob->id; 4174 4175 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 4176 } 4177 4178 static void 4179 bs_update_corrupted_blob(void *cb_arg, int bserrno) 4180 { 4181 struct spdk_bs_load_ctx *ctx = cb_arg; 4182 4183 if (bserrno != 0) { 4184 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4185 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4186 return; 4187 } 4188 4189 ctx->blob->md_ro = false; 4190 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 4191 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 4192 spdk_blob_set_read_only(ctx->blob); 4193 4194 if (ctx->iter_cb_fn) { 4195 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 4196 } 4197 bs_blob_list_add(ctx->blob); 4198 4199 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4200 } 4201 4202 static void 4203 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 4204 { 4205 struct spdk_bs_load_ctx *ctx = cb_arg; 4206 4207 if (bserrno != 0) { 4208 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 4209 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4210 return; 4211 } 4212 4213 if (blob->parent_id == ctx->blob->id) { 4214 /* Power failure occurred before updating clone (snapshot delete case) 4215 * or after updating clone (creating snapshot case) - keep snapshot */ 4216 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 4217 } else { 4218 /* Power failure occurred after updating clone (snapshot delete case) 4219 * or before updating clone (creating snapshot case) - remove snapshot */ 4220 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 4221 } 4222 } 4223 4224 static void 4225 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 4226 { 4227 struct spdk_bs_load_ctx *ctx = arg; 4228 const void *value; 4229 size_t len; 4230 int rc = 0; 4231 4232 if (bserrno == 0) { 4233 /* Examine blob if it is corrupted after power failure. Fix 4234 * the ones that can be fixed and remove any other corrupted 4235 * ones. If it is not corrupted just process it */ 4236 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 4237 if (rc != 0) { 4238 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 4239 if (rc != 0) { 4240 /* Not corrupted - process it and continue with iterating through blobs */ 4241 if (ctx->iter_cb_fn) { 4242 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 4243 } 4244 bs_blob_list_add(blob); 4245 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 4246 return; 4247 } 4248 4249 } 4250 4251 assert(len == sizeof(spdk_blob_id)); 4252 4253 ctx->blob = blob; 4254 4255 /* Open clone to check if we are able to fix this blob or should we remove it */ 4256 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 4257 return; 4258 } else if (bserrno == -ENOENT) { 4259 bserrno = 0; 4260 } else { 4261 /* 4262 * This case needs to be looked at further. Same problem 4263 * exists with applications that rely on explicit blob 4264 * iteration. We should just skip the blob that failed 4265 * to load and continue on to the next one. 
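		 * For now the whole load is completed with this error instead,
		 * so a single unreadable blob ends the iteration.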
		 */
		SPDK_ERRLOG("Error iterating blobs\n");
	}

	ctx->iter_cb_fn = NULL;

	spdk_free(ctx->super);
	spdk_free(ctx->mask);
	bs_sequence_finish(ctx->seq, bserrno);
	free(ctx);
}

static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);

static void
bs_load_complete(struct spdk_bs_load_ctx *ctx)
{
	ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
	if (ctx->dumping) {
		bs_dump_read_md_page(ctx->seq, ctx);
		return;
	}
	spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx);
}

static void
bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	int rc;

	/* The type must be correct */
	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);

	/* The length of the mask (in bits) must not be greater than
	 * the length of the buffer (converted to bits) */
	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));

	/* The length of the mask must be exactly equal to the size
	 * (in pages) of the metadata region */
	assert(ctx->mask->length == ctx->super->md_len);

	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
	if (rc < 0) {
		spdk_free(ctx->mask);
		bs_load_ctx_fail(ctx, rc);
		return;
	}

	spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask);
	bs_load_complete(ctx);
}

static void
bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint64_t lba, lba_count, mask_size;
	int rc;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	/* The type must be correct */
	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
					     struct spdk_blob_md_page) * 8));
	/*
	 * The length of the mask must be equal to or larger than the total number of clusters.
	 * It may be larger than the total number of clusters due to a failed spdk_bs_grow.
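	 * The extra bits carry no information; the mask length is clamped to
	 * total_clusters below before the bit array is loaded.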
	 */
	assert(ctx->mask->length >= ctx->bs->total_clusters);
	if (ctx->mask->length > ctx->bs->total_clusters) {
		SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n");
		ctx->mask->length = ctx->bs->total_clusters;
	}

	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length);
	if (rc < 0) {
		spdk_free(ctx->mask);
		bs_load_ctx_fail(ctx, rc);
		return;
	}

	spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask);
	ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters);
	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);

	spdk_free(ctx->mask);

	/* Read the used blobids mask */
	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
				 SPDK_MALLOC_DMA);
	if (!ctx->mask) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}
	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
			     bs_load_used_blobids_cpl, ctx);
}

static void
bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint64_t lba, lba_count, mask_size;
	int rc;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	/* The type must be correct */
	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
				     8));
	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
	if (ctx->mask->length != ctx->super->md_len) {
		SPDK_ERRLOG("mismatched md_len in used_pages mask: "
			    "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n",
			    ctx->mask->length, ctx->super->md_len);
		assert(false);
	}

	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
	if (rc < 0) {
		spdk_free(ctx->mask);
		bs_load_ctx_fail(ctx, rc);
		return;
	}

	spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask);
	spdk_free(ctx->mask);

	/* Read the used clusters mask */
	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
				 SPDK_MALLOC_DMA);
	if (!ctx->mask) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}
	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
			     bs_load_used_clusters_cpl, ctx);
}

static void
bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
{
	uint64_t lba, lba_count, mask_size;

	/* Read the used pages mask */
	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->mask) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
	lba_count =
bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4438 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 4439 bs_load_used_pages_cpl, ctx); 4440 } 4441 4442 static int 4443 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 4444 { 4445 struct spdk_blob_store *bs = ctx->bs; 4446 struct spdk_blob_md_descriptor *desc; 4447 size_t cur_desc = 0; 4448 4449 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4450 while (cur_desc < sizeof(page->descriptors)) { 4451 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4452 if (desc->length == 0) { 4453 /* If padding and length are 0, this terminates the page */ 4454 break; 4455 } 4456 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4457 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4458 unsigned int i, j; 4459 unsigned int cluster_count = 0; 4460 uint32_t cluster_idx; 4461 4462 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4463 4464 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4465 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 4466 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 4467 /* 4468 * cluster_idx = 0 means an unallocated cluster - don't mark that 4469 * in the used cluster map. 4470 */ 4471 if (cluster_idx != 0) { 4472 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 4473 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 4474 if (bs->num_free_clusters == 0) { 4475 return -ENOSPC; 4476 } 4477 bs->num_free_clusters--; 4478 } 4479 cluster_count++; 4480 } 4481 } 4482 if (cluster_count == 0) { 4483 return -EINVAL; 4484 } 4485 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4486 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4487 uint32_t i; 4488 uint32_t cluster_count = 0; 4489 uint32_t cluster_idx; 4490 size_t cluster_idx_length; 4491 4492 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4493 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4494 4495 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4496 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4497 return -EINVAL; 4498 } 4499 4500 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4501 cluster_idx = desc_extent->cluster_idx[i]; 4502 /* 4503 * cluster_idx = 0 means an unallocated cluster - don't mark that 4504 * in the used cluster map. 
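				 * Unlike the RLE descriptor above, each entry
				 * here describes exactly one cluster, so there
				 * is no inner run-length loop.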
4505 */ 4506 if (cluster_idx != 0) { 4507 if (cluster_idx < desc_extent->start_cluster_idx && 4508 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4509 return -EINVAL; 4510 } 4511 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4512 if (bs->num_free_clusters == 0) { 4513 return -ENOSPC; 4514 } 4515 bs->num_free_clusters--; 4516 } 4517 cluster_count++; 4518 } 4519 4520 if (cluster_count == 0) { 4521 return -EINVAL; 4522 } 4523 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4524 /* Skip this item */ 4525 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4526 /* Skip this item */ 4527 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4528 /* Skip this item */ 4529 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4530 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4531 uint32_t num_extent_pages = ctx->num_extent_pages; 4532 uint32_t i; 4533 size_t extent_pages_length; 4534 void *tmp; 4535 4536 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4537 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4538 4539 if (desc_extent_table->length == 0 || 4540 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4541 return -EINVAL; 4542 } 4543 4544 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4545 if (desc_extent_table->extent_page[i].page_idx != 0) { 4546 if (desc_extent_table->extent_page[i].num_pages != 1) { 4547 return -EINVAL; 4548 } 4549 num_extent_pages += 1; 4550 } 4551 } 4552 4553 if (num_extent_pages > 0) { 4554 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4555 if (tmp == NULL) { 4556 return -ENOMEM; 4557 } 4558 ctx->extent_page_num = tmp; 4559 4560 /* Extent table entries contain md page numbers for extent pages. 4561 * Zeroes represent unallocated extent pages, those are run-length-encoded. 4562 */ 4563 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4564 if (desc_extent_table->extent_page[i].page_idx != 0) { 4565 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4566 ctx->num_extent_pages += 1; 4567 } 4568 } 4569 } 4570 } else { 4571 /* Error */ 4572 return -EINVAL; 4573 } 4574 /* Advance to the next descriptor */ 4575 cur_desc += sizeof(*desc) + desc->length; 4576 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4577 break; 4578 } 4579 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4580 } 4581 return 0; 4582 } 4583 4584 static bool 4585 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4586 { 4587 uint32_t crc; 4588 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4589 size_t desc_len; 4590 4591 crc = blob_md_page_calc_crc(page); 4592 if (crc != page->crc) { 4593 return false; 4594 } 4595 4596 /* Extent page should always be of sequence num 0. */ 4597 if (page->sequence_num != 0) { 4598 return false; 4599 } 4600 4601 /* Descriptor type must be EXTENT_PAGE. */ 4602 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4603 return false; 4604 } 4605 4606 /* Descriptor length cannot exceed the page. */ 4607 desc_len = sizeof(*desc) + desc->length; 4608 if (desc_len > sizeof(page->descriptors)) { 4609 return false; 4610 } 4611 4612 /* It has to be the only descriptor in the page. 
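	 * If another descriptor header would still fit after this one, that
	 * slot must be empty (length 0) for the page to be considered valid.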
*/ 4613 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 4614 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 4615 if (desc->length != 0) { 4616 return false; 4617 } 4618 } 4619 4620 return true; 4621 } 4622 4623 static bool 4624 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 4625 { 4626 uint32_t crc; 4627 struct spdk_blob_md_page *page = ctx->page; 4628 4629 crc = blob_md_page_calc_crc(page); 4630 if (crc != page->crc) { 4631 return false; 4632 } 4633 4634 /* First page of a sequence should match the blobid. */ 4635 if (page->sequence_num == 0 && 4636 bs_page_to_blobid(ctx->cur_page) != page->id) { 4637 return false; 4638 } 4639 assert(bs_load_cur_extent_page_valid(page) == false); 4640 4641 return true; 4642 } 4643 4644 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx); 4645 4646 static void 4647 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4648 { 4649 struct spdk_bs_load_ctx *ctx = cb_arg; 4650 4651 if (bserrno != 0) { 4652 bs_load_ctx_fail(ctx, bserrno); 4653 return; 4654 } 4655 4656 bs_load_complete(ctx); 4657 } 4658 4659 static void 4660 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4661 { 4662 struct spdk_bs_load_ctx *ctx = cb_arg; 4663 4664 spdk_free(ctx->mask); 4665 ctx->mask = NULL; 4666 4667 if (bserrno != 0) { 4668 bs_load_ctx_fail(ctx, bserrno); 4669 return; 4670 } 4671 4672 bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl); 4673 } 4674 4675 static void 4676 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4677 { 4678 struct spdk_bs_load_ctx *ctx = cb_arg; 4679 4680 spdk_free(ctx->mask); 4681 ctx->mask = NULL; 4682 4683 if (bserrno != 0) { 4684 bs_load_ctx_fail(ctx, bserrno); 4685 return; 4686 } 4687 4688 bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl); 4689 } 4690 4691 static void 4692 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx) 4693 { 4694 bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl); 4695 } 4696 4697 static void 4698 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx) 4699 { 4700 uint64_t num_md_clusters; 4701 uint64_t i; 4702 4703 ctx->in_page_chain = false; 4704 4705 do { 4706 ctx->page_index++; 4707 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 4708 4709 if (ctx->page_index < ctx->super->md_len) { 4710 ctx->cur_page = ctx->page_index; 4711 bs_load_replay_cur_md_page(ctx); 4712 } else { 4713 /* Claim all of the clusters used by the metadata */ 4714 num_md_clusters = spdk_divide_round_up( 4715 ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster); 4716 for (i = 0; i < num_md_clusters; i++) { 4717 spdk_bit_array_set(ctx->used_clusters, i); 4718 } 4719 ctx->bs->num_free_clusters -= num_md_clusters; 4720 spdk_free(ctx->page); 4721 bs_load_write_used_md(ctx); 4722 } 4723 } 4724 4725 static void 4726 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4727 { 4728 struct spdk_bs_load_ctx *ctx = cb_arg; 4729 uint32_t page_num; 4730 uint64_t i; 4731 4732 if (bserrno != 0) { 4733 spdk_free(ctx->extent_pages); 4734 bs_load_ctx_fail(ctx, bserrno); 4735 return; 4736 } 4737 4738 for (i = 0; i < ctx->num_extent_pages; i++) { 4739 /* Extent pages are only read when present within in chain md. 4740 * Integrity of md is not right if that page was not a valid extent page. 
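		 * A single invalid extent page therefore fails the whole
		 * replay with -EILSEQ rather than being skipped.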
*/ 4741 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4742 spdk_free(ctx->extent_pages); 4743 bs_load_ctx_fail(ctx, -EILSEQ); 4744 return; 4745 } 4746 4747 page_num = ctx->extent_page_num[i]; 4748 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4749 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4750 spdk_free(ctx->extent_pages); 4751 bs_load_ctx_fail(ctx, -EILSEQ); 4752 return; 4753 } 4754 } 4755 4756 spdk_free(ctx->extent_pages); 4757 free(ctx->extent_page_num); 4758 ctx->extent_page_num = NULL; 4759 ctx->num_extent_pages = 0; 4760 4761 bs_load_replay_md_chain_cpl(ctx); 4762 } 4763 4764 static void 4765 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4766 { 4767 spdk_bs_batch_t *batch; 4768 uint32_t page; 4769 uint64_t lba; 4770 uint64_t i; 4771 4772 ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0, 4773 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4774 if (!ctx->extent_pages) { 4775 bs_load_ctx_fail(ctx, -ENOMEM); 4776 return; 4777 } 4778 4779 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4780 4781 for (i = 0; i < ctx->num_extent_pages; i++) { 4782 page = ctx->extent_page_num[i]; 4783 assert(page < ctx->super->md_len); 4784 lba = bs_md_page_to_lba(ctx->bs, page); 4785 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4786 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE)); 4787 } 4788 4789 bs_batch_close(batch); 4790 } 4791 4792 static void 4793 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4794 { 4795 struct spdk_bs_load_ctx *ctx = cb_arg; 4796 uint32_t page_num; 4797 struct spdk_blob_md_page *page; 4798 4799 if (bserrno != 0) { 4800 bs_load_ctx_fail(ctx, bserrno); 4801 return; 4802 } 4803 4804 page_num = ctx->cur_page; 4805 page = ctx->page; 4806 if (bs_load_cur_md_page_valid(ctx) == true) { 4807 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4808 spdk_spin_lock(&ctx->bs->used_lock); 4809 bs_claim_md_page(ctx->bs, page_num); 4810 spdk_spin_unlock(&ctx->bs->used_lock); 4811 if (page->sequence_num == 0) { 4812 SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num); 4813 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4814 } 4815 if (bs_load_replay_md_parse_page(ctx, page)) { 4816 bs_load_ctx_fail(ctx, -EILSEQ); 4817 return; 4818 } 4819 if (page->next != SPDK_INVALID_MD_PAGE) { 4820 ctx->in_page_chain = true; 4821 ctx->cur_page = page->next; 4822 bs_load_replay_cur_md_page(ctx); 4823 return; 4824 } 4825 if (ctx->num_extent_pages != 0) { 4826 bs_load_replay_extent_pages(ctx); 4827 return; 4828 } 4829 } 4830 } 4831 bs_load_replay_md_chain_cpl(ctx); 4832 } 4833 4834 static void 4835 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4836 { 4837 uint64_t lba; 4838 4839 assert(ctx->cur_page < ctx->super->md_len); 4840 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4841 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4842 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4843 bs_load_replay_md_cpl, ctx); 4844 } 4845 4846 static void 4847 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4848 { 4849 ctx->page_index = 0; 4850 ctx->cur_page = 0; 4851 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4852 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4853 if (!ctx->page) { 4854 bs_load_ctx_fail(ctx, -ENOMEM); 4855 return; 4856 } 4857 bs_load_replay_cur_md_page(ctx); 4858 } 4859 4860 static void 4861 bs_recover(struct spdk_bs_load_ctx *ctx) 4862 { 4863 int rc; 4864 4865 SPDK_NOTICELOG("Performing recovery on blobstore\n"); 4866 rc = 
spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4867 if (rc < 0) { 4868 bs_load_ctx_fail(ctx, -ENOMEM); 4869 return; 4870 } 4871 4872 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4873 if (rc < 0) { 4874 bs_load_ctx_fail(ctx, -ENOMEM); 4875 return; 4876 } 4877 4878 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4879 if (rc < 0) { 4880 bs_load_ctx_fail(ctx, -ENOMEM); 4881 return; 4882 } 4883 4884 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4885 if (rc < 0) { 4886 bs_load_ctx_fail(ctx, -ENOMEM); 4887 return; 4888 } 4889 4890 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4891 bs_load_replay_md(ctx); 4892 } 4893 4894 static int 4895 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4896 { 4897 int rc; 4898 4899 if (ctx->super->size == 0) { 4900 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4901 } 4902 4903 if (ctx->super->io_unit_size == 0) { 4904 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4905 } 4906 4907 ctx->bs->clean = 1; 4908 ctx->bs->cluster_sz = ctx->super->cluster_size; 4909 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4910 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 4911 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) { 4912 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster); 4913 } 4914 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4915 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4916 if (rc < 0) { 4917 return -ENOMEM; 4918 } 4919 ctx->bs->md_start = ctx->super->md_start; 4920 ctx->bs->md_len = ctx->super->md_len; 4921 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4922 if (rc < 0) { 4923 return -ENOMEM; 4924 } 4925 4926 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4927 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4928 ctx->bs->super_blob = ctx->super->super_blob; 4929 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4930 4931 return 0; 4932 } 4933 4934 static void 4935 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4936 { 4937 struct spdk_bs_load_ctx *ctx = cb_arg; 4938 int rc; 4939 4940 rc = bs_super_validate(ctx->super, ctx->bs); 4941 if (rc != 0) { 4942 bs_load_ctx_fail(ctx, rc); 4943 return; 4944 } 4945 4946 rc = bs_parse_super(ctx); 4947 if (rc < 0) { 4948 bs_load_ctx_fail(ctx, rc); 4949 return; 4950 } 4951 4952 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4953 bs_recover(ctx); 4954 } else { 4955 bs_load_read_used_pages(ctx); 4956 } 4957 } 4958 4959 static inline int 4960 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4961 { 4962 4963 if (!src->opts_size) { 4964 SPDK_ERRLOG("opts_size should not be zero value\n"); 4965 return -1; 4966 } 4967 4968 #define FIELD_OK(field) \ 4969 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4970 4971 #define SET_FIELD(field) \ 4972 if (FIELD_OK(field)) { \ 4973 dst->field = src->field; \ 4974 } \ 4975 4976 SET_FIELD(cluster_sz); 4977 SET_FIELD(num_md_pages); 4978 SET_FIELD(max_md_ops); 4979 SET_FIELD(max_channel_ops); 4980 SET_FIELD(clear_method); 4981 4982 if (FIELD_OK(bstype)) { 4983 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 4984 } 4985 SET_FIELD(iter_cb_fn); 4986 SET_FIELD(iter_cb_arg); 4987 SET_FIELD(force_recover); 4988 SET_FIELD(esnap_bs_dev_create); 4989 
SET_FIELD(esnap_ctx); 4990 4991 dst->opts_size = src->opts_size; 4992 4993 /* You should not remove this statement, but need to update the assert statement 4994 * if you add a new field, and also add a corresponding SET_FIELD statement */ 4995 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size"); 4996 4997 #undef FIELD_OK 4998 #undef SET_FIELD 4999 5000 return 0; 5001 } 5002 5003 void 5004 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5005 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5006 { 5007 struct spdk_blob_store *bs; 5008 struct spdk_bs_cpl cpl; 5009 struct spdk_bs_load_ctx *ctx; 5010 struct spdk_bs_opts opts = {}; 5011 int err; 5012 5013 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 5014 5015 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5016 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 5017 dev->destroy(dev); 5018 cb_fn(cb_arg, NULL, -EINVAL); 5019 return; 5020 } 5021 5022 spdk_bs_opts_init(&opts, sizeof(opts)); 5023 if (o) { 5024 if (bs_opts_copy(o, &opts)) { 5025 return; 5026 } 5027 } 5028 5029 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 5030 dev->destroy(dev); 5031 cb_fn(cb_arg, NULL, -EINVAL); 5032 return; 5033 } 5034 5035 err = bs_alloc(dev, &opts, &bs, &ctx); 5036 if (err) { 5037 dev->destroy(dev); 5038 cb_fn(cb_arg, NULL, err); 5039 return; 5040 } 5041 5042 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5043 cpl.u.bs_handle.cb_fn = cb_fn; 5044 cpl.u.bs_handle.cb_arg = cb_arg; 5045 cpl.u.bs_handle.bs = bs; 5046 5047 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5048 if (!ctx->seq) { 5049 spdk_free(ctx->super); 5050 free(ctx); 5051 bs_free(bs); 5052 cb_fn(cb_arg, NULL, -ENOMEM); 5053 return; 5054 } 5055 5056 /* Read the super block */ 5057 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5058 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5059 bs_load_super_cpl, ctx); 5060 } 5061 5062 /* END spdk_bs_load */ 5063 5064 /* START spdk_bs_dump */ 5065 5066 static void 5067 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 5068 { 5069 spdk_free(ctx->super); 5070 5071 /* 5072 * We need to defer calling bs_call_cpl() until after 5073 * dev destruction, so tuck these away for later use. 
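	 * bs_free() below unregisters the io_device; the unregister callback
	 * destroys the dev and then runs bs_call_cpl() with the values saved
	 * here.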
	 */
	ctx->bs->unload_err = bserrno;
	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;

	bs_sequence_finish(seq, 0);
	bs_free(ctx->bs);
	free(ctx);
}

static void
bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
{
	struct spdk_blob_md_descriptor_xattr *desc_xattr;
	uint32_t i;
	const char *type;

	desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;

	if (desc_xattr->length !=
	    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		fprintf(ctx->fp, "Warning: Inconsistent xattr descriptor length\n");
	}

	memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
	ctx->xattr_name[desc_xattr->name_length] = '\0';
	if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
		type = "XATTR";
	} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
		type = "XATTR_INTERNAL";
	} else {
		assert(false);
		type = "XATTR_?";
	}
	fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name);
	fprintf(ctx->fp, " value = \"");
	ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
			    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
			    desc_xattr->value_length);
	fprintf(ctx->fp, "\"\n");
	for (i = 0; i < desc_xattr->value_length; i++) {
		if (i % 16 == 0) {
			fprintf(ctx->fp, " ");
		}
		fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
		if ((i + 1) % 16 == 0) {
			fprintf(ctx->fp, "\n");
		}
	}
	if (i % 16 != 0) {
		fprintf(ctx->fp, "\n");
	}
}

struct type_flag_desc {
	uint64_t mask;
	uint64_t val;
	const char *name;
};

static void
bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags,
			struct type_flag_desc *desc, size_t numflags)
{
	uint64_t covered = 0;
	size_t i;

	for (i = 0; i < numflags; i++) {
		if ((desc[i].mask & flags) != desc[i].val) {
			continue;
		}
		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name);
		if (desc[i].mask != desc[i].val) {
			fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")",
				desc[i].mask, desc[i].val);
		}
		fprintf(ctx->fp, "\n");
		covered |= desc[i].mask;
	}
	if ((flags & ~covered) != 0) {
		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered);
	}
}

static void
bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
{
	struct spdk_blob_md_descriptor_flags *type_desc;
#define ADD_FLAG(f) { f, f, #f }
#define ADD_MASK_VAL(m, v) { m, v, #v }
	static struct type_flag_desc invalid[] = {
		ADD_FLAG(SPDK_BLOB_THIN_PROV),
		ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR),
		ADD_FLAG(SPDK_BLOB_EXTENT_TABLE),
	};
	static struct type_flag_desc data_ro[] = {
		ADD_FLAG(SPDK_BLOB_READ_ONLY),
	};
	static struct type_flag_desc md_ro[] = {
		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT),
		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE),
		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP),
		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES),
	};
#undef ADD_FLAG
#undef ADD_MASK_VAL

	type_desc = (struct
spdk_blob_md_descriptor_flags *)desc; 5182 fprintf(ctx->fp, "Flags:\n"); 5183 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 5184 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 5185 SPDK_COUNTOF(invalid)); 5186 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 5187 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 5188 SPDK_COUNTOF(data_ro)); 5189 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 5190 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 5191 SPDK_COUNTOF(md_ro)); 5192 } 5193 5194 static void 5195 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5196 { 5197 struct spdk_blob_md_descriptor_extent_table *et_desc; 5198 uint64_t num_extent_pages; 5199 uint32_t et_idx; 5200 5201 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 5202 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 5203 sizeof(et_desc->extent_page[0]); 5204 5205 fprintf(ctx->fp, "Extent table:\n"); 5206 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 5207 if (et_desc->extent_page[et_idx].page_idx == 0) { 5208 /* Zeroes represent unallocated extent pages. */ 5209 continue; 5210 } 5211 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 5212 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 5213 et_desc->extent_page[et_idx].num_pages, 5214 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 5215 } 5216 } 5217 5218 static void 5219 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 5220 { 5221 uint32_t page_idx = ctx->cur_page; 5222 struct spdk_blob_md_page *page = ctx->page; 5223 struct spdk_blob_md_descriptor *desc; 5224 size_t cur_desc = 0; 5225 uint32_t crc; 5226 5227 fprintf(ctx->fp, "=========\n"); 5228 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 5229 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 5230 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 5231 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 5232 if (page->next == SPDK_INVALID_MD_PAGE) { 5233 fprintf(ctx->fp, "Next: None\n"); 5234 } else { 5235 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 5236 } 5237 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 5238 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 5239 fprintf(ctx->fp, " md"); 5240 } 5241 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 5242 fprintf(ctx->fp, " blob"); 5243 } 5244 fprintf(ctx->fp, "\n"); 5245 5246 crc = blob_md_page_calc_crc(page); 5247 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 5248 5249 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 5250 while (cur_desc < sizeof(page->descriptors)) { 5251 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 5252 if (desc->length == 0) { 5253 /* If padding and length are 0, this terminates the page */ 5254 break; 5255 } 5256 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 5257 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 5258 unsigned int i; 5259 5260 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 5261 5262 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 5263 if (desc_extent_rle->extents[i].cluster_idx != 0) { 5264 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5265 desc_extent_rle->extents[i].cluster_idx); 5266 } else { 5267 fprintf(ctx->fp, "Unallocated Extent - "); 5268 } 5269 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 5270 fprintf(ctx->fp, "\n"); 5271 } 5272 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 5273 struct spdk_blob_md_descriptor_extent_page *desc_extent; 5274 unsigned int i; 5275 5276 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 5277 5278 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 5279 if (desc_extent->cluster_idx[i] != 0) { 5280 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5281 desc_extent->cluster_idx[i]); 5282 } else { 5283 fprintf(ctx->fp, "Unallocated Extent"); 5284 } 5285 fprintf(ctx->fp, "\n"); 5286 } 5287 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5288 bs_dump_print_xattr(ctx, desc); 5289 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5290 bs_dump_print_xattr(ctx, desc); 5291 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 5292 bs_dump_print_type_flags(ctx, desc); 5293 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 5294 bs_dump_print_extent_table(ctx, desc); 5295 } else { 5296 /* Error */ 5297 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 5298 } 5299 /* Advance to the next descriptor */ 5300 cur_desc += sizeof(*desc) + desc->length; 5301 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 5302 break; 5303 } 5304 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 5305 } 5306 } 5307 5308 static void 5309 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5310 { 5311 struct spdk_bs_load_ctx *ctx = cb_arg; 5312 5313 if (bserrno != 0) { 5314 bs_dump_finish(seq, ctx, bserrno); 5315 return; 5316 } 5317 5318 if (ctx->page->id != 0) { 5319 bs_dump_print_md_page(ctx); 5320 } 5321 5322 ctx->cur_page++; 5323 5324 if (ctx->cur_page < ctx->super->md_len) { 5325 bs_dump_read_md_page(seq, ctx); 5326 } else { 5327 spdk_free(ctx->page); 5328 bs_dump_finish(seq, ctx, 0); 5329 } 5330 } 5331 5332 static void 5333 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 5334 { 5335 struct spdk_bs_load_ctx *ctx = cb_arg; 5336 uint64_t lba; 5337 5338 assert(ctx->cur_page < ctx->super->md_len); 5339 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 5340 bs_sequence_read_dev(seq, ctx->page, lba, 5341 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 5342 bs_dump_read_md_page_cpl, ctx); 5343 } 5344 5345 static void 5346 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5347 { 5348 struct spdk_bs_load_ctx *ctx = cb_arg; 5349 int rc; 5350 5351 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 5352 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5353 sizeof(ctx->super->signature)) != 0) { 5354 fprintf(ctx->fp, "(Mismatch)\n"); 5355 bs_dump_finish(seq, ctx, bserrno); 5356 return; 5357 } else { 5358 fprintf(ctx->fp, "(OK)\n"); 5359 } 5360 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 5361 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 5362 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 5363 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 5364 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 5365 fprintf(ctx->fp, "Super Blob ID: "); 5366 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 5367 fprintf(ctx->fp, "(None)\n"); 5368 } else { 5369 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 5370 } 5371 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 5372 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 5373 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 5374 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 5375 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 5376 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 5377 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 5378 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 5379 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 5380 5381 ctx->cur_page = 0; 5382 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 5383 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5384 if (!ctx->page) { 5385 bs_dump_finish(seq, ctx, -ENOMEM); 5386 return; 5387 } 5388 5389 rc = bs_parse_super(ctx); 5390 if (rc < 0) { 5391 bs_load_ctx_fail(ctx, rc); 5392 return; 5393 } 5394 5395 bs_load_read_used_pages(ctx); 5396 } 5397 5398 void 5399 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 5400 spdk_bs_op_complete cb_fn, void *cb_arg) 5401 { 5402 struct spdk_blob_store *bs; 5403 struct spdk_bs_cpl cpl; 5404 struct spdk_bs_load_ctx *ctx; 5405 struct spdk_bs_opts opts = {}; 5406 int err; 5407 5408 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 5409 5410 spdk_bs_opts_init(&opts, sizeof(opts)); 5411 5412 err = bs_alloc(dev, &opts, &bs, &ctx); 5413 if (err) { 5414 dev->destroy(dev); 5415 cb_fn(cb_arg, err); 5416 return; 5417 } 5418 5419 ctx->dumping = true; 5420 ctx->fp = fp; 5421 ctx->print_xattr_fn = print_xattr_fn; 5422 5423 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5424 cpl.u.bs_basic.cb_fn = cb_fn; 5425 cpl.u.bs_basic.cb_arg = cb_arg; 5426 5427 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5428 if (!ctx->seq) { 5429 spdk_free(ctx->super); 5430 free(ctx); 5431 bs_free(bs); 5432 cb_fn(cb_arg, -ENOMEM); 5433 return; 5434 } 5435 5436 /* Read the super block */ 5437 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5438 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5439 bs_dump_super_cpl, ctx); 5440 } 5441 5442 /* END spdk_bs_dump */ 5443 5444 /* START spdk_bs_init */ 5445 5446 static void 5447 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5448 { 5449 struct spdk_bs_load_ctx *ctx = cb_arg; 5450 5451 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 5452 spdk_free(ctx->super); 5453 free(ctx); 5454 5455 bs_sequence_finish(seq, bserrno); 5456 } 5457 5458 static void 5459 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5460 { 5461 struct spdk_bs_load_ctx *ctx = cb_arg; 5462 5463 /* Write super block */ 5464 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5465 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5466 bs_init_persist_super_cpl, ctx); 5467 } 5468 5469 void 5470 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5471 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5472 { 5473 struct spdk_bs_load_ctx *ctx; 5474 struct spdk_blob_store *bs; 5475 struct spdk_bs_cpl cpl; 5476 spdk_bs_sequence_t *seq; 5477 spdk_bs_batch_t *batch; 5478 uint64_t num_md_lba; 5479 uint64_t num_md_pages; 5480 uint64_t num_md_clusters; 5481 uint64_t max_used_cluster_mask_len; 5482 uint32_t i; 5483 struct spdk_bs_opts opts = {}; 5484 int rc; 5485 uint64_t lba, lba_count; 5486 5487 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5488 5489 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5490 SPDK_ERRLOG("unsupported dev block length of %d\n", 5491 dev->blocklen); 5492 dev->destroy(dev); 5493 cb_fn(cb_arg, NULL, -EINVAL); 5494 return; 5495 } 5496 5497 spdk_bs_opts_init(&opts, sizeof(opts)); 5498 if (o) { 5499 if (bs_opts_copy(o, &opts)) { 5500 return; 5501 } 5502 } 5503 5504 if (bs_opts_verify(&opts) != 0) { 5505 dev->destroy(dev); 5506 cb_fn(cb_arg, NULL, -EINVAL); 5507 return; 5508 } 5509 5510 rc = bs_alloc(dev, &opts, &bs, &ctx); 5511 if (rc) { 5512 dev->destroy(dev); 5513 cb_fn(cb_arg, NULL, rc); 5514 return; 5515 } 5516 5517 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5518 /* By default, allocate 1 page per cluster. 5519 * Technically, this over-allocates metadata 5520 * because more metadata will reduce the number 5521 * of usable clusters. This can be addressed with 5522 * more complex math in the future. 5523 */ 5524 bs->md_len = bs->total_clusters; 5525 } else { 5526 bs->md_len = opts.num_md_pages; 5527 } 5528 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5529 if (rc < 0) { 5530 spdk_free(ctx->super); 5531 free(ctx); 5532 bs_free(bs); 5533 cb_fn(cb_arg, NULL, -ENOMEM); 5534 return; 5535 } 5536 5537 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5538 if (rc < 0) { 5539 spdk_free(ctx->super); 5540 free(ctx); 5541 bs_free(bs); 5542 cb_fn(cb_arg, NULL, -ENOMEM); 5543 return; 5544 } 5545 5546 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5547 if (rc < 0) { 5548 spdk_free(ctx->super); 5549 free(ctx); 5550 bs_free(bs); 5551 cb_fn(cb_arg, NULL, -ENOMEM); 5552 return; 5553 } 5554 5555 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5556 sizeof(ctx->super->signature)); 5557 ctx->super->version = SPDK_BS_VERSION; 5558 ctx->super->length = sizeof(*ctx->super); 5559 ctx->super->super_blob = bs->super_blob; 5560 ctx->super->clean = 0; 5561 ctx->super->cluster_size = bs->cluster_sz; 5562 ctx->super->io_unit_size = bs->io_unit_size; 5563 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5564 5565 /* Calculate how many pages the metadata consumes at the front 5566 * of the disk. 5567 */ 5568 5569 /* The super block uses 1 page */ 5570 num_md_pages = 1; 5571 5572 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5573 * up to the nearest page, plus a header. 
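	 * For example, with an md_len of 2^20 pages the bitmap alone is
	 * 128 KiB (32 4 KiB pages); the spdk_bs_md_mask header pushes the
	 * allocation to 33 pages.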
5574 */ 5575 ctx->super->used_page_mask_start = num_md_pages; 5576 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5577 spdk_divide_round_up(bs->md_len, 8), 5578 SPDK_BS_PAGE_SIZE); 5579 num_md_pages += ctx->super->used_page_mask_len; 5580 5581 /* The used_clusters mask requires 1 bit per cluster, rounded 5582 * up to the nearest page, plus a header. 5583 */ 5584 ctx->super->used_cluster_mask_start = num_md_pages; 5585 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5586 spdk_divide_round_up(bs->total_clusters, 8), 5587 SPDK_BS_PAGE_SIZE); 5588 /* The blobstore might be extended, then the used_cluster bitmap will need more space. 5589 * Here we calculate the max clusters we can support according to the 5590 * num_md_pages (bs->md_len). 5591 */ 5592 max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5593 spdk_divide_round_up(bs->md_len, 8), 5594 SPDK_BS_PAGE_SIZE); 5595 max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len, 5596 ctx->super->used_cluster_mask_len); 5597 num_md_pages += max_used_cluster_mask_len; 5598 5599 /* The used_blobids mask requires 1 bit per metadata page, rounded 5600 * up to the nearest page, plus a header. 5601 */ 5602 ctx->super->used_blobid_mask_start = num_md_pages; 5603 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5604 spdk_divide_round_up(bs->md_len, 8), 5605 SPDK_BS_PAGE_SIZE); 5606 num_md_pages += ctx->super->used_blobid_mask_len; 5607 5608 /* The metadata region size was chosen above */ 5609 ctx->super->md_start = bs->md_start = num_md_pages; 5610 ctx->super->md_len = bs->md_len; 5611 num_md_pages += bs->md_len; 5612 5613 num_md_lba = bs_page_to_lba(bs, num_md_pages); 5614 5615 ctx->super->size = dev->blockcnt * dev->blocklen; 5616 5617 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 5618 5619 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 5620 if (num_md_clusters > bs->total_clusters) { 5621 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 5622 "please decrease number of pages reserved for metadata " 5623 "or increase cluster size.\n"); 5624 spdk_free(ctx->super); 5625 spdk_bit_array_free(&ctx->used_clusters); 5626 free(ctx); 5627 bs_free(bs); 5628 cb_fn(cb_arg, NULL, -ENOMEM); 5629 return; 5630 } 5631 /* Claim all of the clusters used by the metadata */ 5632 for (i = 0; i < num_md_clusters; i++) { 5633 spdk_bit_array_set(ctx->used_clusters, i); 5634 } 5635 5636 bs->num_free_clusters -= num_md_clusters; 5637 bs->total_data_clusters = bs->num_free_clusters; 5638 5639 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5640 cpl.u.bs_handle.cb_fn = cb_fn; 5641 cpl.u.bs_handle.cb_arg = cb_arg; 5642 cpl.u.bs_handle.bs = bs; 5643 5644 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5645 if (!seq) { 5646 spdk_free(ctx->super); 5647 free(ctx); 5648 bs_free(bs); 5649 cb_fn(cb_arg, NULL, -ENOMEM); 5650 return; 5651 } 5652 5653 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx); 5654 5655 /* Clear metadata space */ 5656 bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 5657 5658 lba = num_md_lba; 5659 lba_count = ctx->bs->dev->blockcnt - lba; 5660 switch (opts.clear_method) { 5661 case BS_CLEAR_WITH_UNMAP: 5662 /* Trim data clusters */ 5663 bs_batch_unmap_dev(batch, lba, lba_count); 5664 break; 5665 case BS_CLEAR_WITH_WRITE_ZEROES: 5666 /* Write_zeroes to data clusters */ 5667 bs_batch_write_zeroes_dev(batch, lba, lba_count); 5668 break; 5669 case 
BS_CLEAR_WITH_NONE: 5670 default: 5671 break; 5672 } 5673 5674 bs_batch_close(batch); 5675 } 5676 5677 /* END spdk_bs_init */ 5678 5679 /* START spdk_bs_destroy */ 5680 5681 static void 5682 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5683 { 5684 struct spdk_bs_load_ctx *ctx = cb_arg; 5685 struct spdk_blob_store *bs = ctx->bs; 5686 5687 /* 5688 * We need to defer calling bs_call_cpl() until after 5689 * dev destruction, so tuck these away for later use. 5690 */ 5691 bs->unload_err = bserrno; 5692 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5693 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5694 5695 bs_sequence_finish(seq, bserrno); 5696 5697 bs_free(bs); 5698 free(ctx); 5699 } 5700 5701 void 5702 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5703 void *cb_arg) 5704 { 5705 struct spdk_bs_cpl cpl; 5706 spdk_bs_sequence_t *seq; 5707 struct spdk_bs_load_ctx *ctx; 5708 5709 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5710 5711 if (!RB_EMPTY(&bs->open_blobs)) { 5712 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5713 cb_fn(cb_arg, -EBUSY); 5714 return; 5715 } 5716 5717 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5718 cpl.u.bs_basic.cb_fn = cb_fn; 5719 cpl.u.bs_basic.cb_arg = cb_arg; 5720 5721 ctx = calloc(1, sizeof(*ctx)); 5722 if (!ctx) { 5723 cb_fn(cb_arg, -ENOMEM); 5724 return; 5725 } 5726 5727 ctx->bs = bs; 5728 5729 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5730 if (!seq) { 5731 free(ctx); 5732 cb_fn(cb_arg, -ENOMEM); 5733 return; 5734 } 5735 5736 /* Write zeroes to the super block */ 5737 bs_sequence_write_zeroes_dev(seq, 5738 bs_page_to_lba(bs, 0), 5739 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5740 bs_destroy_trim_cpl, ctx); 5741 } 5742 5743 /* END spdk_bs_destroy */ 5744 5745 /* START spdk_bs_unload */ 5746 5747 static void 5748 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5749 { 5750 spdk_bs_sequence_t *seq = ctx->seq; 5751 5752 spdk_free(ctx->super); 5753 5754 /* 5755 * We need to defer calling bs_call_cpl() until after 5756 * dev destruction, so tuck these away for later use. 
5757 */ 5758 ctx->bs->unload_err = bserrno; 5759 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5760 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5761 5762 bs_sequence_finish(seq, bserrno); 5763 5764 bs_free(ctx->bs); 5765 free(ctx); 5766 } 5767 5768 static void 5769 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5770 { 5771 struct spdk_bs_load_ctx *ctx = cb_arg; 5772 5773 bs_unload_finish(ctx, bserrno); 5774 } 5775 5776 static void 5777 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5778 { 5779 struct spdk_bs_load_ctx *ctx = cb_arg; 5780 5781 spdk_free(ctx->mask); 5782 5783 if (bserrno != 0) { 5784 bs_unload_finish(ctx, bserrno); 5785 return; 5786 } 5787 5788 ctx->super->clean = 1; 5789 5790 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5791 } 5792 5793 static void 5794 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5795 { 5796 struct spdk_bs_load_ctx *ctx = cb_arg; 5797 5798 spdk_free(ctx->mask); 5799 ctx->mask = NULL; 5800 5801 if (bserrno != 0) { 5802 bs_unload_finish(ctx, bserrno); 5803 return; 5804 } 5805 5806 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5807 } 5808 5809 static void 5810 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5811 { 5812 struct spdk_bs_load_ctx *ctx = cb_arg; 5813 5814 spdk_free(ctx->mask); 5815 ctx->mask = NULL; 5816 5817 if (bserrno != 0) { 5818 bs_unload_finish(ctx, bserrno); 5819 return; 5820 } 5821 5822 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5823 } 5824 5825 static void 5826 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5827 { 5828 struct spdk_bs_load_ctx *ctx = cb_arg; 5829 int rc; 5830 5831 if (bserrno != 0) { 5832 bs_unload_finish(ctx, bserrno); 5833 return; 5834 } 5835 5836 rc = bs_super_validate(ctx->super, ctx->bs); 5837 if (rc != 0) { 5838 bs_unload_finish(ctx, rc); 5839 return; 5840 } 5841 5842 bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5843 } 5844 5845 void 5846 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5847 { 5848 struct spdk_bs_cpl cpl; 5849 struct spdk_bs_load_ctx *ctx; 5850 5851 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5852 5853 /* 5854 * If external snapshot channels are being destroyed while the blobstore is unloaded, the 5855 * unload is deferred until after the channel destruction completes. 
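 * When that happens, the callback is stashed in esnap_unload_cb_fn/cb_arg and
 * spdk_bs_unload() is called again with the same callback once the last esnap
 * channel has been destroyed; the asserts below check that the deferred and
 * repeated calls match.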
5856 */ 5857 if (bs->esnap_channels_unloading != 0) { 5858 if (bs->esnap_unload_cb_fn != NULL) { 5859 SPDK_ERRLOG("Blobstore unload in progress\n"); 5860 cb_fn(cb_arg, -EBUSY); 5861 return; 5862 } 5863 SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32 5864 " esnap clones are unloading\n", bs->esnap_channels_unloading); 5865 bs->esnap_unload_cb_fn = cb_fn; 5866 bs->esnap_unload_cb_arg = cb_arg; 5867 return; 5868 } 5869 if (bs->esnap_unload_cb_fn != NULL) { 5870 SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n"); 5871 assert(bs->esnap_unload_cb_fn == cb_fn); 5872 assert(bs->esnap_unload_cb_arg == cb_arg); 5873 bs->esnap_unload_cb_fn = NULL; 5874 bs->esnap_unload_cb_arg = NULL; 5875 } 5876 5877 if (!RB_EMPTY(&bs->open_blobs)) { 5878 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5879 cb_fn(cb_arg, -EBUSY); 5880 return; 5881 } 5882 5883 ctx = calloc(1, sizeof(*ctx)); 5884 if (!ctx) { 5885 cb_fn(cb_arg, -ENOMEM); 5886 return; 5887 } 5888 5889 ctx->bs = bs; 5890 5891 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5892 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5893 if (!ctx->super) { 5894 free(ctx); 5895 cb_fn(cb_arg, -ENOMEM); 5896 return; 5897 } 5898 5899 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5900 cpl.u.bs_basic.cb_fn = cb_fn; 5901 cpl.u.bs_basic.cb_arg = cb_arg; 5902 5903 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5904 if (!ctx->seq) { 5905 spdk_free(ctx->super); 5906 free(ctx); 5907 cb_fn(cb_arg, -ENOMEM); 5908 return; 5909 } 5910 5911 /* Read super block */ 5912 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5913 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5914 bs_unload_read_super_cpl, ctx); 5915 } 5916 5917 /* END spdk_bs_unload */ 5918 5919 /* START spdk_bs_set_super */ 5920 5921 struct spdk_bs_set_super_ctx { 5922 struct spdk_blob_store *bs; 5923 struct spdk_bs_super_block *super; 5924 }; 5925 5926 static void 5927 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5928 { 5929 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5930 5931 if (bserrno != 0) { 5932 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5933 } 5934 5935 spdk_free(ctx->super); 5936 5937 bs_sequence_finish(seq, bserrno); 5938 5939 free(ctx); 5940 } 5941 5942 static void 5943 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5944 { 5945 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5946 int rc; 5947 5948 if (bserrno != 0) { 5949 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5950 spdk_free(ctx->super); 5951 bs_sequence_finish(seq, bserrno); 5952 free(ctx); 5953 return; 5954 } 5955 5956 rc = bs_super_validate(ctx->super, ctx->bs); 5957 if (rc != 0) { 5958 SPDK_ERRLOG("Not a valid super block\n"); 5959 spdk_free(ctx->super); 5960 bs_sequence_finish(seq, rc); 5961 free(ctx); 5962 return; 5963 } 5964 5965 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 5966 } 5967 5968 void 5969 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 5970 spdk_bs_op_complete cb_fn, void *cb_arg) 5971 { 5972 struct spdk_bs_cpl cpl; 5973 spdk_bs_sequence_t *seq; 5974 struct spdk_bs_set_super_ctx *ctx; 5975 5976 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 5977 5978 ctx = calloc(1, sizeof(*ctx)); 5979 if (!ctx) { 5980 cb_fn(cb_arg, -ENOMEM); 5981 return; 5982 } 5983 5984 ctx->bs = bs; 5985 5986 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5987 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5988 if (!ctx->super) { 5989 free(ctx); 5990 
cb_fn(cb_arg, -ENOMEM); 5991 return; 5992 } 5993 5994 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5995 cpl.u.bs_basic.cb_fn = cb_fn; 5996 cpl.u.bs_basic.cb_arg = cb_arg; 5997 5998 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5999 if (!seq) { 6000 spdk_free(ctx->super); 6001 free(ctx); 6002 cb_fn(cb_arg, -ENOMEM); 6003 return; 6004 } 6005 6006 bs->super_blob = blobid; 6007 6008 /* Read super block */ 6009 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 6010 bs_byte_to_lba(bs, sizeof(*ctx->super)), 6011 bs_set_super_read_cpl, ctx); 6012 } 6013 6014 /* END spdk_bs_set_super */ 6015 6016 void 6017 spdk_bs_get_super(struct spdk_blob_store *bs, 6018 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6019 { 6020 if (bs->super_blob == SPDK_BLOBID_INVALID) { 6021 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 6022 } else { 6023 cb_fn(cb_arg, bs->super_blob, 0); 6024 } 6025 } 6026 6027 uint64_t 6028 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 6029 { 6030 return bs->cluster_sz; 6031 } 6032 6033 uint64_t 6034 spdk_bs_get_page_size(struct spdk_blob_store *bs) 6035 { 6036 return SPDK_BS_PAGE_SIZE; 6037 } 6038 6039 uint64_t 6040 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 6041 { 6042 return bs->io_unit_size; 6043 } 6044 6045 uint64_t 6046 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 6047 { 6048 return bs->num_free_clusters; 6049 } 6050 6051 uint64_t 6052 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 6053 { 6054 return bs->total_data_clusters; 6055 } 6056 6057 static int 6058 bs_register_md_thread(struct spdk_blob_store *bs) 6059 { 6060 bs->md_channel = spdk_get_io_channel(bs); 6061 if (!bs->md_channel) { 6062 SPDK_ERRLOG("Failed to get IO channel.\n"); 6063 return -1; 6064 } 6065 6066 return 0; 6067 } 6068 6069 static int 6070 bs_unregister_md_thread(struct spdk_blob_store *bs) 6071 { 6072 spdk_put_io_channel(bs->md_channel); 6073 6074 return 0; 6075 } 6076 6077 spdk_blob_id 6078 spdk_blob_get_id(struct spdk_blob *blob) 6079 { 6080 assert(blob != NULL); 6081 6082 return blob->id; 6083 } 6084 6085 uint64_t 6086 spdk_blob_get_num_pages(struct spdk_blob *blob) 6087 { 6088 assert(blob != NULL); 6089 6090 return bs_cluster_to_page(blob->bs, blob->active.num_clusters); 6091 } 6092 6093 uint64_t 6094 spdk_blob_get_num_io_units(struct spdk_blob *blob) 6095 { 6096 assert(blob != NULL); 6097 6098 return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs); 6099 } 6100 6101 uint64_t 6102 spdk_blob_get_num_clusters(struct spdk_blob *blob) 6103 { 6104 assert(blob != NULL); 6105 6106 return blob->active.num_clusters; 6107 } 6108 6109 uint64_t 6110 spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob) 6111 { 6112 assert(blob != NULL); 6113 6114 return blob->active.num_allocated_clusters; 6115 } 6116 6117 static uint64_t 6118 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated) 6119 { 6120 uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob); 6121 6122 while (offset < blob_io_unit_num) { 6123 if (bs_io_unit_is_allocated(blob, offset) == is_allocated) { 6124 return offset; 6125 } 6126 6127 offset += bs_num_io_units_to_cluster_boundary(blob, offset); 6128 } 6129 6130 return UINT64_MAX; 6131 } 6132 6133 uint64_t 6134 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6135 { 6136 return blob_find_io_unit(blob, offset, true); 6137 } 6138 6139 uint64_t 6140 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6141 { 6142 return blob_find_io_unit(blob, offset, false); 6143 
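/*
 * Illustrative sketch (not part of this file's logic): a caller could walk a
 * thin-provisioned blob's allocated ranges with these two helpers. Both
 * return UINT64_MAX when no further io_unit of the requested kind exists:
 *
 *     uint64_t off = spdk_blob_get_next_allocated_io_unit(blob, 0);
 *     while (off != UINT64_MAX) {
 *         uint64_t end = spdk_blob_get_next_unallocated_io_unit(blob, off);
 *         // process io_units [off, end); end == UINT64_MAX means the
 *         // allocated run extends to the end of the blob
 *         if (end == UINT64_MAX) {
 *             break;
 *         }
 *         off = spdk_blob_get_next_allocated_io_unit(blob, end);
 *     }
 */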
} 6144 6145 /* START spdk_bs_create_blob */ 6146 6147 static void 6148 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6149 { 6150 struct spdk_blob *blob = cb_arg; 6151 uint32_t page_idx = bs_blobid_to_page(blob->id); 6152 6153 if (bserrno != 0) { 6154 spdk_spin_lock(&blob->bs->used_lock); 6155 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 6156 bs_release_md_page(blob->bs, page_idx); 6157 spdk_spin_unlock(&blob->bs->used_lock); 6158 } 6159 6160 blob_free(blob); 6161 6162 bs_sequence_finish(seq, bserrno); 6163 } 6164 6165 static int 6166 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 6167 bool internal) 6168 { 6169 uint64_t i; 6170 size_t value_len = 0; 6171 int rc; 6172 const void *value = NULL; 6173 if (xattrs->count > 0 && xattrs->get_value == NULL) { 6174 return -EINVAL; 6175 } 6176 for (i = 0; i < xattrs->count; i++) { 6177 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 6178 if (value == NULL || value_len == 0) { 6179 return -EINVAL; 6180 } 6181 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 6182 if (rc < 0) { 6183 return rc; 6184 } 6185 } 6186 return 0; 6187 } 6188 6189 static void 6190 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 6191 { 6192 #define FIELD_OK(field) \ 6193 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 6194 6195 #define SET_FIELD(field) \ 6196 if (FIELD_OK(field)) { \ 6197 dst->field = src->field; \ 6198 } \ 6199 6200 SET_FIELD(num_clusters); 6201 SET_FIELD(thin_provision); 6202 SET_FIELD(clear_method); 6203 6204 if (FIELD_OK(xattrs)) { 6205 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 6206 } 6207 6208 SET_FIELD(use_extent_table); 6209 SET_FIELD(esnap_id); 6210 SET_FIELD(esnap_id_len); 6211 6212 dst->opts_size = src->opts_size; 6213 6214 /* You should not remove this statement, but need to update the assert statement 6215 * if you add a new field, and also add a corresponding SET_FIELD statement */ 6216 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size"); 6217 6218 #undef FIELD_OK 6219 #undef SET_FIELD 6220 } 6221 6222 static void 6223 bs_create_blob(struct spdk_blob_store *bs, 6224 const struct spdk_blob_opts *opts, 6225 const struct spdk_blob_xattr_opts *internal_xattrs, 6226 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6227 { 6228 struct spdk_blob *blob; 6229 uint32_t page_idx; 6230 struct spdk_bs_cpl cpl; 6231 struct spdk_blob_opts opts_local; 6232 struct spdk_blob_xattr_opts internal_xattrs_default; 6233 spdk_bs_sequence_t *seq; 6234 spdk_blob_id id; 6235 int rc; 6236 6237 assert(spdk_get_thread() == bs->md_thread); 6238 6239 spdk_spin_lock(&bs->used_lock); 6240 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 6241 if (page_idx == UINT32_MAX) { 6242 spdk_spin_unlock(&bs->used_lock); 6243 cb_fn(cb_arg, 0, -ENOMEM); 6244 return; 6245 } 6246 spdk_bit_array_set(bs->used_blobids, page_idx); 6247 bs_claim_md_page(bs, page_idx); 6248 spdk_spin_unlock(&bs->used_lock); 6249 6250 id = bs_page_to_blobid(page_idx); 6251 6252 SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx); 6253 6254 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 6255 if (opts) { 6256 blob_opts_copy(opts, &opts_local); 6257 } 6258 6259 blob = blob_alloc(bs, id); 6260 if (!blob) { 6261 rc = -ENOMEM; 6262 goto error; 6263 } 6264 6265 blob->use_extent_table = opts_local.use_extent_table; 6266 if (blob->use_extent_table) { 6267 
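/* SPDK_BLOB_EXTENT_TABLE lives in invalid_flags, which are persisted with the
 * blob's metadata; a reader that does not understand extent tables will then
 * refuse to load the blob instead of misparsing its extent descriptors. */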
blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 6268 } 6269 6270 if (!internal_xattrs) { 6271 blob_xattrs_init(&internal_xattrs_default); 6272 internal_xattrs = &internal_xattrs_default; 6273 } 6274 6275 rc = blob_set_xattrs(blob, &opts_local.xattrs, false); 6276 if (rc < 0) { 6277 goto error; 6278 } 6279 6280 rc = blob_set_xattrs(blob, internal_xattrs, true); 6281 if (rc < 0) { 6282 goto error; 6283 } 6284 6285 if (opts_local.thin_provision) { 6286 blob_set_thin_provision(blob); 6287 } 6288 6289 blob_set_clear_method(blob, opts_local.clear_method); 6290 6291 if (opts_local.esnap_id != NULL) { 6292 if (opts_local.esnap_id_len > UINT16_MAX) { 6293 SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n", 6294 opts_local.esnap_id_len); 6295 rc = -EINVAL; 6296 goto error; 6297 6298 } 6299 blob_set_thin_provision(blob); 6300 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 6301 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, 6302 opts_local.esnap_id, opts_local.esnap_id_len, true); 6303 if (rc != 0) { 6304 goto error; 6305 } 6306 } 6307 6308 rc = blob_resize(blob, opts_local.num_clusters); 6309 if (rc < 0) { 6310 goto error; 6311 } 6312 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6313 cpl.u.blobid.cb_fn = cb_fn; 6314 cpl.u.blobid.cb_arg = cb_arg; 6315 cpl.u.blobid.blobid = blob->id; 6316 6317 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 6318 if (!seq) { 6319 rc = -ENOMEM; 6320 goto error; 6321 } 6322 6323 blob_persist(seq, blob, bs_create_blob_cpl, blob); 6324 return; 6325 6326 error: 6327 SPDK_ERRLOG("Failed to create blob: %s, size in clusters: %" PRIu64 "\n", 6328 spdk_strerror(rc), opts_local.num_clusters); 6329 if (blob != NULL) { 6330 blob_free(blob); 6331 } 6332 spdk_spin_lock(&bs->used_lock); 6333 spdk_bit_array_clear(bs->used_blobids, page_idx); 6334 bs_release_md_page(bs, page_idx); 6335 spdk_spin_unlock(&bs->used_lock); 6336 cb_fn(cb_arg, 0, rc); 6337 } 6338 6339 void 6340 spdk_bs_create_blob(struct spdk_blob_store *bs, 6341 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6342 { 6343 bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 6344 } 6345 6346 void 6347 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts, 6348 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6349 { 6350 bs_create_blob(bs, opts, NULL, cb_fn, cb_arg); 6351 } 6352 6353 /* END spdk_bs_create_blob */ 6354 6355 /* START blob_cleanup */ 6356 6357 struct spdk_clone_snapshot_ctx { 6358 struct spdk_bs_cpl cpl; 6359 int bserrno; 6360 bool frozen; 6361 6362 struct spdk_io_channel *channel; 6363 6364 /* Current cluster for inflate operation */ 6365 uint64_t cluster; 6366 6367 /* For inflation, force allocation of all unallocated clusters and remove 6368 * thin-provisioning. Otherwise only decouple the parent and keep the clone thin. */ 6369 bool allocate_all; 6370 6371 struct { 6372 spdk_blob_id id; 6373 struct spdk_blob *blob; 6374 bool md_ro; 6375 } original; 6376 struct { 6377 spdk_blob_id id; 6378 struct spdk_blob *blob; 6379 } new; 6380 6381 /* xattrs specified for snapshot/clones only. They have no impact on 6382 * the original blob's xattrs.
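 *
 * (More broadly, this one context drives the snapshot, clone, inflate, and
 * decouple flows; cpl.type records which completion shape, blobid or basic,
 * bs_clone_snapshot_cleanup_finish() must invoke at the end.)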
*/ 6383 const struct spdk_blob_xattr_opts *xattrs; 6384 }; 6385 6386 static void 6387 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 6388 { 6389 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 6390 struct spdk_bs_cpl *cpl = &ctx->cpl; 6391 6392 if (bserrno != 0) { 6393 if (ctx->bserrno != 0) { 6394 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6395 } else { 6396 ctx->bserrno = bserrno; 6397 } 6398 } 6399 6400 switch (cpl->type) { 6401 case SPDK_BS_CPL_TYPE_BLOBID: 6402 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 6403 break; 6404 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 6405 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 6406 break; 6407 default: 6408 SPDK_UNREACHABLE(); 6409 break; 6410 } 6411 6412 free(ctx); 6413 } 6414 6415 static void 6416 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6417 { 6418 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6419 struct spdk_blob *origblob = ctx->original.blob; 6420 6421 if (bserrno != 0) { 6422 if (ctx->bserrno != 0) { 6423 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 6424 } else { 6425 ctx->bserrno = bserrno; 6426 } 6427 } 6428 6429 ctx->original.id = origblob->id; 6430 origblob->locked_operation_in_progress = false; 6431 6432 /* Revert md_ro to original state */ 6433 origblob->md_ro = ctx->original.md_ro; 6434 6435 spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx); 6436 } 6437 6438 static void 6439 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 6440 { 6441 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6442 struct spdk_blob *origblob = ctx->original.blob; 6443 6444 if (bserrno != 0) { 6445 if (ctx->bserrno != 0) { 6446 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6447 } else { 6448 ctx->bserrno = bserrno; 6449 } 6450 } 6451 6452 if (ctx->frozen) { 6453 /* Unfreeze any outstanding I/O */ 6454 blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx); 6455 } else { 6456 bs_snapshot_unfreeze_cpl(ctx, 0); 6457 } 6458 6459 } 6460 6461 static void 6462 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno) 6463 { 6464 struct spdk_blob *newblob = ctx->new.blob; 6465 6466 if (bserrno != 0) { 6467 if (ctx->bserrno != 0) { 6468 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6469 } else { 6470 ctx->bserrno = bserrno; 6471 } 6472 } 6473 6474 ctx->new.id = newblob->id; 6475 spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6476 } 6477 6478 /* END blob_cleanup */ 6479 6480 /* START spdk_bs_create_snapshot */ 6481 6482 static void 6483 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 6484 { 6485 uint64_t *cluster_temp; 6486 uint64_t num_allocated_clusters_temp; 6487 uint32_t *extent_page_temp; 6488 6489 cluster_temp = blob1->active.clusters; 6490 blob1->active.clusters = blob2->active.clusters; 6491 blob2->active.clusters = cluster_temp; 6492 6493 num_allocated_clusters_temp = blob1->active.num_allocated_clusters; 6494 blob1->active.num_allocated_clusters = blob2->active.num_allocated_clusters; 6495 blob2->active.num_allocated_clusters = num_allocated_clusters_temp; 6496 6497 extent_page_temp = blob1->active.extent_pages; 6498 blob1->active.extent_pages = blob2->active.extent_pages; 6499 blob2->active.extent_pages = extent_page_temp; 6500 } 6501 6502 /* Copies an internal xattr */ 6503 static int 6504 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name) 6505 { 6506 const void *val = NULL; 6507 
size_t len; 6508 int bserrno; 6509 6510 bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true); 6511 if (bserrno != 0) { 6512 SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name); 6513 return bserrno; 6514 } 6515 6516 bserrno = blob_set_xattr(toblob, name, val, len, true); 6517 if (bserrno != 0) { 6518 SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n", 6519 name, toblob->id); 6520 return bserrno; 6521 } 6522 return 0; 6523 } 6524 6525 static void 6526 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno) 6527 { 6528 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6529 struct spdk_blob *origblob = ctx->original.blob; 6530 struct spdk_blob *newblob = ctx->new.blob; 6531 6532 if (bserrno != 0) { 6533 bs_snapshot_swap_cluster_maps(newblob, origblob); 6534 if (blob_is_esnap_clone(newblob)) { 6535 bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6536 origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 6537 } 6538 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6539 return; 6540 } 6541 6542 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */ 6543 bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true); 6544 if (bserrno != 0) { 6545 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6546 return; 6547 } 6548 6549 bs_blob_list_add(ctx->original.blob); 6550 6551 spdk_blob_set_read_only(newblob); 6552 6553 /* sync snapshot metadata */ 6554 spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6555 } 6556 6557 static void 6558 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno) 6559 { 6560 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6561 struct spdk_blob *origblob = ctx->original.blob; 6562 struct spdk_blob *newblob = ctx->new.blob; 6563 6564 if (bserrno != 0) { 6565 /* return cluster map back to original */ 6566 bs_snapshot_swap_cluster_maps(newblob, origblob); 6567 6568 /* Newblob md sync failed. Valid clusters are only present in origblob. 6569 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred. 6570 * Newblob needs to be reverted to the thin-provisioned state it had at creation to close properly.
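 * blob_set_thin_provision() below restores that state, and the asserts
 * confirm that the swapped-back cluster map really is all zeroes.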
*/ 6571 blob_set_thin_provision(newblob); 6572 assert(spdk_mem_all_zero(newblob->active.clusters, 6573 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6574 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6575 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6576 6577 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6578 return; 6579 } 6580 6581 /* Set internal xattr for snapshot id */ 6582 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 6583 if (bserrno != 0) { 6584 /* return cluster map back to original */ 6585 bs_snapshot_swap_cluster_maps(newblob, origblob); 6586 blob_set_thin_provision(newblob); 6587 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6588 return; 6589 } 6590 6591 /* Create new back_bs_dev for snapshot */ 6592 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 6593 if (origblob->back_bs_dev == NULL) { 6594 /* return cluster map back to original */ 6595 bs_snapshot_swap_cluster_maps(newblob, origblob); 6596 blob_set_thin_provision(newblob); 6597 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 6598 return; 6599 } 6600 6601 /* Remove the xattr that references an external snapshot */ 6602 if (blob_is_esnap_clone(origblob)) { 6603 origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6604 bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6605 if (bserrno != 0) { 6606 if (bserrno == -ENOENT) { 6607 SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID 6608 " xattr to remove\n", origblob->id); 6609 assert(false); 6610 } else { 6611 /* return cluster map back to original */ 6612 bs_snapshot_swap_cluster_maps(newblob, origblob); 6613 blob_set_thin_provision(newblob); 6614 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6615 return; 6616 } 6617 } 6618 } 6619 6620 bs_blob_list_remove(origblob); 6621 origblob->parent_id = newblob->id; 6622 /* set clone blob as thin provisioned */ 6623 blob_set_thin_provision(origblob); 6624 6625 bs_blob_list_add(newblob); 6626 6627 /* sync clone metadata */ 6628 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 6629 } 6630 6631 static void 6632 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 6633 { 6634 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6635 struct spdk_blob *origblob = ctx->original.blob; 6636 struct spdk_blob *newblob = ctx->new.blob; 6637 int bserrno; 6638 6639 if (rc != 0) { 6640 bs_clone_snapshot_newblob_cleanup(ctx, rc); 6641 return; 6642 } 6643 6644 ctx->frozen = true; 6645 6646 if (blob_is_esnap_clone(origblob)) { 6647 /* Clean up any channels associated with the original blob id because future IO will 6648 * perform IO using the snapshot blob_id. 
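 * (The per-thread esnap channel tree is keyed by blob id, so channels stored
 * under the original blob's id would never be matched again once the esnap
 * back_bs_dev moves over to the snapshot.)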
6649 */ 6650 blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL); 6651 } 6652 if (newblob->back_bs_dev) { 6653 blob_back_bs_destroy(newblob); 6654 } 6655 /* set new back_bs_dev for snapshot */ 6656 newblob->back_bs_dev = origblob->back_bs_dev; 6657 /* Set invalid flags from origblob */ 6658 newblob->invalid_flags = origblob->invalid_flags; 6659 6660 /* inherit parent from original blob if set */ 6661 newblob->parent_id = origblob->parent_id; 6662 switch (origblob->parent_id) { 6663 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 6664 bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6665 if (bserrno != 0) { 6666 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6667 return; 6668 } 6669 break; 6670 case SPDK_BLOBID_INVALID: 6671 break; 6672 default: 6673 /* Set internal xattr for snapshot id */ 6674 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6675 &origblob->parent_id, sizeof(spdk_blob_id), true); 6676 if (bserrno != 0) { 6677 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6678 return; 6679 } 6680 } 6681 6682 /* swap cluster maps */ 6683 bs_snapshot_swap_cluster_maps(newblob, origblob); 6684 6685 /* Set the clear method on the new blob to match the original. */ 6686 blob_set_clear_method(newblob, origblob->clear_method); 6687 6688 /* sync snapshot metadata */ 6689 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6690 } 6691 6692 static void 6693 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6694 { 6695 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6696 struct spdk_blob *origblob = ctx->original.blob; 6697 struct spdk_blob *newblob = _blob; 6698 6699 if (bserrno != 0) { 6700 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6701 return; 6702 } 6703 6704 ctx->new.blob = newblob; 6705 assert(spdk_blob_is_thin_provisioned(newblob)); 6706 assert(spdk_mem_all_zero(newblob->active.clusters, 6707 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6708 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6709 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6710 6711 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6712 } 6713 6714 static void 6715 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6716 { 6717 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6718 struct spdk_blob *origblob = ctx->original.blob; 6719 6720 if (bserrno != 0) { 6721 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6722 return; 6723 } 6724 6725 ctx->new.id = blobid; 6726 ctx->cpl.u.blobid.blobid = blobid; 6727 6728 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6729 } 6730 6731 6732 static void 6733 bs_xattr_snapshot(void *arg, const char *name, 6734 const void **value, size_t *value_len) 6735 { 6736 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6737 6738 struct spdk_blob *blob = (struct spdk_blob *)arg; 6739 *value = &blob->id; 6740 *value_len = sizeof(blob->id); 6741 } 6742 6743 static void 6744 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6745 { 6746 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6747 struct spdk_blob_opts opts; 6748 struct spdk_blob_xattr_opts internal_xattrs; 6749 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6750 6751 if (bserrno != 0) { 6752 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6753 return; 6754 } 6755 6756 
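/* The open handle is owned by ctx from here on; every later exit path hands
 * it back through spdk_blob_close() via the cleanup callbacks. */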
ctx->original.blob = _blob; 6757 6758 if (_blob->data_ro || _blob->md_ro) { 6759 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id 0x%" 6760 PRIx64 "\n", _blob->id); 6761 ctx->bserrno = -EINVAL; 6762 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6763 return; 6764 } 6765 6766 if (_blob->locked_operation_in_progress) { 6767 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6768 ctx->bserrno = -EBUSY; 6769 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6770 return; 6771 } 6772 6773 _blob->locked_operation_in_progress = true; 6774 6775 spdk_blob_opts_init(&opts, sizeof(opts)); 6776 blob_xattrs_init(&internal_xattrs); 6777 6778 /* Change the size of new blob to the same as in original blob, 6779 * but do not allocate clusters */ 6780 opts.thin_provision = true; 6781 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6782 opts.use_extent_table = _blob->use_extent_table; 6783 6784 /* If there are any xattrs specified for snapshot, set them now */ 6785 if (ctx->xattrs) { 6786 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6787 } 6788 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6789 internal_xattrs.count = 1; 6790 internal_xattrs.ctx = _blob; 6791 internal_xattrs.names = xattrs_names; 6792 internal_xattrs.get_value = bs_xattr_snapshot; 6793 6794 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6795 bs_snapshot_newblob_create_cpl, ctx); 6796 } 6797 6798 void 6799 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 6800 const struct spdk_blob_xattr_opts *snapshot_xattrs, 6801 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6802 { 6803 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6804 6805 if (!ctx) { 6806 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6807 return; 6808 } 6809 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6810 ctx->cpl.u.blobid.cb_fn = cb_fn; 6811 ctx->cpl.u.blobid.cb_arg = cb_arg; 6812 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6813 ctx->bserrno = 0; 6814 ctx->frozen = false; 6815 ctx->original.id = blobid; 6816 ctx->xattrs = snapshot_xattrs; 6817 6818 spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx); 6819 } 6820 /* END spdk_bs_create_snapshot */ 6821 6822 /* START spdk_bs_create_clone */ 6823 6824 static void 6825 bs_xattr_clone(void *arg, const char *name, 6826 const void **value, size_t *value_len) 6827 { 6828 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 6829 6830 struct spdk_blob *blob = (struct spdk_blob *)arg; 6831 *value = &blob->id; 6832 *value_len = sizeof(blob->id); 6833 } 6834 6835 static void 6836 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6837 { 6838 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6839 struct spdk_blob *clone = _blob; 6840 6841 ctx->new.blob = clone; 6842 bs_blob_list_add(clone); 6843 6844 spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx); 6845 } 6846 6847 static void 6848 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6849 { 6850 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6851 6852 ctx->cpl.u.blobid.blobid = blobid; 6853 spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx); 6854 } 6855 6856 static void 6857 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6858 { 6859 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6860 struct 
spdk_blob_opts opts; 6861 struct spdk_blob_xattr_opts internal_xattrs; 6862 char *xattr_names[] = { BLOB_SNAPSHOT }; 6863 6864 if (bserrno != 0) { 6865 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6866 return; 6867 } 6868 6869 ctx->original.blob = _blob; 6870 ctx->original.md_ro = _blob->md_ro; 6871 6872 if (!_blob->data_ro || !_blob->md_ro) { 6873 SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n"); 6874 ctx->bserrno = -EINVAL; 6875 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6876 return; 6877 } 6878 6879 if (_blob->locked_operation_in_progress) { 6880 SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n"); 6881 ctx->bserrno = -EBUSY; 6882 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6883 return; 6884 } 6885 6886 _blob->locked_operation_in_progress = true; 6887 6888 spdk_blob_opts_init(&opts, sizeof(opts)); 6889 blob_xattrs_init(&internal_xattrs); 6890 6891 opts.thin_provision = true; 6892 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6893 opts.use_extent_table = _blob->use_extent_table; 6894 if (ctx->xattrs) { 6895 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6896 } 6897 6898 /* Set internal xattr BLOB_SNAPSHOT */ 6899 internal_xattrs.count = 1; 6900 internal_xattrs.ctx = _blob; 6901 internal_xattrs.names = xattr_names; 6902 internal_xattrs.get_value = bs_xattr_clone; 6903 6904 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6905 bs_clone_newblob_create_cpl, ctx); 6906 } 6907 6908 void 6909 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6910 const struct spdk_blob_xattr_opts *clone_xattrs, 6911 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6912 { 6913 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6914 6915 if (!ctx) { 6916 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6917 return; 6918 } 6919 6920 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6921 ctx->cpl.u.blobid.cb_fn = cb_fn; 6922 ctx->cpl.u.blobid.cb_arg = cb_arg; 6923 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6924 ctx->bserrno = 0; 6925 ctx->xattrs = clone_xattrs; 6926 ctx->original.id = blobid; 6927 6928 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6929 } 6930 6931 /* END spdk_bs_create_clone */ 6932 6933 /* START spdk_bs_inflate_blob */ 6934 6935 static void 6936 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6937 { 6938 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6939 struct spdk_blob *_blob = ctx->original.blob; 6940 6941 if (bserrno != 0) { 6942 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6943 return; 6944 } 6945 6946 /* Temporarily override md_ro flag for MD modification */ 6947 _blob->md_ro = false; 6948 6949 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6950 if (bserrno != 0) { 6951 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6952 return; 6953 } 6954 6955 assert(_parent != NULL); 6956 6957 bs_blob_list_remove(_blob); 6958 _blob->parent_id = _parent->id; 6959 6960 blob_back_bs_destroy(_blob); 6961 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6962 bs_blob_list_add(_blob); 6963 6964 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6965 } 6966 6967 static void 6968 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 6969 { 6970 struct spdk_blob *_blob = ctx->original.blob; 6971 struct spdk_blob *_parent; 6972 6973 if (ctx->allocate_all) { 6974 /* remove thin provisioning */ 6975 bs_blob_list_remove(_blob); 
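/* Sever the parent link: esnap clones drop the external snapshot xattr and
 * flag, ordinary clones drop the BLOB_SNAPSHOT xattr. */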
6976 if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 6977 blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6978 _blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6979 } else { 6980 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 6981 } 6982 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV; 6983 blob_back_bs_destroy(_blob); 6984 _blob->parent_id = SPDK_BLOBID_INVALID; 6985 } else { 6986 /* For now, esnap clones always have allocate_all set. */ 6987 assert(!blob_is_esnap_clone(_blob)); 6988 6989 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob; 6990 if (_parent->parent_id != SPDK_BLOBID_INVALID) { 6991 /* We must change the parent of the inflated blob */ 6992 spdk_bs_open_blob(_blob->bs, _parent->parent_id, 6993 bs_inflate_blob_set_parent_cpl, ctx); 6994 return; 6995 } 6996 6997 bs_blob_list_remove(_blob); 6998 _blob->parent_id = SPDK_BLOBID_INVALID; 6999 blob_back_bs_destroy(_blob); 7000 _blob->back_bs_dev = bs_create_zeroes_dev(); 7001 } 7002 7003 /* Temporarily override md_ro flag for MD modification */ 7004 _blob->md_ro = false; 7005 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 7006 _blob->state = SPDK_BLOB_STATE_DIRTY; 7007 7008 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 7009 } 7010 7011 /* Check if the cluster needs allocation */ 7012 static inline bool 7013 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all) 7014 { 7015 struct spdk_blob_bs_dev *b; 7016 7017 assert(blob != NULL); 7018 7019 if (blob->active.clusters[cluster] != 0) { 7020 /* Cluster is already allocated */ 7021 return false; 7022 } 7023 7024 if (blob->parent_id == SPDK_BLOBID_INVALID) { 7025 /* Blob has no parent */ 7026 return allocate_all; 7027 } 7028 7029 if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 7030 return true; 7031 } 7032 7033 b = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 7034 return (allocate_all || b->blob->active.clusters[cluster] != 0); 7035 } 7036 7037 static void 7038 bs_inflate_blob_touch_next(void *cb_arg, int bserrno) 7039 { 7040 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 7041 struct spdk_blob *_blob = ctx->original.blob; 7042 struct spdk_bs_cpl cpl; 7043 spdk_bs_user_op_t *op; 7044 uint64_t offset; 7045 7046 if (bserrno != 0) { 7047 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 7048 return; 7049 } 7050 7051 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) { 7052 if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) { 7053 break; 7054 } 7055 } 7056 7057 if (ctx->cluster < _blob->active.num_clusters) { 7058 offset = bs_cluster_to_lba(_blob->bs, ctx->cluster); 7059 7060 /* We may safely advance ctx->cluster before the copy completes */ 7061 ctx->cluster++; 7062 7063 /* Use a dummy 0-byte read as a context for the cluster copy */ 7064 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7065 cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next; 7066 cpl.u.blob_basic.cb_arg = ctx; 7067 7068 op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob, 7069 NULL, 0, offset, 0); 7070 if (!op) { 7071 bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM); 7072 return; 7073 } 7074 7075 bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op); 7076 } else { 7077 bs_inflate_blob_done(ctx); 7078 } 7079 } 7080 7081 static void 7082 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 7083 { 7084 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 7085 uint64_t clusters_needed; 7086
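/* Reached from both spdk_bs_inflate_blob() (allocate_all == true) and
 * spdk_bs_blob_decouple_parent() (allocate_all == false) once the target
 * blob has been opened; the difference only affects which clusters
 * bs_cluster_needs_allocation() selects below. */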
uint64_t i; 7087 7088 if (bserrno != 0) { 7089 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 7090 return; 7091 } 7092 7093 ctx->original.blob = _blob; 7094 ctx->original.md_ro = _blob->md_ro; 7095 7096 if (_blob->locked_operation_in_progress) { 7097 SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n"); 7098 ctx->bserrno = -EBUSY; 7099 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 7100 return; 7101 } 7102 7103 _blob->locked_operation_in_progress = true; 7104 7105 switch (_blob->parent_id) { 7106 case SPDK_BLOBID_INVALID: 7107 if (!ctx->allocate_all) { 7108 /* This blob has no parent, so we cannot decouple it. */ 7109 SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n"); 7110 bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 7111 return; 7112 } 7113 break; 7114 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 7115 /* 7116 * It would be better to rely on back_bs_dev->is_zeroes() to determine which 7117 * clusters require allocation. Until there is a blobstore consumer that 7118 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes() it is not 7119 * worth the effort. 7120 */ 7121 ctx->allocate_all = true; 7122 break; 7123 default: 7124 break; 7125 } 7126 7127 if (spdk_blob_is_thin_provisioned(_blob) == false) { 7128 /* This is not a thin-provisioned blob. No need to inflate. */ 7129 bs_clone_snapshot_origblob_cleanup(ctx, 0); 7130 return; 7131 } 7132 7133 /* Do two passes - one to verify that we can obtain enough clusters 7134 * and another to actually claim them. 7135 */ 7136 clusters_needed = 0; 7137 for (i = 0; i < _blob->active.num_clusters; i++) { 7138 if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) { 7139 clusters_needed++; 7140 } 7141 } 7142 7143 if (clusters_needed > _blob->bs->num_free_clusters) { 7144 /* Not enough free clusters. Cannot satisfy the request.
*/ 7145 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 7146 return; 7147 } 7148 7149 ctx->cluster = 0; 7150 bs_inflate_blob_touch_next(ctx, 0); 7151 } 7152 7153 static void 7154 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7155 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 7156 { 7157 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 7158 7159 if (!ctx) { 7160 cb_fn(cb_arg, -ENOMEM); 7161 return; 7162 } 7163 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7164 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7165 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7166 ctx->bserrno = 0; 7167 ctx->original.id = blobid; 7168 ctx->channel = channel; 7169 ctx->allocate_all = allocate_all; 7170 7171 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 7172 } 7173 7174 void 7175 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7176 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7177 { 7178 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 7179 } 7180 7181 void 7182 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7183 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7184 { 7185 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 7186 } 7187 /* END spdk_bs_inflate_blob */ 7188 7189 /* START spdk_bs_blob_shallow_copy */ 7190 7191 struct shallow_copy_ctx { 7192 struct spdk_bs_cpl cpl; 7193 int bserrno; 7194 7195 /* Blob source for copy */ 7196 struct spdk_blob_store *bs; 7197 spdk_blob_id blobid; 7198 struct spdk_blob *blob; 7199 struct spdk_io_channel *blob_channel; 7200 7201 /* Destination device for copy */ 7202 struct spdk_bs_dev *ext_dev; 7203 struct spdk_io_channel *ext_channel; 7204 7205 /* Current cluster for copy operation */ 7206 uint64_t cluster; 7207 7208 /* Buffer for blob reading */ 7209 uint8_t *read_buff; 7210 7211 /* Struct for external device writing */ 7212 struct spdk_bs_dev_cb_args ext_args; 7213 7214 /* Actual number of copied clusters */ 7215 uint64_t copied_clusters_count; 7216 7217 /* Status callback for updates about the ongoing operation */ 7218 spdk_blob_shallow_copy_status status_cb; 7219 7220 /* Argument passed to function status_cb */ 7221 void *status_cb_arg; 7222 }; 7223 7224 static void 7225 bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno) 7226 { 7227 struct shallow_copy_ctx *ctx = cb_arg; 7228 struct spdk_bs_cpl *cpl = &ctx->cpl; 7229 7230 if (bserrno != 0) { 7231 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, cleanup error %d\n", ctx->blob->id, bserrno); 7232 ctx->bserrno = bserrno; 7233 } 7234 7235 ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel); 7236 spdk_free(ctx->read_buff); 7237 7238 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 7239 7240 free(ctx); 7241 } 7242 7243 static void 7244 bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, int bserrno) 7245 { 7246 struct shallow_copy_ctx *ctx = cb_arg; 7247 struct spdk_blob *_blob = ctx->blob; 7248 7249 if (bserrno != 0) { 7250 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, ext dev write error %d\n", ctx->blob->id, bserrno); 7251 ctx->bserrno = bserrno; 7252 _blob->locked_operation_in_progress = false; 7253 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7254 return; 7255 } 7256 7257 ctx->cluster++; 7258 if (ctx->status_cb) { 7259 ctx->copied_clusters_count++; 7260 ctx->status_cb(ctx->copied_clusters_count, ctx->status_cb_arg); 7261 } 7262 7263 
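/* Advance to the next allocated cluster; when the scan reaches the end of
 * the cluster map the copy is complete and the blob is closed. */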
bs_shallow_copy_cluster_find_next(ctx); 7264 } 7265 7266 static void 7267 bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno) 7268 { 7269 struct shallow_copy_ctx *ctx = cb_arg; 7270 struct spdk_bs_dev *ext_dev = ctx->ext_dev; 7271 struct spdk_blob *_blob = ctx->blob; 7272 7273 if (bserrno != 0) { 7274 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob read error %d\n", ctx->blob->id, bserrno); 7275 ctx->bserrno = bserrno; 7276 _blob->locked_operation_in_progress = false; 7277 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7278 return; 7279 } 7280 7281 ctx->ext_args.channel = ctx->ext_channel; 7282 ctx->ext_args.cb_fn = bs_shallow_copy_bdev_write_cpl; 7283 ctx->ext_args.cb_arg = ctx; 7284 7285 ext_dev->write(ext_dev, ctx->ext_channel, ctx->read_buff, 7286 bs_cluster_to_lba(_blob->bs, ctx->cluster), 7287 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz), 7288 &ctx->ext_args); 7289 } 7290 7291 static void 7292 bs_shallow_copy_cluster_find_next(void *cb_arg) 7293 { 7294 struct shallow_copy_ctx *ctx = cb_arg; 7295 struct spdk_blob *_blob = ctx->blob; 7296 7297 while (ctx->cluster < _blob->active.num_clusters) { 7298 if (_blob->active.clusters[ctx->cluster] != 0) { 7299 break; 7300 } 7301 7302 ctx->cluster++; 7303 } 7304 7305 if (ctx->cluster < _blob->active.num_clusters) { 7306 blob_request_submit_op_single(ctx->blob_channel, _blob, ctx->read_buff, 7307 bs_cluster_to_lba(_blob->bs, ctx->cluster), 7308 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz), 7309 bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ); 7310 } else { 7311 _blob->locked_operation_in_progress = false; 7312 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7313 } 7314 } 7315 7316 static void 7317 bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 7318 { 7319 struct shallow_copy_ctx *ctx = cb_arg; 7320 struct spdk_bs_dev *ext_dev = ctx->ext_dev; 7321 uint32_t blob_block_size; 7322 uint64_t blob_total_size; 7323 7324 if (bserrno != 0) { 7325 SPDK_ERRLOG("Shallow copy blob open error %d\n", bserrno); 7326 ctx->bserrno = bserrno; 7327 bs_shallow_copy_cleanup_finish(ctx, 0); 7328 return; 7329 } 7330 7331 if (!spdk_blob_is_read_only(_blob)) { 7332 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob must be read only\n", _blob->id); 7333 ctx->bserrno = -EPERM; 7334 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7335 return; 7336 } 7337 7338 blob_block_size = _blob->bs->dev->blocklen; 7339 blob_total_size = spdk_blob_get_num_clusters(_blob) * spdk_bs_get_cluster_size(_blob->bs); 7340 7341 if (blob_total_size > ext_dev->blockcnt * ext_dev->blocklen) { 7342 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device must be at least as large as the blob\n", 7343 _blob->id); 7344 ctx->bserrno = -EINVAL; 7345 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7346 return; 7347 } 7348 7349 if (blob_block_size % ext_dev->blocklen != 0) { 7350 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device block size is not compatible with \ 7351 blobstore block size\n", _blob->id); 7352 ctx->bserrno = -EINVAL; 7353 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7354 return; 7355 } 7356 7357 ctx->blob = _blob; 7358 7359 if (_blob->locked_operation_in_progress) { 7360 SPDK_DEBUGLOG(blob, "blob 0x%" PRIx64 " shallow copy - another operation in progress\n", _blob->id); 7361 ctx->bserrno = -EBUSY; 7362 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7363 return; 7364 } 7365 7366 _blob->locked_operation_in_progress = true; 7367
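/*
 * Only clusters allocated in this blob itself (not in any parent) are copied,
 * which is what makes the copy "shallow".
 *
 * Illustrative sketch of a caller (names other than the API are hypothetical;
 * ext_dev is any application-supplied struct spdk_bs_dev at least as large as
 * the blob):
 *
 *     rc = spdk_bs_blob_shallow_copy(bs, channel, blobid, ext_dev,
 *                                    status_cb, status_arg, copy_done, NULL);
 *     if (rc != 0) {
 *         // the operation was never started; no callback will fire
 *     }
 */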
7368 ctx->cluster = 0; 7369 bs_shallow_copy_cluster_find_next(ctx); 7370 } 7371 7372 int 7373 spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7374 spdk_blob_id blobid, struct spdk_bs_dev *ext_dev, 7375 spdk_blob_shallow_copy_status status_cb_fn, void *status_cb_arg, 7376 spdk_blob_op_complete cb_fn, void *cb_arg) 7377 { 7378 struct shallow_copy_ctx *ctx; 7379 struct spdk_io_channel *ext_channel; 7380 7381 ctx = calloc(1, sizeof(*ctx)); 7382 if (!ctx) { 7383 return -ENOMEM; 7384 } 7385 7386 ctx->bs = bs; 7387 ctx->blobid = blobid; 7388 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7389 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7390 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7391 ctx->bserrno = 0; 7392 ctx->blob_channel = channel; 7393 ctx->status_cb = status_cb_fn; 7394 ctx->status_cb_arg = status_cb_arg; 7395 ctx->read_buff = spdk_malloc(bs->cluster_sz, bs->dev->blocklen, NULL, 7396 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 7397 if (!ctx->read_buff) { 7398 free(ctx); 7399 return -ENOMEM; 7400 } 7401 7402 ext_channel = ext_dev->create_channel(ext_dev); 7403 if (!ext_channel) { 7404 spdk_free(ctx->read_buff); 7405 free(ctx); 7406 return -ENOMEM; 7407 } 7408 ctx->ext_dev = ext_dev; 7409 ctx->ext_channel = ext_channel; 7410 7411 spdk_bs_open_blob(ctx->bs, ctx->blobid, bs_shallow_copy_blob_open_cpl, ctx); 7412 7413 return 0; 7414 } 7415 /* END spdk_bs_blob_shallow_copy */ 7416 7417 /* START spdk_bs_blob_set_parent */ 7418 7419 struct set_parent_ctx { 7420 struct spdk_blob_store *bs; 7421 int bserrno; 7422 spdk_bs_op_complete cb_fn; 7423 void *cb_arg; 7424 7425 struct spdk_blob *blob; 7426 bool blob_md_ro; 7427 7428 struct blob_parent parent; 7429 }; 7430 7431 static void 7432 bs_set_parent_cleanup_finish(void *cb_arg, int bserrno) 7433 { 7434 struct set_parent_ctx *ctx = cb_arg; 7435 7436 assert(ctx != NULL); 7437 7438 if (bserrno != 0) { 7439 SPDK_ERRLOG("blob set parent finish error %d\n", bserrno); 7440 if (ctx->bserrno == 0) { 7441 ctx->bserrno = bserrno; 7442 } 7443 } 7444 7445 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7446 7447 free(ctx); 7448 } 7449 7450 static void 7451 bs_set_parent_close_snapshot(void *cb_arg, int bserrno) 7452 { 7453 struct set_parent_ctx *ctx = cb_arg; 7454 7455 if (ctx->bserrno != 0) { 7456 spdk_blob_close(ctx->parent.u.snapshot.blob, bs_set_parent_cleanup_finish, ctx); 7457 return; 7458 } 7459 7460 if (bserrno != 0) { 7461 SPDK_ERRLOG("blob close error %d\n", bserrno); 7462 ctx->bserrno = bserrno; 7463 } 7464 7465 bs_set_parent_cleanup_finish(ctx, ctx->bserrno); 7466 } 7467 7468 static void 7469 bs_set_parent_close_blob(void *cb_arg, int bserrno) 7470 { 7471 struct set_parent_ctx *ctx = cb_arg; 7472 struct spdk_blob *blob = ctx->blob; 7473 struct spdk_blob *snapshot = ctx->parent.u.snapshot.blob; 7474 7475 if (bserrno != 0 && ctx->bserrno == 0) { 7476 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7477 ctx->bserrno = bserrno; 7478 } 7479 7480 /* Revert md_ro to original state */ 7481 blob->md_ro = ctx->blob_md_ro; 7482 7483 blob->locked_operation_in_progress = false; 7484 snapshot->locked_operation_in_progress = false; 7485 7486 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7487 } 7488 7489 static void 7490 bs_set_parent_set_back_bs_dev_done(void *cb_arg, int bserrno) 7491 { 7492 struct set_parent_ctx *ctx = cb_arg; 7493 struct spdk_blob *blob = ctx->blob; 7494 7495 if (bserrno != 0) { 7496 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7497 ctx->bserrno = bserrno; 7498 bs_set_parent_close_blob(ctx, bserrno); 7499 
return; 7500 } 7501 7502 spdk_blob_sync_md(blob, bs_set_parent_close_blob, ctx); 7503 } 7504 7505 static int 7506 bs_set_parent_refs(struct spdk_blob *blob, struct blob_parent *parent) 7507 { 7508 int rc; 7509 7510 bs_blob_list_remove(blob); 7511 7512 rc = blob_set_xattr(blob, BLOB_SNAPSHOT, &parent->u.snapshot.id, sizeof(spdk_blob_id), true); 7513 if (rc != 0) { 7514 SPDK_ERRLOG("error %d setting snapshot xattr\n", rc); 7515 return rc; 7516 } 7517 blob->parent_id = parent->u.snapshot.id; 7518 7519 if (blob_is_esnap_clone(blob)) { 7520 /* Remove the xattr that references the external snapshot */ 7521 blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 7522 blob_remove_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 7523 } 7524 7525 bs_blob_list_add(blob); 7526 7527 return 0; 7528 } 7529 7530 static void 7531 bs_set_parent_snapshot_open_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno) 7532 { 7533 struct set_parent_ctx *ctx = cb_arg; 7534 struct spdk_blob *blob = ctx->blob; 7535 struct spdk_bs_dev *back_bs_dev; 7536 7537 if (bserrno != 0) { 7538 SPDK_ERRLOG("snapshot open error %d\n", bserrno); 7539 ctx->bserrno = bserrno; 7540 spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx); 7541 return; 7542 } 7543 7544 ctx->parent.u.snapshot.blob = snapshot; 7545 ctx->parent.u.snapshot.id = snapshot->id; 7546 7547 if (!spdk_blob_is_snapshot(snapshot)) { 7548 SPDK_ERRLOG("parent blob is not a snapshot\n"); 7549 ctx->bserrno = -EINVAL; 7550 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7551 return; 7552 } 7553 7554 if (blob->active.num_clusters != snapshot->active.num_clusters) { 7555 SPDK_ERRLOG("parent blob has a different number of clusters than the child\n"); 7556 ctx->bserrno = -EINVAL; 7557 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7558 return; 7559 } 7560 7561 if (blob->locked_operation_in_progress || snapshot->locked_operation_in_progress) { 7562 SPDK_ERRLOG("cannot set parent of blob, another operation in progress\n"); 7563 ctx->bserrno = -EBUSY; 7564 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7565 return; 7566 } 7567 7568 blob->locked_operation_in_progress = true; 7569 snapshot->locked_operation_in_progress = true; 7570 7571 /* Temporarily override md_ro flag for MD modification */ 7572 blob->md_ro = false; 7573 7574 back_bs_dev = bs_create_blob_bs_dev(snapshot); 7575 7576 blob_set_back_bs_dev(blob, back_bs_dev, bs_set_parent_refs, &ctx->parent, 7577 bs_set_parent_set_back_bs_dev_done, 7578 ctx); 7579 } 7580 7581 static void 7582 bs_set_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7583 { 7584 struct set_parent_ctx *ctx = cb_arg; 7585 7586 if (bserrno != 0) { 7587 SPDK_ERRLOG("blob open error %d\n", bserrno); 7588 ctx->bserrno = bserrno; 7589 bs_set_parent_cleanup_finish(ctx, 0); 7590 return; 7591 } 7592 7593 if (!spdk_blob_is_thin_provisioned(blob)) { 7594 SPDK_ERRLOG("blob is not thin-provisioned\n"); 7595 ctx->bserrno = -EINVAL; 7596 spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx); 7597 return; 7598 } 7599 7600 ctx->blob = blob; 7601 ctx->blob_md_ro = blob->md_ro; 7602 7603 spdk_bs_open_blob(ctx->bs, ctx->parent.u.snapshot.id, bs_set_parent_snapshot_open_cpl, ctx); 7604 } 7605 7606 void 7607 spdk_bs_blob_set_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id, 7608 spdk_blob_id snapshot_id, spdk_blob_op_complete cb_fn, void *cb_arg) 7609 { 7610 struct set_parent_ctx *ctx; 7611 7612 if (snapshot_id == SPDK_BLOBID_INVALID) { 7613 SPDK_ERRLOG("snapshot id not valid\n"); 7614 cb_fn(cb_arg, 
/* START spdk_bs_blob_set_external_parent */

static void
bs_set_external_parent_cleanup_finish(void *cb_arg, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob set external parent finish error %d\n", bserrno);
		if (ctx->bserrno == 0) {
			ctx->bserrno = bserrno;
		}
	}

	ctx->cb_fn(ctx->cb_arg, ctx->bserrno);

	free(ctx->parent.u.esnap.id);
	free(ctx);
}

static void
bs_set_external_parent_close_blob(void *cb_arg, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno != 0 && ctx->bserrno == 0) {
		SPDK_ERRLOG("error %d in metadata sync\n", bserrno);
		ctx->bserrno = bserrno;
	}

	/* Revert md_ro to original state */
	blob->md_ro = ctx->blob_md_ro;

	blob->locked_operation_in_progress = false;

	spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx);
}

static void
bs_set_external_parent_unfrozen(void *cb_arg, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno != 0) {
		SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno);
		ctx->bserrno = bserrno;
		bs_set_external_parent_close_blob(ctx, bserrno);
		return;
	}

	spdk_blob_sync_md(blob, bs_set_external_parent_close_blob, ctx);
}

static int
bs_set_external_parent_refs(struct spdk_blob *blob, struct blob_parent *parent)
{
	int rc;

	bs_blob_list_remove(blob);

	if (spdk_blob_is_clone(blob)) {
		/* Remove the xattr that references the snapshot */
		blob->parent_id = SPDK_BLOBID_INVALID;
		blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
	}

	rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, parent->u.esnap.id,
			    parent->u.esnap.id_len, true);
	if (rc != 0) {
		SPDK_ERRLOG("error %d setting external snapshot xattr\n", rc);
		return rc;
	}
	blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;

	bs_blob_list_add(blob);

	return 0;
}

static void
bs_set_external_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;
	const void *esnap_id;
	size_t esnap_id_len;
	int rc;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob open error %d\n", bserrno);
		ctx->bserrno = bserrno;
		/* Use the external-parent cleanup path so the esnap id copy is freed too. */
		bs_set_external_parent_cleanup_finish(ctx, 0);
		return;
	}

	ctx->blob = blob;
	ctx->blob_md_ro = blob->md_ro;

	rc = spdk_blob_get_esnap_id(blob, &esnap_id, &esnap_id_len);
	if (rc == 0 && esnap_id != NULL && esnap_id_len == ctx->parent.u.esnap.id_len &&
	    memcmp(esnap_id, ctx->parent.u.esnap.id, esnap_id_len) == 0) {
		SPDK_ERRLOG("external snapshot is already the parent of blob\n");
		ctx->bserrno = -EEXIST;
		goto error;
	}

	if (!spdk_blob_is_thin_provisioned(blob)) {
		SPDK_ERRLOG("blob is not thin-provisioned\n");
		ctx->bserrno = -EINVAL;
		goto error;
	}

	if (blob->locked_operation_in_progress) {
		SPDK_ERRLOG("cannot set external parent of blob, another operation in progress\n");
		ctx->bserrno = -EBUSY;
		goto error;
	}

	blob->locked_operation_in_progress = true;

	/* Temporarily override md_ro flag for MD modification */
	blob->md_ro = false;

	blob_set_back_bs_dev(blob, ctx->parent.u.esnap.back_bs_dev, bs_set_external_parent_refs,
			     &ctx->parent, bs_set_external_parent_unfrozen, ctx);
	return;

error:
	spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx);
}

void
spdk_bs_blob_set_external_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id,
				 struct spdk_bs_dev *esnap_bs_dev, const void *esnap_id,
				 uint32_t esnap_id_len, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct set_parent_ctx *ctx;
	uint64_t esnap_dev_size, cluster_sz;

	if (sizeof(blob_id) == esnap_id_len && memcmp(&blob_id, esnap_id, sizeof(blob_id)) == 0) {
		SPDK_ERRLOG("blob id and external snapshot id cannot be the same\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	esnap_dev_size = esnap_bs_dev->blockcnt * esnap_bs_dev->blocklen;
	cluster_sz = spdk_bs_get_cluster_size(bs);
	if ((esnap_dev_size % cluster_sz) != 0) {
		SPDK_ERRLOG("Esnap device size %" PRIu64 " is not an integer multiple of "
			    "cluster size %" PRIu64 "\n", esnap_dev_size, cluster_sz);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->parent.u.esnap.id = calloc(1, esnap_id_len);
	if (!ctx->parent.u.esnap.id) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->bs = bs;
	ctx->parent.u.esnap.back_bs_dev = esnap_bs_dev;
	memcpy(ctx->parent.u.esnap.id, esnap_id, esnap_id_len);
	ctx->parent.u.esnap.id_len = esnap_id_len;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->bserrno = 0;

	spdk_bs_open_blob(bs, blob_id, bs_set_external_parent_blob_open_cpl, ctx);
}
/* END spdk_bs_blob_set_external_parent */
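/*
 * Usage sketch (compiled out): attaching an external snapshot as the parent.
 * The esnap id is opaque to the blobstore; a bdev name is used here purely as
 * an illustration, and esnap_dev, g_bs and clone_id are hypothetical. Note the
 * precondition checked above: the esnap device size must be an integer
 * multiple of the blobstore cluster size.
 */
#if 0
static void
set_esnap_parent_done(void *cb_arg, int bserrno)
{
	SPDK_NOTICELOG("set external parent completed: %d\n", bserrno);
}

static void
attach_external_parent(struct spdk_bs_dev *esnap_dev)
{
	const char *id = "base-bdev-name";

	spdk_bs_blob_set_external_parent(g_bs, clone_id, esnap_dev, id,
					 strlen(id) + 1, set_esnap_parent_done, NULL);
}
#endif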
/* START spdk_blob_resize */
struct spdk_bs_resize_ctx {
	spdk_blob_op_complete cb_fn;
	void *cb_arg;
	struct spdk_blob *blob;
	uint64_t sz;
	int rc;
};

static void
bs_resize_unfreeze_cpl(void *cb_arg, int rc)
{
	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;

	if (rc != 0) {
		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
	}

	if (ctx->rc != 0) {
		/* ctx->rc holds the result of the earlier blob_resize() call */
		SPDK_ERRLOG("Resize failed, ctx->rc=%d\n", ctx->rc);
		rc = ctx->rc;
	}

	ctx->blob->locked_operation_in_progress = false;

	ctx->cb_fn(ctx->cb_arg, rc);
	free(ctx);
}

static void
bs_resize_freeze_cpl(void *cb_arg, int rc)
{
	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;

	if (rc != 0) {
		ctx->blob->locked_operation_in_progress = false;
		ctx->cb_fn(ctx->cb_arg, rc);
		free(ctx);
		return;
	}

	ctx->rc = blob_resize(ctx->blob, ctx->sz);

	blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx);
}

void
spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_resize_ctx *ctx;

	blob_verify_md_op(blob);

	SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz);

	if (blob->md_ro) {
		cb_fn(cb_arg, -EPERM);
		return;
	}

	if (sz == blob->active.num_clusters) {
		cb_fn(cb_arg, 0);
		return;
	}

	if (blob->locked_operation_in_progress) {
		cb_fn(cb_arg, -EBUSY);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	blob->locked_operation_in_progress = true;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->blob = blob;
	ctx->sz = sz;
	blob_freeze_io(blob, bs_resize_freeze_cpl, ctx);
}

/* END spdk_blob_resize */
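/*
 * Usage sketch (compiled out): growing a blob by ten clusters. Resize works
 * in cluster units and, like other MD operations, must run on the metadata
 * thread. resize_done is a hypothetical callback.
 */
#if 0
static void
resize_done(void *cb_arg, int bserrno)
{
	SPDK_NOTICELOG("resize completed: %d\n", bserrno);
}

static void
grow_blob(struct spdk_blob *blob)
{
	uint64_t new_sz = spdk_blob_get_num_clusters(blob) + 10;

	spdk_blob_resize(blob, new_sz, resize_done, NULL);
}
#endif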
/* START spdk_bs_delete_blob */

static void
bs_delete_close_cpl(void *cb_arg, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;

	bs_sequence_finish(seq, bserrno);
}

static void
bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno != 0) {
		/*
		 * We already removed this blob from the blobstore's open blob
		 * tree, so we need to free it here since this is the last
		 * reference to it.
		 */
		blob_free(blob);
		bs_delete_close_cpl(seq, bserrno);
		return;
	}

	/*
	 * This will immediately decrement the ref_count and call
	 * the completion routine since the metadata state is clean.
	 * By calling spdk_blob_close, we reduce the number of call
	 * points into code that touches the blob->open_ref count
	 * and the blobstore's blob list.
	 */
	spdk_blob_close(blob, bs_delete_close_cpl, seq);
}
struct delete_snapshot_ctx {
	struct spdk_blob_list *parent_snapshot_entry;
	struct spdk_blob *snapshot;
	struct spdk_blob_md_page *page;
	bool snapshot_md_ro;
	struct spdk_blob *clone;
	bool clone_md_ro;
	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
	int bserrno;
	uint32_t next_extent_page;
};

static void
delete_blob_cleanup_finish(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
	}

	assert(ctx != NULL);

	if (bserrno != 0 && ctx->bserrno == 0) {
		ctx->bserrno = bserrno;
	}

	ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
	spdk_free(ctx->page);
	free(ctx);
}

static void
delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		ctx->bserrno = bserrno;
		SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
	}

	if (ctx->bserrno != 0) {
		assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL);
		RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot);
		spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id);
	}

	ctx->snapshot->locked_operation_in_progress = false;
	ctx->snapshot->md_ro = ctx->snapshot_md_ro;

	spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx);
}

static void
delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	ctx->clone->locked_operation_in_progress = false;
	ctx->clone->md_ro = ctx->clone_md_ro;

	spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
}

static void
delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno) {
		ctx->bserrno = bserrno;
		delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	ctx->clone->locked_operation_in_progress = false;
	spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx);
}
static void
delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;
	struct spdk_blob_list *parent_snapshot_entry = NULL;
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;
	struct spdk_blob_list *snapshot_clone_entry = NULL;

	if (bserrno) {
		SPDK_ERRLOG("Failed to sync MD on blob\n");
		ctx->bserrno = bserrno;
		delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	/* Get snapshot entry for the snapshot we want to remove */
	snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);

	assert(snapshot_entry != NULL);

	/* Remove clone entry in this snapshot (at this point there can be only one clone) */
	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
	assert(clone_entry != NULL);
	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
	snapshot_entry->clone_count--;
	assert(TAILQ_EMPTY(&snapshot_entry->clones));

	switch (ctx->snapshot->parent_id) {
	case SPDK_BLOBID_INVALID:
	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
		/* No parent snapshot - just remove clone entry */
		free(clone_entry);
		break;
	default:
		/* This snapshot is at the same time a clone of another snapshot - we need to
		 * update parent snapshot (remove current clone, add new one inherited from
		 * the snapshot that is being removed) */

		/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
		 * snapshot that we are removing */
		blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
						    &snapshot_clone_entry);

		/* Switch clone entry in parent snapshot */
		TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
		TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
		free(snapshot_clone_entry);
	}

	/* Restore md_ro flags */
	ctx->clone->md_ro = ctx->clone_md_ro;
	ctx->snapshot->md_ro = ctx->snapshot_md_ro;

	blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx);
}

static void
delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;
	uint64_t i;

	ctx->snapshot->md_ro = false;

	if (bserrno) {
		SPDK_ERRLOG("Failed to sync MD on clone\n");
		ctx->bserrno = bserrno;

		/* Restore snapshot to previous state */
		bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
		if (bserrno != 0) {
			delete_snapshot_cleanup_clone(ctx, bserrno);
			return;
		}

		spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx);
		return;
	}

	/* Clear cluster map entries for snapshot */
	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
			if (ctx->snapshot->active.clusters[i] != 0) {
				ctx->snapshot->active.num_allocated_clusters--;
			}
			ctx->snapshot->active.clusters[i] = 0;
		}
	}
	for (i = 0; i < ctx->snapshot->active.num_extent_pages &&
	     i < ctx->clone->active.num_extent_pages; i++) {
		if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) {
			ctx->snapshot->active.extent_pages[i] = 0;
		}
	}

	blob_set_thin_provision(ctx->snapshot);
	ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;

	if (ctx->parent_snapshot_entry != NULL) {
		ctx->snapshot->back_bs_dev = NULL;
	}

	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx);
}
static void
delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx)
{
	int bserrno;

	/* Delete old backing bs_dev from clone (related to snapshot that will be removed) */
	blob_back_bs_destroy(ctx->clone);

	/* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */
	if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
		bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot,
						 BLOB_EXTERNAL_SNAPSHOT_ID);
		if (bserrno != 0) {
			ctx->bserrno = bserrno;

			/* Restore snapshot to previous state */
			bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
			if (bserrno != 0) {
				delete_snapshot_cleanup_clone(ctx, bserrno);
				return;
			}

			spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx);
			return;
		}
		ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;
		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
		/* Do not delete the external snapshot along with this snapshot */
		ctx->snapshot->back_bs_dev = NULL;
		ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
	} else if (ctx->parent_snapshot_entry != NULL) {
		/* ...to parent snapshot */
		ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
		blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
			       sizeof(spdk_blob_id), true);
	} else {
		/* ...to blobid invalid and zeroes dev */
		ctx->clone->parent_id = SPDK_BLOBID_INVALID;
		ctx->clone->back_bs_dev = bs_create_zeroes_dev();
		blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
	}

	spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx);
}
static void
delete_snapshot_update_extent_pages(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;
	uint32_t *extent_page;
	uint64_t i;

	for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages &&
	     i < ctx->clone->active.num_extent_pages; i++) {
		if (ctx->snapshot->active.extent_pages[i] == 0) {
			/* No extent page to use from snapshot */
			continue;
		}

		extent_page = &ctx->clone->active.extent_pages[i];
		if (*extent_page == 0) {
			/* Copy extent page from snapshot when clone did not have a matching one */
			*extent_page = ctx->snapshot->active.extent_pages[i];
			continue;
		}

		/* Clone and snapshot both contain partially filled matching extent pages.
		 * Update the clone extent page in place with cluster map containing the mix of both. */
		ctx->next_extent_page = i + 1;
		memset(ctx->page, 0, SPDK_BS_PAGE_SIZE);

		blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page,
				       delete_snapshot_update_extent_pages, ctx);
		return;
	}
	delete_snapshot_update_extent_pages_cpl(ctx);
}

static void
delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;
	uint64_t i;

	/* Temporarily override md_ro flag for clone for MD modification */
	ctx->clone_md_ro = ctx->clone->md_ro;
	ctx->clone->md_ro = false;

	if (bserrno) {
		SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
		ctx->bserrno = bserrno;
		delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	/* Copy snapshot map to clone map (only unallocated clusters in clone) */
	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
		if (ctx->clone->active.clusters[i] == 0) {
			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
			if (ctx->clone->active.clusters[i] != 0) {
				ctx->clone->active.num_allocated_clusters++;
			}
		}
	}
	ctx->next_extent_page = 0;
	delete_snapshot_update_extent_pages(ctx, 0);
}

static void
delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n",
			    blob->id, bserrno);
		/* That error should not stop us from syncing metadata. */
	}

	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx);
}

static void
delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno) {
		SPDK_ERRLOG("Failed to freeze I/O on clone\n");
		ctx->bserrno = bserrno;
		delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	/* Temporarily override md_ro flag for snapshot for MD modification */
	ctx->snapshot_md_ro = ctx->snapshot->md_ro;
	ctx->snapshot->md_ro = false;

	/* Mark blob as pending for removal for power failure safety, use clone id for recovery */
	ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
				      sizeof(spdk_blob_id), true);
	if (ctx->bserrno != 0) {
		delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	if (blob_is_esnap_clone(ctx->snapshot)) {
		blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false,
						   delete_snapshot_esnap_channels_destroyed_cb,
						   ctx);
		return;
	}

	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx);
}
static void
delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno) {
		SPDK_ERRLOG("Failed to open clone\n");
		ctx->bserrno = bserrno;
		delete_snapshot_cleanup_snapshot(ctx, 0);
		return;
	}

	ctx->clone = clone;

	if (clone->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
		return;
	}

	clone->locked_operation_in_progress = true;

	blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx);
}

static void
update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;
	struct spdk_blob_list *snapshot_clone_entry = NULL;

	/* Get snapshot entry for the snapshot we want to remove */
	snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id);

	assert(snapshot_entry != NULL);

	/* Get clone of the snapshot (at this point there can be only one clone) */
	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
	assert(snapshot_entry->clone_count == 1);
	assert(clone_entry != NULL);

	/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
	 * snapshot that we are removing */
	blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
					    &snapshot_clone_entry);

	spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx);
}

static void
bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;
	struct spdk_blob_list *snapshot_entry = NULL;
	uint32_t page_num;

	if (bserrno) {
		SPDK_ERRLOG("Failed to remove blob\n");
		bs_sequence_finish(seq, bserrno);
		return;
	}

	/* Remove snapshot from the list */
	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry != NULL) {
		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
		free(snapshot_entry);
	}

	page_num = bs_blobid_to_page(blob->id);
	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->active.num_pages = 0;
	blob_resize(blob, 0);

	blob_persist(seq, blob, bs_delete_persist_cpl, blob);
}

static int
bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;
	struct spdk_blob *clone = NULL;
	bool has_one_clone = false;

	/* Check if this is a snapshot with clones */
	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry != NULL) {
		if (snapshot_entry->clone_count > 1) {
			SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
			return -EBUSY;
		} else if (snapshot_entry->clone_count == 1) {
			has_one_clone = true;
		}
	}

	/* Check if someone has this blob open (besides this delete context):
	 * - open_ref = 1 - only this context opened blob, so it is ok to remove it
	 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot
	 *   and that is ok, because we will update it accordingly */
	if (blob->open_ref <= 2 && has_one_clone) {
		clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
		assert(clone_entry != NULL);
		clone = blob_lookup(blob->bs, clone_entry->id);

		if (blob->open_ref == 2 && clone == NULL) {
			/* Clone is closed and someone else opened this blob */
			SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
			return -EBUSY;
		}

		*update_clone = true;
		return 0;
	}

	if (blob->open_ref > 1) {
		SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
		return -EBUSY;
	}

	assert(has_one_clone == false);
	*update_clone = false;
	return 0;
}

static void
bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;

	bs_sequence_finish(seq, -ENOMEM);
}

static void
bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;
	struct delete_snapshot_ctx *ctx;
	bool update_clone = false;

	if (bserrno != 0) {
		bs_sequence_finish(seq, bserrno);
		return;
	}

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq);
		return;
	}

	ctx->snapshot = blob;
	ctx->cb_fn = bs_delete_blob_finish;
	ctx->cb_arg = seq;

	/* Check if blob can be removed and if it is a snapshot with clone on top of it */
	ctx->bserrno = bs_is_blob_deletable(blob, &update_clone);
	if (ctx->bserrno) {
		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
		return;
	}

	if (blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
		return;
	}

	blob->locked_operation_in_progress = true;

	/*
	 * Remove the blob from the blob_store list now, to ensure it does not
	 * get returned after this point by blob_lookup().
	 */
	spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
	RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);

	if (update_clone) {
		ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->page) {
			ctx->bserrno = -ENOMEM;
			spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
			return;
		}
		/* This blob is a snapshot with active clone - update clone first */
		update_clone_on_snapshot_deletion(blob, ctx);
	} else {
		/* This blob does not have any clones - just remove it */
		bs_blob_list_remove(blob);
		bs_delete_blob_finish(seq, blob, 0);
		free(ctx);
	}
}

void
spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
		    spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid);

	assert(spdk_get_thread() == bs->md_thread);

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq);
}

/* END spdk_bs_delete_blob */
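/*
 * Usage sketch (compiled out): deleting a blob by id on the metadata thread.
 * Deleting a snapshot with exactly one clone takes the clone-update path
 * above; a snapshot with more than one clone fails with -EBUSY. g_bs and
 * blob_id are hypothetical placeholders.
 */
#if 0
static void
delete_done(void *cb_arg, int bserrno)
{
	SPDK_NOTICELOG("delete completed: %d\n", bserrno);
}

static void
delete_one_blob(void)
{
	spdk_bs_delete_blob(g_bs, blob_id, delete_done, NULL);
}
#endif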
/* START spdk_bs_open_blob */

static void
bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;
	struct spdk_blob *existing;

	if (bserrno != 0) {
		blob_free(blob);
		seq->cpl.u.blob_handle.blob = NULL;
		bs_sequence_finish(seq, bserrno);
		return;
	}

	existing = blob_lookup(blob->bs, blob->id);
	if (existing) {
		blob_free(blob);
		existing->open_ref++;
		seq->cpl.u.blob_handle.blob = existing;
		bs_sequence_finish(seq, 0);
		return;
	}

	blob->open_ref++;

	spdk_bit_array_set(blob->bs->open_blobids, blob->id);
	RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob);

	bs_sequence_finish(seq, bserrno);
}

static inline void
blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst)
{
#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		dst->field = src->field; \
	} \

	SET_FIELD(clear_method);
	SET_FIELD(esnap_ctx);

	dst->opts_size = src->opts_size;

	/* Do not remove this statement. When a new field is added, update the
	 * assert below and add a corresponding SET_FIELD statement. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}

static void
bs_open_blob(struct spdk_blob_store *bs,
	     spdk_blob_id blobid,
	     struct spdk_blob_open_opts *opts,
	     spdk_blob_op_with_handle_complete cb_fn,
	     void *cb_arg)
{
	struct spdk_blob *blob;
	struct spdk_bs_cpl cpl;
	struct spdk_blob_open_opts opts_local;
	spdk_bs_sequence_t *seq;
	uint32_t page_num;

	SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid);
	assert(spdk_get_thread() == bs->md_thread);

	page_num = bs_blobid_to_page(blobid);
	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
		/* Invalid blobid */
		cb_fn(cb_arg, NULL, -ENOENT);
		return;
	}

	blob = blob_lookup(bs, blobid);
	if (blob) {
		blob->open_ref++;
		cb_fn(cb_arg, blob, 0);
		return;
	}

	blob = blob_alloc(bs, blobid);
	if (!blob) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	spdk_blob_open_opts_init(&opts_local, sizeof(opts_local));
	if (opts) {
		blob_open_opts_copy(opts, &opts_local);
	}

	blob->clear_method = opts_local.clear_method;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
	cpl.u.blob_handle.cb_fn = cb_fn;
	cpl.u.blob_handle.cb_arg = cb_arg;
	cpl.u.blob_handle.blob = blob;
	cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx;

	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!seq) {
		blob_free(blob);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	blob_load(seq, blob, bs_open_blob_cpl, blob);
}

void
spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg);
}

void
spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
		      struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	bs_open_blob(bs, blobid, opts, cb_fn, cb_arg);
}

/* END spdk_bs_open_blob */
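/*
 * Usage sketch (compiled out): opening a blob with non-default open options.
 * Because blob_open_opts_copy() only copies fields covered by opts_size,
 * always initialize with spdk_blob_open_opts_init() before overriding fields.
 * g_bs and blob_id are hypothetical placeholders.
 */
#if 0
static void
open_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	if (bserrno == 0) {
		SPDK_NOTICELOG("opened blob 0x%" PRIx64 "\n", spdk_blob_get_id(blob));
	}
}

static void
open_with_opts(void)
{
	struct spdk_blob_open_opts opts;

	spdk_blob_open_opts_init(&opts, sizeof(opts));
	opts.clear_method = BLOB_CLEAR_WITH_NONE;
	spdk_bs_open_blob_ext(g_bs, blob_id, &opts, open_done, NULL);
}
#endif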
/* START spdk_blob_set_read_only */
int
spdk_blob_set_read_only(struct spdk_blob *blob)
{
	blob_verify_md_op(blob);

	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	return 0;
}
/* END spdk_blob_set_read_only */

/* START spdk_blob_sync_md */

static void
blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
		blob->data_ro = true;
		blob->md_ro = true;
	}

	bs_sequence_finish(seq, bserrno);
}

static void
blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	blob_persist(seq, blob, blob_sync_md_cpl, blob);
}

void
spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_verify_md_op(blob);

	SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id);

	if (blob->md_ro) {
		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
		cb_fn(cb_arg, 0);
		return;
	}

	blob_sync_md(blob, cb_fn, cb_arg);
}

/* END spdk_blob_sync_md */
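/*
 * Usage sketch (compiled out): persisting in-memory metadata changes.
 * Mutators such as spdk_blob_set_xattr() only mark the blob dirty in memory;
 * spdk_blob_sync_md() makes the change durable. sync_done is a hypothetical
 * callback.
 */
#if 0
static void
sync_done(void *cb_arg, int bserrno)
{
	SPDK_NOTICELOG("metadata sync completed: %d\n", bserrno);
}

static void
label_and_sync(struct spdk_blob *blob)
{
	const char *label = "app-label";

	spdk_blob_set_xattr(blob, "name", label, strlen(label) + 1);
	spdk_blob_sync_md(blob, sync_done, NULL);
}
#endif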
struct spdk_blob_cluster_op_ctx {
	struct spdk_thread *thread;
	struct spdk_blob *blob;
	uint32_t cluster_num;	/* cluster index in blob */
	uint32_t cluster;	/* cluster on disk */
	uint32_t extent_page;	/* extent page on disk */
	struct spdk_blob_md_page *page;	/* preallocated extent page */
	int rc;
	spdk_blob_op_complete cb_fn;
	void *cb_arg;
};

static void
blob_op_cluster_msg_cpl(void *arg)
{
	struct spdk_blob_cluster_op_ctx *ctx = arg;

	ctx->cb_fn(ctx->cb_arg, ctx->rc);
	free(ctx);
}

static void
blob_op_cluster_msg_cb(void *arg, int bserrno)
{
	struct spdk_blob_cluster_op_ctx *ctx = arg;

	ctx->rc = bserrno;
	spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
}

static void
blob_insert_new_ep_cb(void *arg, int bserrno)
{
	struct spdk_blob_cluster_op_ctx *ctx = arg;
	uint32_t *extent_page;

	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
	*extent_page = ctx->extent_page;
	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
	blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
}

struct spdk_blob_write_extent_page_ctx {
	struct spdk_blob_store *bs;

	uint32_t extent;
	struct spdk_blob_md_page *page;
};

static void
blob_free_cluster_msg_cb(void *arg, int bserrno)
{
	struct spdk_blob_cluster_op_ctx *ctx = arg;

	spdk_spin_lock(&ctx->blob->bs->used_lock);
	bs_release_cluster(ctx->blob->bs, ctx->cluster);
	spdk_spin_unlock(&ctx->blob->bs->used_lock);

	ctx->rc = bserrno;
	spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
}

static void
blob_free_cluster_update_ep_cb(void *arg, int bserrno)
{
	struct spdk_blob_cluster_op_ctx *ctx = arg;

	if (bserrno != 0 || ctx->blob->bs->clean == 0) {
		blob_free_cluster_msg_cb(ctx, bserrno);
		return;
	}

	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
	blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx);
}

static void
blob_free_cluster_free_ep_cb(void *arg, int bserrno)
{
	struct spdk_blob_cluster_op_ctx *ctx = arg;

	spdk_spin_lock(&ctx->blob->bs->used_lock);
	assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
	bs_release_md_page(ctx->blob->bs, ctx->extent_page);
	spdk_spin_unlock(&ctx->blob->bs->used_lock);
	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
	blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx);
}
static void
blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;

	free(ctx);
	bs_sequence_finish(seq, bserrno);
}

static void
blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		blob_persist_extent_page_cpl(seq, ctx, bserrno);
		return;
	}
	bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent),
			      bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
			      blob_persist_extent_page_cpl, ctx);
}

static void
blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
		       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_write_extent_page_ctx *ctx;
	spdk_bs_sequence_t *seq;
	struct spdk_bs_cpl cpl;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->bs = blob->bs;
	ctx->extent = extent;
	ctx->page = page;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
	if (!seq) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	assert(page);
	page->next = SPDK_INVALID_MD_PAGE;
	page->id = blob->id;
	page->sequence_num = 0;

	blob_serialize_extent_page(blob, cluster_num, page);

	page->crc = blob_md_page_calc_crc(page);

	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);

	bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx);
}
static void
blob_insert_cluster_msg(void *arg)
{
	struct spdk_blob_cluster_op_ctx *ctx = arg;
	uint32_t *extent_page;

	ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
	if (ctx->rc != 0) {
		spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
		return;
	}

	if (ctx->blob->use_extent_table == false) {
		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
		return;
	}

	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
	if (*extent_page == 0) {
		/* Extent page requires allocation.
		 * It was already claimed in the used_md_pages map and placed in ctx. */
		assert(ctx->extent_page != 0);
		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
				       blob_insert_new_ep_cb, ctx);
	} else {
		/* It is possible for the original thread to have allocated an extent page
		 * for a different cluster in the same extent page. In such a case proceed
		 * with updating the existing extent page, but release the additional one. */
		if (ctx->extent_page != 0) {
			spdk_spin_lock(&ctx->blob->bs->used_lock);
			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
			bs_release_md_page(ctx->blob->bs, ctx->extent_page);
			spdk_spin_unlock(&ctx->blob->bs->used_lock);
			ctx->extent_page = 0;
		}
		/* Extent page already allocated.
		 * Every cluster allocation requires just an update of a single extent page. */
		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
				       blob_op_cluster_msg_cb, ctx);
	}
}

static void
blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
				 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page,
				 spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_cluster_op_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->thread = spdk_get_thread();
	ctx->blob = blob;
	ctx->cluster_num = cluster_num;
	ctx->cluster = cluster;
	ctx->extent_page = extent_page;
	ctx->page = page;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx);
}
static void
blob_free_cluster_msg(void *arg)
{
	struct spdk_blob_cluster_op_ctx *ctx = arg;
	uint32_t *extent_page;
	uint32_t start_cluster_idx;
	bool free_extent_page = true;
	size_t i;

	ctx->cluster = bs_lba_to_cluster(ctx->blob->bs, ctx->blob->active.clusters[ctx->cluster_num]);

	/* There were concurrent unmaps to the same cluster, only release the cluster on the first one */
	if (ctx->cluster == 0) {
		blob_op_cluster_msg_cb(ctx, 0);
		return;
	}

	ctx->blob->active.clusters[ctx->cluster_num] = 0;
	if (ctx->cluster != 0) {
		ctx->blob->active.num_allocated_clusters--;
	}

	if (ctx->blob->use_extent_table == false) {
		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
		spdk_spin_lock(&ctx->blob->bs->used_lock);
		bs_release_cluster(ctx->blob->bs, ctx->cluster);
		spdk_spin_unlock(&ctx->blob->bs->used_lock);
		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
		return;
	}

	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);

	/* There shouldn't be parallel release operations on same cluster */
	assert(*extent_page == ctx->extent_page);

	start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
	for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) {
		if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) {
			free_extent_page = false;
			break;
		}
	}

	if (free_extent_page) {
		assert(ctx->extent_page != 0);
		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
		ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0;
		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
				       blob_free_cluster_free_ep_cb, ctx);
	} else {
		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
				       blob_free_cluster_update_ep_cb, ctx);
	}
}

static void
blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page,
			       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_cluster_op_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->thread = spdk_get_thread();
	ctx->blob = blob;
	ctx->cluster_num = cluster_num;
	ctx->extent_page = extent_page;
	ctx->page = page;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx);
}
/* START spdk_blob_close */

static void
blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno == 0) {
		blob->open_ref--;
		if (blob->open_ref == 0) {
			/*
			 * Blobs with active.num_pages == 0 are deleted blobs.
			 * These blobs are removed from the blob_store list
			 * when the deletion process starts - so don't try to
			 * remove them again.
			 */
			if (blob->active.num_pages > 0) {
				spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
				RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);
			}
			blob_free(blob);
		}
	}

	bs_sequence_finish(seq, bserrno);
}

static void
blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;

	if (bserrno != 0) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n",
			      blob->id, bserrno);
		bs_sequence_finish(seq, bserrno);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n",
		      blob->id, spdk_thread_get_name(spdk_get_thread()));

	/* Sync metadata */
	blob_persist(seq, blob, blob_close_cpl, blob);
}

void
spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	blob_verify_md_op(blob);

	SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id);

	if (blob->open_ref == 0) {
		cb_fn(cb_arg, -EBADF);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) {
		blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq);
		return;
	}

	/* Sync metadata */
	blob_persist(seq, blob, blob_close_cpl, blob);
}

/* END spdk_blob_close */
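/*
 * Usage sketch (compiled out): closing a blob from the metadata thread. The
 * final close of a deleted blob frees its in-memory state; otherwise close
 * also persists any dirty metadata. close_done is a hypothetical callback.
 */
#if 0
static void
close_done(void *cb_arg, int bserrno)
{
	SPDK_NOTICELOG("close completed: %d\n", bserrno);
}

static void
close_blob(struct spdk_blob *blob)
{
	spdk_blob_close(blob, close_done, NULL);
}
#endif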
struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
{
	return spdk_get_io_channel(bs);
}

void
spdk_bs_free_io_channel(struct spdk_io_channel *channel)
{
	blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel));
	spdk_put_io_channel(channel);
}

void
spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
		   uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
			       SPDK_BLOB_UNMAP);
}

void
spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
			  uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
			       SPDK_BLOB_WRITE_ZEROES);
}

void
spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
		   void *payload, uint64_t offset, uint64_t length,
		   spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
			       SPDK_BLOB_WRITE);
}

void
spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
		  void *payload, uint64_t offset, uint64_t length,
		  spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
			       SPDK_BLOB_READ);
}

void
spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
		    spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL);
}

void
spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
		   spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL);
}

void
spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
{
	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false,
				   io_opts);
}

void
spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
{
	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true,
				   io_opts);
}
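/*
 * Usage sketch (compiled out): submitting blob I/O through a per-thread
 * channel. Offsets and lengths for the spdk_blob_io_* calls are in io units,
 * not bytes. write_done and g_bs are hypothetical placeholders.
 */
#if 0
static void
write_done(void *cb_arg, int bserrno)
{
	struct spdk_io_channel *ch = cb_arg;

	spdk_bs_free_io_channel(ch);
}

static void
write_first_io_unit(struct spdk_blob *blob, void *payload)
{
	struct spdk_io_channel *ch = spdk_bs_alloc_io_channel(g_bs);

	/* payload must be a DMA-safe buffer, e.g. allocated with spdk_malloc(). */
	spdk_blob_io_write(blob, ch, payload, 0, 1, write_done, ch);
}
#endif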
struct spdk_bs_iter_ctx {
	int64_t page_num;
	struct spdk_blob_store *bs;

	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
};

static void
bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;
	spdk_blob_id id;

	if (bserrno == 0) {
		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
		free(ctx);
		return;
	}

	ctx->page_num++;
	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
		free(ctx);
		return;
	}

	id = bs_page_to_blobid(ctx->page_num);

	spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx);
}

void
spdk_bs_iter_first(struct spdk_blob_store *bs,
		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = -1;
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	bs_iter_cpl(ctx, NULL, -1);
}

static void
bs_iter_close_cpl(void *cb_arg, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;

	bs_iter_cpl(ctx, NULL, -1);
}

void
spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	assert(blob != NULL);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = bs_blobid_to_page(blob->id);
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Close the existing blob */
	spdk_blob_close(blob, bs_iter_close_cpl, ctx);
}
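/*
 * Usage sketch (compiled out): walking every blob in the blobstore. The
 * iterator opens each blob in turn; spdk_bs_iter_next() closes the current
 * blob before opening its successor, and the walk ends with -ENOENT. g_bs is
 * a hypothetical placeholder.
 */
#if 0
static void
iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	if (bserrno != 0) {
		/* -ENOENT means the iteration is complete. */
		return;
	}

	SPDK_NOTICELOG("found blob 0x%" PRIx64 "\n", spdk_blob_get_id(blob));
	spdk_bs_iter_next(g_bs, blob, iter_cb, NULL);
}

static void
walk_blobs(void)
{
	spdk_bs_iter_first(g_bs, iter_cb, NULL);
}
#endif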
static int
blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
	       uint16_t value_len, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;
	size_t desc_size;
	void *tmp;

	blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}

	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
		SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page %zu\n", name,
			      desc_size, SPDK_BS_MAX_DESC_SIZE);
		return -ENOMEM;
	}

	if (internal) {
		xattrs = &blob->xattrs_internal;
		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
	} else {
		xattrs = &blob->xattrs;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			tmp = malloc(value_len);
			if (!tmp) {
				return -ENOMEM;
			}

			free(xattr->value);
			xattr->value_len = value_len;
			xattr->value = tmp;
			memcpy(xattr->value, value, value_len);

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}

	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
	TAILQ_INSERT_TAIL(xattrs, xattr, link);

	blob->state = SPDK_BLOB_STATE_DIRTY;

	return 0;
}

int
spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		    uint16_t value_len)
{
	return blob_set_xattr(blob, name, value, value_len, false);
}

static int
blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;

	blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}
	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			TAILQ_REMOVE(xattrs, xattr, link);
			free(xattr->value);
			free(xattr->name);
			free(xattr);

			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
			}
			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	return -ENOENT;
}

int
spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
{
	return blob_remove_xattr(blob, name, false);
}

static int
blob_get_xattr_value(struct spdk_blob *blob, const char *name,
		     const void **value, size_t *value_len, bool internal)
{
	struct spdk_xattr *xattr;
	struct spdk_xattr_tailq *xattrs;

	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			*value = xattr->value;
			*value_len = xattr->value_len;
			return 0;
		}
	}
	return -ENOENT;
}

int
spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
			  const void **value, size_t *value_len)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_value(blob, name, value, value_len, false);
}

struct spdk_xattr_names {
	uint32_t count;
	const char *names[0];
};

static int
blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
{
	struct spdk_xattr *xattr;
	int count = 0;

	TAILQ_FOREACH(xattr, xattrs, link) {
		count++;
	}

	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
	if (*names == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		(*names)->names[(*names)->count++] = xattr->name;
	}

	return 0;
}

int
spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_names(&blob->xattrs, names);
}

uint32_t
spdk_xattr_names_get_count(struct spdk_xattr_names *names)
{
	assert(names != NULL);

	return names->count;
}

const char *
spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
{
	if (index >= names->count) {
		return NULL;
	}

	return names->names[index];
}

void
spdk_xattr_names_free(struct spdk_xattr_names *names)
{
	free(names);
}
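/*
 * Usage sketch (compiled out): enumerating xattr names and reading values.
 * Values are opaque byte arrays; the pointer returned by
 * spdk_blob_get_xattr_value() references blob-owned memory and stays valid
 * only while the blob is open and the xattr is not modified or removed.
 */
#if 0
static void
dump_xattrs(struct spdk_blob *blob)
{
	struct spdk_xattr_names *names;
	const void *value;
	size_t value_len;
	uint32_t i;

	if (spdk_blob_get_xattr_names(blob, &names) != 0) {
		return;
	}

	for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
		const char *name = spdk_xattr_names_get_name(names, i);

		if (spdk_blob_get_xattr_value(blob, name, &value, &value_len) == 0) {
			SPDK_NOTICELOG("xattr %s, %zu bytes\n", name, value_len);
		}
	}
	spdk_xattr_names_free(names);
}
#endif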
9544 */
9545 stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);
9546
9547 if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
9548 blob->clear_method = stored_cm;
9549 } else if (blob->clear_method != stored_cm) {
9550 SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
9551 blob->clear_method, stored_cm);
9552 }
9553 }
9554
9555 spdk_blob_id
9556 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
9557 {
9558 struct spdk_blob_list *snapshot_entry = NULL;
9559 struct spdk_blob_list *clone_entry = NULL;
9560
9561 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
9562 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
9563 if (clone_entry->id == blob_id) {
9564 return snapshot_entry->id;
9565 }
9566 }
9567 }
9568
9569 return SPDK_BLOBID_INVALID;
9570 }
9571
9572 int
9573 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
9574 size_t *count)
9575 {
9576 struct spdk_blob_list *snapshot_entry, *clone_entry;
9577 size_t n;
9578
9579 snapshot_entry = bs_get_snapshot_entry(bs, blobid);
9580 if (snapshot_entry == NULL) {
9581 *count = 0;
9582 return 0;
9583 }
9584
9585 if (ids == NULL || *count < snapshot_entry->clone_count) {
9586 *count = snapshot_entry->clone_count;
9587 return -ENOMEM;
9588 }
9589 *count = snapshot_entry->clone_count;
9590
9591 n = 0;
9592 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
9593 ids[n++] = clone_entry->id;
9594 }
9595
9596 return 0;
9597 }
9598
9599 static void
9600 bs_load_grow_continue(struct spdk_bs_load_ctx *ctx)
9601 {
9602 int rc;
9603
9604 if (ctx->super->size == 0) {
9605 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9606 }
9607
9608 if (ctx->super->io_unit_size == 0) {
9609 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
9610 }
9611
9612 /* Parse the super block */
9613 ctx->bs->clean = 1;
9614 ctx->bs->cluster_sz = ctx->super->cluster_size;
9615 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
9616 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
9617 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
9618 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
9619 }
9620 ctx->bs->io_unit_size = ctx->super->io_unit_size;
9621 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
9622 if (rc < 0) {
9623 bs_load_ctx_fail(ctx, -ENOMEM);
9624 return;
9625 }
9626 ctx->bs->md_start = ctx->super->md_start;
9627 ctx->bs->md_len = ctx->super->md_len;
9628 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
9629 if (rc < 0) {
9630 bs_load_ctx_fail(ctx, -ENOMEM);
9631 return;
9632 }
9633
9634 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
9635 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
9636 ctx->bs->super_blob = ctx->super->super_blob;
9637 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
9638
9639 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
9640 SPDK_ERRLOG("Cannot grow an unclean blobstore; please load it normally to clean it.\n");
9641 bs_load_ctx_fail(ctx, -EIO);
9642 return;
9643 } else {
9644 bs_load_read_used_pages(ctx);
9645 }
9646 }
9647
9648 static void
9649 bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9650 {
9651 struct spdk_bs_load_ctx *ctx = cb_arg;
9652
9653 if (bserrno != 0) {
9654 bs_load_ctx_fail(ctx, bserrno);
9655 return;
9656 }
9657 bs_load_grow_continue(ctx);
9658 }
9659
9660 static void
9661 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9662 {
9663 struct spdk_bs_load_ctx *ctx = cb_arg;
9664
9665 if (bserrno != 0) {
9666 bs_load_ctx_fail(ctx, bserrno);
9667 return;
9668 }
9669
9670 spdk_free(ctx->mask);
9671
9672 bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
9673 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
9674 bs_load_grow_super_write_cpl, ctx);
9675 }
9676
9677 static void
9678 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9679 {
9680 struct spdk_bs_load_ctx *ctx = cb_arg;
9681 uint64_t lba, lba_count;
9682 uint64_t dev_size;
9683 uint64_t total_clusters;
9684
9685 if (bserrno != 0) {
9686 bs_load_ctx_fail(ctx, bserrno);
9687 return;
9688 }
9689
9690 /* The type must be correct */
9691 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
9692 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
9693 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
9694 struct spdk_blob_md_page) * 8));
9695 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9696 total_clusters = dev_size / ctx->super->cluster_size;
9697 ctx->mask->length = total_clusters;
9698
9699 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
9700 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
9701 bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count,
9702 bs_load_grow_used_clusters_write_cpl, ctx);
9703 }
9704
9705 static void
9706 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx)
9707 {
9708 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
9709 uint64_t lba, lba_count, mask_size;
9710
9711 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9712 total_clusters = dev_size / ctx->super->cluster_size;
9713 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
9714 spdk_divide_round_up(total_clusters, 8),
9715 SPDK_BS_PAGE_SIZE);
9716 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
9717 /* No need to grow, or no space to grow */
9718 if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) {
9719 SPDK_DEBUGLOG(blob, "No grow\n");
9720 bs_load_grow_continue(ctx);
9721 return;
9722 }
9723
9724 SPDK_DEBUGLOG(blob, "Resize blobstore\n");
9725
9726 ctx->super->size = dev_size;
9727 ctx->super->used_cluster_mask_len = used_cluster_mask_len;
9728 ctx->super->crc = blob_md_page_calc_crc(ctx->super);
9729
9730 mask_size = used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
9731 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
9732 SPDK_MALLOC_DMA);
9733 if (!ctx->mask) {
9734 bs_load_ctx_fail(ctx, -ENOMEM);
9735 return;
9736 }
9737 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
9738 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
9739 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
9740 bs_load_grow_used_clusters_read_cpl, ctx);
9741 }
9742
9743 static void
9744 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9745 {
9746 struct spdk_bs_load_ctx *ctx = cb_arg;
9747 int rc;
9748
9749 rc = bs_super_validate(ctx->super, ctx->bs);
9750 if (rc != 0) {
9751 bs_load_ctx_fail(ctx, rc);
9752 return;
9753 }
9754
9755 bs_load_try_to_grow(ctx);
9756 }
9757
9758 struct spdk_bs_grow_ctx {
9759 struct spdk_blob_store *bs;
9760 struct spdk_bs_super_block *super;
9761
9762 struct spdk_bit_pool *new_used_clusters;
9763 struct spdk_bs_md_mask *new_used_clusters_mask;
9764
9765 spdk_bs_sequence_t *seq;
9766 };
9767
9768 static void
9769 bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno)
9770 {
9771 if (bserrno != 0) {
9772 spdk_bit_pool_free(&ctx->new_used_clusters);
9773 }
9774
9775 bs_sequence_finish(ctx->seq, bserrno);
9776 free(ctx->new_used_clusters_mask);
9777 spdk_free(ctx->super);
9778 free(ctx);
9779 }
9780
9781 static void
9782 bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9783 {
9784 struct spdk_bs_grow_ctx *ctx = cb_arg;
9785 struct spdk_blob_store *bs = ctx->bs;
9786 uint64_t total_clusters;
9787
9788 if (bserrno != 0) {
9789 bs_grow_live_done(ctx, bserrno);
9790 return;
9791 }
9792
9793 /*
9794 * Blobstore is not clean until unload; for now only the super block is up to date.
9795 * This is similar to the state right after blobstore init, when bs_write_used_md() has not
9796 * yet executed.
9797 * When cleanly unloaded, the used md pages will be written out.
9798 * In case of unclean shutdown, loading the blobstore will go through the recovery path,
9799 * correctly filling out used_clusters with the new size and writing it out.
9800 */
9801 bs->clean = 0;
9802
9803 /* Reverting super->size past this point is complex, so avoid any error paths
9804 * that would require doing so. */
9805 spdk_spin_lock(&bs->used_lock);
9806
9807 total_clusters = ctx->super->size / ctx->super->cluster_size;
9808
9809 assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters));
9810 spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask);
9811
9812 assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters));
9813 spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask);
9814
9815 spdk_bit_pool_free(&bs->used_clusters);
9816 bs->used_clusters = ctx->new_used_clusters;
9817
9818 bs->total_clusters = total_clusters;
9819 bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up(
9820 bs->md_start + bs->md_len, bs->pages_per_cluster);
9821
9822 bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters);
9823 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
9824 spdk_spin_unlock(&bs->used_lock);
9825
9826 bs_grow_live_done(ctx, 0);
9827 }
9828
9829 static void
9830 bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9831 {
9832 struct spdk_bs_grow_ctx *ctx = cb_arg;
9833 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
9834 int rc;
9835
9836 if (bserrno != 0) {
9837 bs_grow_live_done(ctx, bserrno);
9838 return;
9839 }
9840
9841 rc = bs_super_validate(ctx->super, ctx->bs);
9842 if (rc != 0) {
9843 bs_grow_live_done(ctx, rc);
9844 return;
9845 }
9846
9847 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9848 total_clusters = dev_size / ctx->super->cluster_size;
9849 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
9850 spdk_divide_round_up(total_clusters, 8),
9851 SPDK_BS_PAGE_SIZE);
9852 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
9853 /* Only check dev_size here, since it can change while total_clusters remains the same. */
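/* For example, the device may have grown by less than one full cluster: total_clusters
 * is then unchanged, but super->size is still refreshed below to match the device. */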
9854 if (dev_size == ctx->super->size) {
9855 SPDK_DEBUGLOG(blob, "No need to grow blobstore\n");
9856 bs_grow_live_done(ctx, 0);
9857 return;
9858 }
9859 /*
9860 * Blobstore cannot be shrunk, so fail the grow if:
9861 * - the new size of the device is smaller than the size in the super block
9862 * - the new total number of clusters is smaller than the capacity of the used_clusters bit_pool
9863 * - there is not enough space in metadata for the used_cluster_mask to be written out
9864 */
9865 if (dev_size < ctx->super->size ||
9866 total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) ||
9867 used_cluster_mask_len > max_used_cluster_mask) {
9868 SPDK_DEBUGLOG(blob, "No space to grow blobstore\n");
9869 bs_grow_live_done(ctx, -ENOSPC);
9870 return;
9871 }
9872
9873 SPDK_DEBUGLOG(blob, "Resizing blobstore\n");
9874
9875 ctx->new_used_clusters_mask = calloc(1, total_clusters);
9876 if (!ctx->new_used_clusters_mask) {
9877 bs_grow_live_done(ctx, -ENOMEM);
9878 return;
9879 }
9880 ctx->new_used_clusters = spdk_bit_pool_create(total_clusters);
9881 if (!ctx->new_used_clusters) {
9882 bs_grow_live_done(ctx, -ENOMEM);
9883 return;
9884 }
9885
9886 ctx->super->clean = 0;
9887 ctx->super->size = dev_size;
9888 ctx->super->used_cluster_mask_len = used_cluster_mask_len;
9889 bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx);
9890 }
9891
9892 void
9893 spdk_bs_grow_live(struct spdk_blob_store *bs,
9894 spdk_bs_op_complete cb_fn, void *cb_arg)
9895 {
9896 struct spdk_bs_cpl cpl;
9897 struct spdk_bs_grow_ctx *ctx;
9898
9899 assert(spdk_get_thread() == bs->md_thread);
9900
9901 SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev);
9902
9903 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
9904 cpl.u.bs_basic.cb_fn = cb_fn;
9905 cpl.u.bs_basic.cb_arg = cb_arg;
9906
9907 ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx));
9908 if (!ctx) {
9909 cb_fn(cb_arg, -ENOMEM);
9910 return;
9911 }
9912 ctx->bs = bs;
9913
9914 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
9915 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
9916 if (!ctx->super) {
9917 free(ctx);
9918 cb_fn(cb_arg, -ENOMEM);
9919 return;
9920 }
9921
9922 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
9923 if (!ctx->seq) {
9924 spdk_free(ctx->super);
9925 free(ctx);
9926 cb_fn(cb_arg, -ENOMEM);
9927 return;
9928 }
9929
9930 /* Read the super block */
9931 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
9932 bs_byte_to_lba(bs, sizeof(*ctx->super)),
9933 bs_grow_live_load_super_cpl, ctx);
9934 }
9935
9936 void
9937 spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
9938 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
9939 {
9940 struct spdk_blob_store *bs;
9941 struct spdk_bs_cpl cpl;
9942 struct spdk_bs_load_ctx *ctx;
9943 struct spdk_bs_opts opts = {};
9944 int err;
9945
9946 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);
9947
9948 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
9949 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen);
9950 dev->destroy(dev);
9951 cb_fn(cb_arg, NULL, -EINVAL);
9952 return;
9953 }
9954
9955 spdk_bs_opts_init(&opts, sizeof(opts));
9956 if (o) {
9957 if (bs_opts_copy(o, &opts)) {
9958 return;
9959 }
9960 }
9961
9962 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
9963 dev->destroy(dev);
9964 cb_fn(cb_arg, NULL, -EINVAL);
9965 return;
9966 }
9967
9968 err = bs_alloc(dev, &opts, &bs, &ctx);
9969 if (err) {
9970 dev->destroy(dev);
9971 cb_fn(cb_arg, NULL, err);
9972 return;
9973 }
9974
9975 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
9976 cpl.u.bs_handle.cb_fn = cb_fn;
9977 cpl.u.bs_handle.cb_arg = cb_arg;
9978 cpl.u.bs_handle.bs = bs;
9979
9980 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
9981 if (!ctx->seq) {
9982 spdk_free(ctx->super);
9983 free(ctx);
9984 bs_free(bs);
9985 cb_fn(cb_arg, NULL, -ENOMEM);
9986 return;
9987 }
9988
9989 /* Read the super block */
9990 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
9991 bs_byte_to_lba(bs, sizeof(*ctx->super)),
9992 bs_grow_load_super_cpl, ctx);
9993 }
9994
9995 int
9996 spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len)
9997 {
9998 if (!blob_is_esnap_clone(blob)) {
9999 return -EINVAL;
10000 }
10001
10002 return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true);
10003 }
10004
10005 struct spdk_io_channel *
10006 blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob)
10007 {
10008 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(ch);
10009 struct spdk_bs_dev *bs_dev = blob->back_bs_dev;
10010 struct blob_esnap_channel find = {};
10011 struct blob_esnap_channel *esnap_channel, *existing;
10012
10013 find.blob_id = blob->id;
10014 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
10015 if (spdk_likely(esnap_channel != NULL)) {
10016 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n",
10017 blob->id, spdk_thread_get_name(spdk_get_thread()));
10018 return esnap_channel->channel;
10019 }
10020
10021 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n",
10022 blob->id, spdk_thread_get_name(spdk_get_thread()));
10023
10024 esnap_channel = calloc(1, sizeof(*esnap_channel));
10025 if (esnap_channel == NULL) {
10026 SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n",
10027 find.blob_id);
10028 return NULL;
10029 }
10030 esnap_channel->channel = bs_dev->create_channel(bs_dev);
10031 if (esnap_channel->channel == NULL) {
10032 SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id);
10033 free(esnap_channel);
10034 return NULL;
10035 }
10036 esnap_channel->blob_id = find.blob_id;
10037 existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
10038 if (spdk_unlikely(existing != NULL)) {
10039 /*
10040 * This should be unreachable: all modifications to this tree happen on this thread.
10041 */
10042 SPDK_ERRLOG("blob 0x%" PRIx64 " lost race to allocate a channel\n", find.blob_id);
10043 assert(false);
10044
10045 bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
10046 free(esnap_channel);
10047
10048 return existing->channel;
10049 }
10050
10051 return esnap_channel->channel;
10052 }
10053
10054 static int
10055 blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2)
10056 {
10057 return (c1->blob_id < c2->blob_id ? -1 : c1->blob_id > c2->blob_id);
10058 }
10059
10060 struct blob_esnap_destroy_ctx {
10061 spdk_blob_op_with_handle_complete cb_fn;
10062 void *cb_arg;
10063 struct spdk_blob *blob;
10064 struct spdk_bs_dev *back_bs_dev;
10065 bool abort_io;
10066 };
10067
10068 static void
10069 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status)
10070 {
10071 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
10072 struct spdk_blob *blob = ctx->blob;
10073 struct spdk_blob_store *bs = blob->bs;
10074
10075 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n",
10076 blob->id);
10077
10078 if (ctx->cb_fn != NULL) {
10079 ctx->cb_fn(ctx->cb_arg, blob, status);
10080 }
10081 free(ctx);
10082
10083 bs->esnap_channels_unloading--;
10084 if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) {
10085 spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg);
10086 }
10087 }
10088
10089 static void
10090 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i)
10091 {
10092 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
10093 struct spdk_blob *blob = ctx->blob;
10094 struct spdk_bs_dev *bs_dev = ctx->back_bs_dev;
10095 struct spdk_io_channel *channel = spdk_io_channel_iter_get_channel(i);
10096 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(channel);
10097 struct blob_esnap_channel *esnap_channel;
10098 struct blob_esnap_channel find = {};
10099
10100 assert(spdk_get_thread() == spdk_io_channel_get_thread(channel));
10101
10102 find.blob_id = blob->id;
10103 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
10104 if (esnap_channel != NULL) {
10105 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n",
10106 blob->id, spdk_thread_get_name(spdk_get_thread()));
10107 RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
10108
10109 if (ctx->abort_io) {
10110 spdk_bs_user_op_t *op, *tmp;
10111
10112 TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) {
10113 if (op->back_channel == esnap_channel->channel) {
10114 TAILQ_REMOVE(&bs_channel->queued_io, op, link);
10115 bs_user_op_abort(op, -EIO);
10116 }
10117 }
10118 }
10119
10120 bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
10121 free(esnap_channel);
10122 }
10123
10124 spdk_for_each_channel_continue(i, 0);
10125 }
10126
10127 /*
10128 * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be
10129 * used when closing an esnap clone blob and after decoupling from the parent.
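 * When abort_io is true, IO queued on a blobstore channel that targets the
 * esnap channel being destroyed is aborted with -EIO instead of being left
 * on the queue.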
10130 */
10131 static void
10132 blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
10133 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
10134 {
10135 struct blob_esnap_destroy_ctx *ctx;
10136
10137 if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) {
10138 if (cb_fn != NULL) {
10139 cb_fn(cb_arg, blob, 0);
10140 }
10141 return;
10142 }
10143
10144 ctx = calloc(1, sizeof(*ctx));
10145 if (ctx == NULL) {
10146 if (cb_fn != NULL) {
10147 cb_fn(cb_arg, blob, -ENOMEM);
10148 }
10149 return;
10150 }
10151 ctx->cb_fn = cb_fn;
10152 ctx->cb_arg = cb_arg;
10153 ctx->blob = blob;
10154 ctx->back_bs_dev = blob->back_bs_dev;
10155 ctx->abort_io = abort_io;
10156
10157 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n",
10158 blob->id);
10159
10160 blob->bs->esnap_channels_unloading++;
10161 spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx,
10162 blob_esnap_destroy_channels_done);
10163 }
10164
10165 /*
10166 * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a
10167 * bs_channel is destroyed.
10168 */
10169 static void
10170 blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch)
10171 {
10172 struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp;
10173
10174 assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch)));
10175
10176 SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n",
10177 spdk_thread_get_name(spdk_get_thread()));
10178 RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels,
10179 esnap_channel_tmp) {
10180 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64
10181 ": destroying one channel in thread %s\n",
10182 esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread()));
10183 RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel);
10184 spdk_put_io_channel(esnap_channel->channel);
10185 free(esnap_channel);
10186 }
10187 SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n",
10188 spdk_thread_get_name(spdk_get_thread()));
10189 }
10190
10191 static void
10192 blob_set_back_bs_dev_done(void *_ctx, int bserrno)
10193 {
10194 struct set_bs_dev_ctx *ctx = _ctx;
10195
10196 if (bserrno != 0) {
10197 /* Even though the unfreeze failed, the update may have succeeded. */
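/* Fall through and report ctx->bserrno, the status of the update itself;
 * the unfreeze failure is only logged. */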
10198 SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id,
10199 bserrno);
10200 }
10201 ctx->cb_fn(ctx->cb_arg, ctx->bserrno);
10202 free(ctx);
10203 }
10204
10205 static void
10206 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno)
10207 {
10208 struct set_bs_dev_ctx *ctx = _ctx;
10209 int rc;
10210
10211 if (bserrno != 0) {
10212 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n",
10213 blob->id, bserrno);
10214 ctx->bserrno = bserrno;
10215 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
10216 return;
10217 }
10218
10219 if (blob->back_bs_dev != NULL) {
10220 blob->back_bs_dev->destroy(blob->back_bs_dev);
10221 blob->back_bs_dev = NULL;
10222 }
10223
10224 if (ctx->parent_refs_cb_fn) {
10225 rc = ctx->parent_refs_cb_fn(blob, ctx->parent_refs_cb_arg);
10226 if (rc != 0) {
10227 ctx->bserrno = rc;
10228 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
10229 return;
10230 }
10231 }
10232
10233 SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id);
10234 blob->back_bs_dev = ctx->back_bs_dev;
10235 ctx->bserrno = 0;
10236
10237 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
10238 }
10239
10240 static void
10241 blob_set_back_bs_dev_frozen(void *_ctx, int bserrno)
10242 {
10243 struct set_bs_dev_ctx *ctx = _ctx;
10244 struct spdk_blob *blob = ctx->blob;
10245
10246 if (bserrno != 0) {
10247 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id,
10248 bserrno);
10249 ctx->cb_fn(ctx->cb_arg, bserrno);
10250 free(ctx);
10251 return;
10252 }
10253
10254 /*
10255 * This does not prevent future reads from the esnap device because any future IO will
10256 * lazily create a new esnap IO channel.
10257 */
10258 blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx);
10259 }
10260
10261 void
10262 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
10263 spdk_blob_op_complete cb_fn, void *cb_arg)
10264 {
10265 if (!blob_is_esnap_clone(blob)) {
10266 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
10267 cb_fn(cb_arg, -EINVAL);
10268 return;
10269 }
10270
10271 blob_set_back_bs_dev(blob, back_bs_dev, NULL, NULL, cb_fn, cb_arg);
10272 }
10273
10274 struct spdk_bs_dev *
10275 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob)
10276 {
10277 if (!blob_is_esnap_clone(blob)) {
10278 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
10279 return NULL;
10280 }
10281
10282 return blob->back_bs_dev;
10283 }
10284
10285 bool
10286 spdk_blob_is_degraded(const struct spdk_blob *blob)
10287 {
10288 if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) {
10289 return true;
10290 }
10291 if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) {
10292 return false;
10293 }
10294
10295 return blob->back_bs_dev->is_degraded(blob->back_bs_dev);
10296 }
10297
10298 SPDK_LOG_REGISTER_COMPONENT(blob)
10299 SPDK_LOG_REGISTER_COMPONENT(blob_esnap)
10300
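/*
 * Usage sketch (illustrative only, not part of this file): growing a live
 * blobstore after the underlying device has been resized. "g_bs" is a
 * hypothetical, already-loaded blobstore handle, and the call must be made
 * from the blobstore's metadata thread, as spdk_bs_grow_live() asserts.
 *
 *     static void
 *     grow_done(void *cb_arg, int bserrno)
 *     {
 *             if (bserrno != 0) {
 *                     SPDK_ERRLOG("blobstore grow failed: %d\n", bserrno);
 *                     return;
 *             }
 *             SPDK_NOTICELOG("grown, %" PRIu64 " free clusters\n",
 *                            spdk_bs_free_cluster_count(g_bs));
 *     }
 *
 *     spdk_bs_grow_live(g_bs, grow_done, NULL);
 */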