/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/trace.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/trace_defs.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg);

static void bs_shallow_copy_cluster_find_next(void *cb_arg);

/*
 * External snapshots require a channel per thread per esnap bdev. The tree
 * is populated lazily as blob IOs are handled by the back_bs_dev. When this
 * channel is destroyed, all the channels in the tree are destroyed.
 */

struct blob_esnap_channel {
	RB_ENTRY(blob_esnap_channel) node;
	spdk_blob_id	blob_id;
	struct spdk_io_channel *channel;
};

static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
static void blob_set_back_bs_dev_frozen(void *_ctx, int bserrno);
RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)

static inline bool
blob_is_esnap_clone(const struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
}

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	assert(blob1 != NULL && blob2 != NULL);
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	assert(spdk_spin_held(&bs->used_lock));

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	blob->active.num_allocated_clusters++;

	return 0;
}

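/*
 * Reserve a data cluster for cluster_num and, when the blob uses an extent
 * table, make sure a metadata page is reserved for the extent page that will
 * describe it. Both reservations happen under bs->used_lock; if no metadata
 * page is left, the freshly claimed cluster is released again so the failure
 * leaves no side effects.
 */
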
static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = 0;

	assert(spdk_spin_held(&blob->bs->used_lock));

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request. */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* An extent page shall never occupy md page 0, so start the search from 1 */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request. */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
		      blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

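/*
 * The two public opts initializers below use the opts_size/FIELD_OK pattern:
 * the caller reports sizeof() of the options struct it was compiled against,
 * and only fields that fit within that size are written. This keeps a newer
 * library compatible with callers built against an older, smaller struct.
 *
 * A minimal caller-side sketch (assuming the usual public blobstore API,
 * with blob_create_cb being the caller's spdk_blob_op_with_id_complete):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 8;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_create_cb, cb_arg);
 */
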
static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}

static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_unref_back_bs_dev(struct spdk_blob *blob)
{
	blob->back_bs_dev->destroy(blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob_unref_back_bs_dev(blob);
	}

	free(blob);
}

static void
blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_dev *bs_dev = ctx;

	if (bserrno != 0) {
		/*
		 * This is probably due to a memory allocation failure when creating the
		 * blob_esnap_destroy_ctx before iterating threads.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
			    blob->id, bserrno);
		assert(false);
	}

	if (bs_dev == NULL) {
		/*
		 * This check exists to make scanbuild happy.
		 *
		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
		 * the blobstore is being loaded. It could also be NULL if there was an error
		 * opening the esnap device. In each of these cases, no channels could have been
		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
		 * deref.
		 */
		assert(false);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
	bs_dev->destroy(bs_dev);
}

static void
blob_back_bs_destroy(struct spdk_blob *blob)
{
	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
		      blob->id);

	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
					   blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

struct blob_parent {
	union {
		struct {
			spdk_blob_id id;
			struct spdk_blob *blob;
		} snapshot;

		struct {
			void *id;
			uint32_t id_len;
			struct spdk_bs_dev *back_bs_dev;
		} esnap;
	} u;
};

typedef int (*set_parent_refs_cb)(struct spdk_blob *blob, struct blob_parent *parent);

struct set_bs_dev_ctx {
	struct spdk_blob *blob;
	struct spdk_bs_dev *back_bs_dev;

	/*
	 * This callback is used during a set parent operation to change the references
	 * to the parent of the blob.
	 */
	set_parent_refs_cb parent_refs_cb_fn;
	struct blob_parent *parent_refs_cb_arg;

	spdk_blob_op_complete cb_fn;
	void *cb_arg;
	int bserrno;
};

static void
blob_set_back_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
		     set_parent_refs_cb parent_refs_cb_fn, struct blob_parent *parent_refs_cb_arg,
		     spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct set_bs_dev_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n",
			    blob->id);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->parent_refs_cb_fn = parent_refs_cb_fn;
	ctx->parent_refs_cb_arg = parent_refs_cb_arg;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->back_bs_dev = back_bs_dev;
	ctx->blob = blob;

	blob_freeze_io(blob, blob_set_back_bs_dev_frozen, ctx);
}

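/*
 * Freezing works by bumping blob->frozen_refcnt and then doing an empty
 * spdk_for_each_channel() pass: once every channel has been visited, any I/O
 * that was already in flight on any thread has completed, and new I/O against
 * the blob is parked on the per-channel queued_io list (by the submission
 * path elsewhere in this file). Unfreezing decrements the refcount and
 * replays the queued operations on each channel.
 */
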
struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
}

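/*
 * Snapshot the current in-memory ("active") metadata into the "clean" copy.
 * The active arrays themselves become the clean arrays, and active receives
 * fresh duplicates, so a later failed persist can still fall back to the
 * last state known to be on disk.
 */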
static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_allocated_clusters = blob->active.num_allocated_clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}

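/*
 * A metadata page body is a packed sequence of variable-length descriptors,
 * each consisting of a small type/length header followed by its payload.
 * blob_parse_page() walks that sequence until it hits a zero-length padding
 * descriptor or the end of the page, dispatching on the descriptor type.
 */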
static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* An Extent Table descriptor is already present in the md;
				 * the two descriptor types must never appear at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
						blob->active.num_allocated_clusters++;
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* An Extent RLE descriptor is already present in the md;
				 * the two descriptor types must never appear at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match number
				 * from previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			unsigned int i;
			unsigned int cluster_count = 0;
			size_t cluster_idx_length;

			if (blob->extent_rle_found) {
				/* An Extent RLE descriptor is already present in the md;
				 * the two descriptor types must never appear at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should
			 * match the current size of the blob.
			 * If changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
					blob->active.num_allocated_clusters++;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

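/*
 * A blob's metadata may span several pages. Page 0 lives at the md page
 * derived from the blob id; each page names its successor in page->next,
 * and sequence_num records its position in the chain, which blob_parse()
 * verifies below.
 */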
static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD, this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr *desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

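/*
 * The extent table descriptor stores the md page index of every extent page
 * belonging to the blob, with runs of unallocated (zero) entries collapsed
 * via num_pages. For example, extent pages [5, 0, 0, 9] would serialize as
 * the entries (page_idx 5, num_pages 1), (0, 2), (9, 1).
 */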
static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}

static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted.
	 * Such a case occurs with num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

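/*
 * EXTENT_RLE descriptors run-length encode the cluster map directly: a run
 * of physically contiguous allocated clusters becomes (cluster_idx, length),
 * and a run of unallocated clusters becomes (0, length). For example, a thin
 * blob whose clusters sit at cluster indices [10, 11, 12, 0, 0, 7] would
 * serialize as (10, 3), (0, 2), (7, 1).
 */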
static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
	/* Assert for scan-build false positive */
	assert(lba_per_cluster > 0);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

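/*
 * blob_serialize() below emits descriptors in a fixed order: flags first,
 * then user xattrs, then internal xattrs, and finally the cluster layout as
 * either an extent table or extent RLE descriptors. Whenever the current
 * metadata page fills up, blob_serialize_add_page() appends another page to
 * the chain and serialization continues there.
 */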
static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr *xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

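/*
 * Each metadata page carries a crc32c computed over everything but its
 * trailing 4-byte crc field; blob_md_page_calc_crc() below produces that
 * value, and the load path rejects any page whose stored crc does not match.
 */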
struct spdk_blob_load_ctx {
	struct spdk_blob *blob;

	struct spdk_blob_md_page *pages;
	uint32_t num_pages;
	uint32_t next_extent_page;
	spdk_bs_sequence_t *seq;

	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
};

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot fail\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);

static int
blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
{
	struct spdk_blob_store *bs = blob->bs;
	struct spdk_bs_dev *bs_dev = NULL;
	const void *esnap_id = NULL;
	size_t id_len = 0;
	int rc;

	if (bs->esnap_bs_dev_create == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
			       "without support for esnap clones\n", blob->id);
		return -ENOTSUP;
	}
	assert(blob->back_bs_dev == NULL);

	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
	if (rc != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
		return -EINVAL;
	}
	assert(id_len > 0 && id_len < UINT32_MAX);

	SPDK_INFOLOG(blob, "Creating external snapshot device\n");

	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
				     &bs_dev);
	if (rc != 0) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
			      "with error %d\n", blob->id, rc);
		return rc;
	}

	/*
	 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot.
	 * This especially might happen during spdk_bs_load() iteration.
	 */
	if (bs_dev != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
				       "is not compatible with blobstore block size %u\n",
				       blob->id, bs_dev->blocklen, bs->io_unit_size);
			bs_dev->destroy(bs_dev);
			return -EINVAL;
		}
	}

	blob->back_bs_dev = bs_dev;
	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;

	return 0;
}

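/*
 * Once the metadata has been parsed, the blob's backing device is chosen:
 * esnap clones ask the application for a bs_dev via esnap_bs_dev_create,
 * thin-provisioned clones open their parent snapshot, plain thin blobs get
 * the zeroes device, and fully allocated blobs need no backing device at all.
 */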
static void
blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	const void *value;
	size_t len;
	int rc;

	if (blob_is_esnap_clone(blob)) {
		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
		blob_load_final(ctx, rc);
		return;
	}

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}

static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	uint64_t i;
	uint32_t crc;
	uint64_t lba;
	void *tmp;
	uint64_t sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case blob size should be increased by up to the amount left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(seq, ctx);
}

static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	int rc;
	uint32_t crc;
	uint32_t current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support for
		 * the extent table. No extent_* descriptors means that the blob has a
		 * length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in the metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(seq, ctx);
	}
}

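/*
 * blob_load() below kicks off the read of the first metadata page, whose
 * index is derived from the blob id; blob_load_cpl() then follows the
 * page->next chain until the full metadata has been read and parsed.
 */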
spdk_blob_store *bs = blob->bs; 1959 size_t i; 1960 uint64_t lba; 1961 uint64_t lba_count; 1962 spdk_bs_batch_t *batch; 1963 1964 batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx); 1965 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 1966 1967 /* Clear all extent_pages that were truncated */ 1968 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1969 /* Nothing to clear if it was not allocated */ 1970 if (blob->active.extent_pages[i] != 0) { 1971 lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]); 1972 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1973 } 1974 } 1975 1976 bs_batch_close(batch); 1977 } 1978 1979 static void 1980 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1981 { 1982 struct spdk_blob_persist_ctx *ctx = cb_arg; 1983 struct spdk_blob *blob = ctx->blob; 1984 struct spdk_blob_store *bs = blob->bs; 1985 size_t i; 1986 1987 if (bserrno != 0) { 1988 blob_persist_complete(seq, ctx, bserrno); 1989 return; 1990 } 1991 1992 spdk_spin_lock(&bs->used_lock); 1993 /* Release all clusters that were truncated */ 1994 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1995 uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]); 1996 1997 /* Nothing to release if it was not allocated */ 1998 if (blob->active.clusters[i] != 0) { 1999 bs_release_cluster(bs, cluster_num); 2000 } 2001 } 2002 spdk_spin_unlock(&bs->used_lock); 2003 2004 if (blob->active.num_clusters == 0) { 2005 free(blob->active.clusters); 2006 blob->active.clusters = NULL; 2007 blob->active.cluster_array_size = 0; 2008 } else if (blob->active.num_clusters != blob->active.cluster_array_size) { 2009 #ifndef __clang_analyzer__ 2010 void *tmp; 2011 2012 /* scan-build really can't figure reallocs, workaround it */ 2013 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters); 2014 assert(tmp != NULL); 2015 blob->active.clusters = tmp; 2016 2017 #endif 2018 blob->active.cluster_array_size = blob->active.num_clusters; 2019 } 2020 2021 /* Move on to clearing extent pages */ 2022 blob_persist_clear_extents(seq, ctx); 2023 } 2024 2025 static void 2026 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2027 { 2028 struct spdk_blob *blob = ctx->blob; 2029 struct spdk_blob_store *bs = blob->bs; 2030 spdk_bs_batch_t *batch; 2031 size_t i; 2032 uint64_t lba; 2033 uint64_t lba_count; 2034 2035 /* Clusters don't move around in blobs. The list shrinks or grows 2036 * at the end, but no changes ever occur in the middle of the list. 2037 */ 2038 2039 batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx); 2040 2041 /* Clear all clusters that were truncated */ 2042 lba = 0; 2043 lba_count = 0; 2044 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 2045 uint64_t next_lba = blob->active.clusters[i]; 2046 uint64_t next_lba_count = bs_cluster_to_lba(bs, 1); 2047 2048 if (next_lba > 0 && (lba + lba_count) == next_lba) { 2049 /* This cluster is contiguous with the previous one. */ 2050 lba_count += next_lba_count; 2051 continue; 2052 } else if (next_lba == 0) { 2053 continue; 2054 } 2055 2056 /* This cluster is not contiguous with the previous one. 
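* Flush the run of LBAs accumulated so far and start a new run at this cluster.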
*/ 2057 2058 /* If a run of LBAs previously existed, clear them now */ 2059 if (lba_count > 0) { 2060 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2061 } 2062 2063 /* Start building the next batch */ 2064 lba = next_lba; 2065 if (next_lba > 0) { 2066 lba_count = next_lba_count; 2067 } else { 2068 lba_count = 0; 2069 } 2070 } 2071 2072 /* If we ended with a contiguous set of LBAs, clear them now */ 2073 if (lba_count > 0) { 2074 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2075 } 2076 2077 bs_batch_close(batch); 2078 } 2079 2080 static void 2081 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2082 { 2083 struct spdk_blob_persist_ctx *ctx = cb_arg; 2084 struct spdk_blob *blob = ctx->blob; 2085 struct spdk_blob_store *bs = blob->bs; 2086 size_t i; 2087 2088 if (bserrno != 0) { 2089 blob_persist_complete(seq, ctx, bserrno); 2090 return; 2091 } 2092 2093 spdk_spin_lock(&bs->used_lock); 2094 2095 /* This loop starts at 1 because the first page is special and handled 2096 * below. The pages (except the first) are never written in place, 2097 * so any pages in the clean list must be zeroed. 2098 */ 2099 for (i = 1; i < blob->clean.num_pages; i++) { 2100 bs_release_md_page(bs, blob->clean.pages[i]); 2101 } 2102 2103 if (blob->active.num_pages == 0) { 2104 uint32_t page_num; 2105 2106 page_num = bs_blobid_to_page(blob->id); 2107 bs_release_md_page(bs, page_num); 2108 } 2109 2110 spdk_spin_unlock(&bs->used_lock); 2111 2112 /* Move on to clearing clusters */ 2113 blob_persist_clear_clusters(seq, ctx); 2114 } 2115 2116 static void 2117 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2118 { 2119 struct spdk_blob_persist_ctx *ctx = cb_arg; 2120 struct spdk_blob *blob = ctx->blob; 2121 struct spdk_blob_store *bs = blob->bs; 2122 uint64_t lba; 2123 uint64_t lba_count; 2124 spdk_bs_batch_t *batch; 2125 size_t i; 2126 2127 if (bserrno != 0) { 2128 blob_persist_complete(seq, ctx, bserrno); 2129 return; 2130 } 2131 2132 batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx); 2133 2134 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 2135 2136 /* This loop starts at 1 because the first page is special and handled 2137 * below. The pages (except the first) are never written in place, 2138 * so any pages in the clean list must be zeroed. 2139 */ 2140 for (i = 1; i < blob->clean.num_pages; i++) { 2141 lba = bs_md_page_to_lba(bs, blob->clean.pages[i]); 2142 2143 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2144 } 2145 2146 /* The first page will only be zeroed if this is a delete.
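* An existing blob always keeps at least one active metadata page, so active.num_pages == 0 here is the deletion signal (see blob_persist_start).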
*/ 2147 if (blob->active.num_pages == 0) { 2148 uint32_t page_num; 2149 2150 /* The first page in the metadata goes where the blobid indicates */ 2151 page_num = bs_blobid_to_page(blob->id); 2152 lba = bs_md_page_to_lba(bs, page_num); 2153 2154 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2155 } 2156 2157 bs_batch_close(batch); 2158 } 2159 2160 static void 2161 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2162 { 2163 struct spdk_blob_persist_ctx *ctx = cb_arg; 2164 struct spdk_blob *blob = ctx->blob; 2165 struct spdk_blob_store *bs = blob->bs; 2166 uint64_t lba; 2167 uint32_t lba_count; 2168 struct spdk_blob_md_page *page; 2169 2170 if (bserrno != 0) { 2171 blob_persist_complete(seq, ctx, bserrno); 2172 return; 2173 } 2174 2175 if (blob->active.num_pages == 0) { 2176 /* Move on to the next step */ 2177 blob_persist_zero_pages(seq, ctx, 0); 2178 return; 2179 } 2180 2181 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2182 2183 page = &ctx->pages[0]; 2184 /* The first page in the metadata goes where the blobid indicates */ 2185 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 2186 2187 bs_sequence_write_dev(seq, page, lba, lba_count, 2188 blob_persist_zero_pages, ctx); 2189 } 2190 2191 static void 2192 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2193 { 2194 struct spdk_blob *blob = ctx->blob; 2195 struct spdk_blob_store *bs = blob->bs; 2196 uint64_t lba; 2197 uint32_t lba_count; 2198 struct spdk_blob_md_page *page; 2199 spdk_bs_batch_t *batch; 2200 size_t i; 2201 2202 /* Clusters don't move around in blobs. The list shrinks or grows 2203 * at the end, but no changes ever occur in the middle of the list. 2204 */ 2205 2206 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2207 2208 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 2209 2210 /* This starts at 1. The root page is not written until 2211 * all of the others are finished 2212 */ 2213 for (i = 1; i < blob->active.num_pages; i++) { 2214 page = &ctx->pages[i]; 2215 assert(page->sequence_num == i); 2216 2217 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 2218 2219 bs_batch_write_dev(batch, page, lba, lba_count); 2220 } 2221 2222 bs_batch_close(batch); 2223 } 2224 2225 static int 2226 blob_resize(struct spdk_blob *blob, uint64_t sz) 2227 { 2228 uint64_t i; 2229 uint64_t *tmp; 2230 uint64_t cluster; 2231 uint32_t lfmd; /* lowest free md page */ 2232 uint64_t num_clusters; 2233 uint32_t *ep_tmp; 2234 uint64_t new_num_ep = 0, current_num_ep = 0; 2235 struct spdk_blob_store *bs; 2236 int rc; 2237 2238 bs = blob->bs; 2239 2240 blob_verify_md_op(blob); 2241 2242 if (blob->active.num_clusters == sz) { 2243 return 0; 2244 } 2245 2246 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2247 /* If this blob was resized to be larger, then smaller, then 2248 * larger without syncing, then the cluster array already 2249 * contains spare assigned clusters we can use. 2250 */ 2251 num_clusters = spdk_min(blob->active.cluster_array_size, 2252 sz); 2253 } else { 2254 num_clusters = blob->active.num_clusters; 2255 } 2256 2257 if (blob->use_extent_table) { 2258 /* Round up, since every cluster beyond the current Extent Table size 2259 * requires a new extent page.
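* For example, with SPDK_EXTENTS_PER_EP extents per extent page, growing to sz = SPDK_EXTENTS_PER_EP + 1 clusters rounds up to two extent pages.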
*/ 2260 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2261 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2262 } 2263 2264 assert(!spdk_spin_held(&bs->used_lock)); 2265 2266 /* Check first that we have enough clusters and md pages before we start claiming them. 2267 * bs->used_lock is held to ensure that clusters we think are free are still free when we go 2268 * to claim them later in this function. 2269 */ 2270 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2271 spdk_spin_lock(&bs->used_lock); 2272 if ((sz - num_clusters) > bs->num_free_clusters) { 2273 rc = -ENOSPC; 2274 goto out; 2275 } 2276 lfmd = 0; 2277 for (i = current_num_ep; i < new_num_ep ; i++) { 2278 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2279 if (lfmd == UINT32_MAX) { 2280 /* No more free md pages. Cannot satisfy the request */ 2281 rc = -ENOSPC; 2282 goto out; 2283 } 2284 } 2285 } 2286 2287 if (sz > num_clusters) { 2288 /* Expand the cluster array if necessary. 2289 * We only shrink the array when persisting. 2290 */ 2291 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2292 if (sz > 0 && tmp == NULL) { 2293 rc = -ENOMEM; 2294 goto out; 2295 } 2296 memset(tmp + blob->active.cluster_array_size, 0, 2297 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2298 blob->active.clusters = tmp; 2299 blob->active.cluster_array_size = sz; 2300 2301 /* Expand the extents table, only if enough clusters were added */ 2302 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2303 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2304 if (new_num_ep > 0 && ep_tmp == NULL) { 2305 rc = -ENOMEM; 2306 goto out; 2307 } 2308 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2309 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2310 blob->active.extent_pages = ep_tmp; 2311 blob->active.extent_pages_array_size = new_num_ep; 2312 } 2313 } 2314 2315 blob->state = SPDK_BLOB_STATE_DIRTY; 2316 2317 if (spdk_blob_is_thin_provisioned(blob) == false) { 2318 cluster = 0; 2319 lfmd = 0; 2320 for (i = num_clusters; i < sz; i++) { 2321 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2322 /* Do not increment lfmd here. lfmd will get updated 2323 * to the md_page allocated (if any) when a new extent 2324 * page is needed. Just pass that value again, 2325 * bs_allocate_cluster will just start at that index 2326 * to find the next free md_page when needed. 
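* Passing the previous value back in lets spdk_bit_array_find_first_clear() resume its scan where it left off instead of restarting from page 0 for every cluster.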
2327 */ 2328 } 2329 } 2330 2331 /* If we are shrinking the blob, we must adjust num_allocated_clusters */ 2332 for (i = sz; i < num_clusters; i++) { 2333 if (blob->active.clusters[i] != 0) { 2334 blob->active.num_allocated_clusters--; 2335 } 2336 } 2337 2338 blob->active.num_clusters = sz; 2339 blob->active.num_extent_pages = new_num_ep; 2340 2341 rc = 0; 2342 out: 2343 if (spdk_spin_held(&bs->used_lock)) { 2344 spdk_spin_unlock(&bs->used_lock); 2345 } 2346 2347 return rc; 2348 } 2349 2350 static void 2351 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2352 { 2353 spdk_bs_sequence_t *seq = ctx->seq; 2354 struct spdk_blob *blob = ctx->blob; 2355 struct spdk_blob_store *bs = blob->bs; 2356 uint64_t i; 2357 uint32_t page_num; 2358 void *tmp; 2359 int rc; 2360 2361 /* Generate the new metadata */ 2362 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2363 if (rc < 0) { 2364 blob_persist_complete(seq, ctx, rc); 2365 return; 2366 } 2367 2368 assert(blob->active.num_pages >= 1); 2369 2370 /* Resize the cache of page indices */ 2371 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2372 if (!tmp) { 2373 blob_persist_complete(seq, ctx, -ENOMEM); 2374 return; 2375 } 2376 blob->active.pages = tmp; 2377 2378 /* Assign this metadata to pages. This requires two passes - one to verify that there are 2379 * enough pages and a second to actually claim them. The used_lock is held across 2380 * both passes to ensure things don't change in the middle. 2381 */ 2382 spdk_spin_lock(&bs->used_lock); 2383 page_num = 0; 2384 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 2385 for (i = 1; i < blob->active.num_pages; i++) { 2386 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2387 if (page_num == UINT32_MAX) { 2388 spdk_spin_unlock(&bs->used_lock); 2389 blob_persist_complete(seq, ctx, -ENOMEM); 2390 return; 2391 } 2392 page_num++; 2393 } 2394 2395 page_num = 0; 2396 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2397 for (i = 1; i < blob->active.num_pages; i++) { 2398 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2399 ctx->pages[i - 1].next = page_num; 2400 /* Now that previous metadata page is complete, calculate the crc for it. */ 2401 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2402 blob->active.pages[i] = page_num; 2403 bs_claim_md_page(bs, page_num); 2404 SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num, 2405 blob->id); 2406 page_num++; 2407 } 2408 spdk_spin_unlock(&bs->used_lock); 2409 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2410 /* Start writing the metadata from last page to first */ 2411 blob->state = SPDK_BLOB_STATE_CLEAN; 2412 blob_persist_write_page_chain(seq, ctx); 2413 } 2414 2415 static void 2416 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2417 { 2418 struct spdk_blob_persist_ctx *ctx = cb_arg; 2419 struct spdk_blob *blob = ctx->blob; 2420 size_t i; 2421 uint32_t extent_page_id; 2422 uint32_t page_count = 0; 2423 int rc; 2424 2425 if (ctx->extent_page != NULL) { 2426 spdk_free(ctx->extent_page); 2427 ctx->extent_page = NULL; 2428 } 2429 2430 if (bserrno != 0) { 2431 blob_persist_complete(seq, ctx, bserrno); 2432 return; 2433 } 2434 2435 /* Only write out Extent Pages when blob was resized. 
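* ctx->next_extent_page was primed in blob_persist_start based on the resize direction, and each completed write re-enters this function so the loop resumes at the following page.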
*/ 2436 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2437 extent_page_id = blob->active.extent_pages[i]; 2438 if (extent_page_id == 0) { 2439 /* No Extent Page to persist */ 2440 assert(spdk_blob_is_thin_provisioned(blob)); 2441 continue; 2442 } 2443 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2444 ctx->next_extent_page = i + 1; 2445 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2446 if (rc < 0) { 2447 blob_persist_complete(seq, ctx, rc); 2448 return; 2449 } 2450 2451 blob->state = SPDK_BLOB_STATE_DIRTY; 2452 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2453 2454 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2455 2456 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2457 bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 2458 blob_persist_write_extent_pages, ctx); 2459 return; 2460 } 2461 2462 blob_persist_generate_new_md(ctx); 2463 } 2464 2465 static void 2466 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2467 { 2468 struct spdk_blob_persist_ctx *ctx = cb_arg; 2469 struct spdk_blob *blob = ctx->blob; 2470 2471 if (bserrno != 0) { 2472 blob_persist_complete(seq, ctx, bserrno); 2473 return; 2474 } 2475 2476 if (blob->active.num_pages == 0) { 2477 /* This is the signal that the blob should be deleted. 2478 * Immediately jump to the clean up routine. */ 2479 assert(blob->clean.num_pages > 0); 2480 blob->state = SPDK_BLOB_STATE_CLEAN; 2481 blob_persist_zero_pages(seq, ctx, 0); 2482 return; 2483 2484 } 2485 2486 if (blob->clean.num_clusters < blob->active.num_clusters) { 2487 /* Blob was resized up */ 2488 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2489 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2490 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2491 /* Blob was resized down */ 2492 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2493 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2494 } else { 2495 /* No change in size occurred */ 2496 blob_persist_generate_new_md(ctx); 2497 return; 2498 } 2499 2500 blob_persist_write_extent_pages(seq, ctx, 0); 2501 } 2502 2503 struct spdk_bs_mark_dirty { 2504 struct spdk_blob_store *bs; 2505 struct spdk_bs_super_block *super; 2506 spdk_bs_sequence_cpl cb_fn; 2507 void *cb_arg; 2508 }; 2509 2510 static void 2511 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2512 { 2513 struct spdk_bs_mark_dirty *ctx = cb_arg; 2514 2515 if (bserrno == 0) { 2516 ctx->bs->clean = 0; 2517 } 2518 2519 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 2520 2521 spdk_free(ctx->super); 2522 free(ctx); 2523 } 2524 2525 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2526 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2527 2528 2529 static void 2530 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2531 { 2532 struct spdk_bs_mark_dirty *ctx = cb_arg; 2533 int rc; 2534 2535 if (bserrno != 0) { 2536 bs_mark_dirty_write_cpl(seq, ctx, bserrno); 2537 return; 2538 } 2539 2540 rc = bs_super_validate(ctx->super, ctx->bs); 2541 if (rc != 0) { 2542 bs_mark_dirty_write_cpl(seq, ctx, rc); 2543 return; 2544 } 2545 2546 ctx->super->clean = 0; 2547 if (ctx->super->size == 0) { 2548 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 2549 } 
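/* Persisting the super block with clean = 0 means a crash from this point on is detected as a dirty shutdown, forcing recovery of the metadata masks on the next load. */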
2550 2551 bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx); 2552 } 2553 2554 static void 2555 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2556 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2557 { 2558 struct spdk_bs_mark_dirty *ctx; 2559 2560 /* Blobstore is already marked dirty */ 2561 if (bs->clean == 0) { 2562 cb_fn(seq, cb_arg, 0); 2563 return; 2564 } 2565 2566 ctx = calloc(1, sizeof(*ctx)); 2567 if (!ctx) { 2568 cb_fn(seq, cb_arg, -ENOMEM); 2569 return; 2570 } 2571 ctx->bs = bs; 2572 ctx->cb_fn = cb_fn; 2573 ctx->cb_arg = cb_arg; 2574 2575 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2576 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2577 if (!ctx->super) { 2578 free(ctx); 2579 cb_fn(seq, cb_arg, -ENOMEM); 2580 return; 2581 } 2582 2583 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 2584 bs_byte_to_lba(bs, sizeof(*ctx->super)), 2585 bs_mark_dirty_write, ctx); 2586 } 2587 2588 /* Write a blob to disk */ 2589 static void 2590 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2591 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2592 { 2593 struct spdk_blob_persist_ctx *ctx; 2594 2595 blob_verify_md_op(blob); 2596 2597 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2598 cb_fn(seq, cb_arg, 0); 2599 return; 2600 } 2601 2602 ctx = calloc(1, sizeof(*ctx)); 2603 if (!ctx) { 2604 cb_fn(seq, cb_arg, -ENOMEM); 2605 return; 2606 } 2607 ctx->blob = blob; 2608 ctx->seq = seq; 2609 ctx->cb_fn = cb_fn; 2610 ctx->cb_arg = cb_arg; 2611 2612 /* Multiple blob persists can affect one another, via blob->state or 2613 * blob mutable data changes. To prevent it, queue up the persists. */ 2614 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2615 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2616 return; 2617 } 2618 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2619 2620 bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx); 2621 } 2622 2623 struct spdk_blob_copy_cluster_ctx { 2624 struct spdk_blob *blob; 2625 uint8_t *buf; 2626 uint64_t page; 2627 uint64_t new_cluster; 2628 uint32_t new_extent_page; 2629 spdk_bs_sequence_t *seq; 2630 struct spdk_blob_md_page *new_cluster_page; 2631 }; 2632 2633 struct spdk_blob_free_cluster_ctx { 2634 struct spdk_blob *blob; 2635 uint64_t page; 2636 struct spdk_blob_md_page *md_page; 2637 uint64_t cluster_num; 2638 uint32_t extent_page; 2639 spdk_bs_sequence_t *seq; 2640 }; 2641 2642 static void 2643 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2644 { 2645 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2646 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2647 TAILQ_HEAD(, spdk_bs_request_set) requests; 2648 spdk_bs_user_op_t *op; 2649 2650 TAILQ_INIT(&requests); 2651 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2652 2653 while (!TAILQ_EMPTY(&requests)) { 2654 op = TAILQ_FIRST(&requests); 2655 TAILQ_REMOVE(&requests, op, link); 2656 if (bserrno == 0) { 2657 bs_user_op_execute(op); 2658 } else { 2659 bs_user_op_abort(op, bserrno); 2660 } 2661 } 2662 2663 spdk_free(ctx->buf); 2664 free(ctx); 2665 } 2666 2667 static void 2668 blob_free_cluster_cpl(void *cb_arg, int bserrno) 2669 { 2670 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 2671 spdk_bs_sequence_t *seq = ctx->seq; 2672 2673 bs_sequence_finish(seq, bserrno); 2674 2675 free(ctx); 2676 } 2677 2678 static void 2679 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx) 2680 { 2681 
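/* Roll back a speculative cluster allocation: return the cluster, and the extent page if one was claimed, to their free pools under used_lock. */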
spdk_spin_lock(&ctx->blob->bs->used_lock); 2682 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2683 if (ctx->new_extent_page != 0) { 2684 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2685 } 2686 spdk_spin_unlock(&ctx->blob->bs->used_lock); 2687 } 2688 2689 static void 2690 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno) 2691 { 2692 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2693 2694 if (bserrno) { 2695 SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno); 2696 } 2697 2698 blob_insert_cluster_revert(ctx); 2699 bs_sequence_finish(ctx->seq, bserrno); 2700 } 2701 2702 static void 2703 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx) 2704 { 2705 struct spdk_bs_cpl cpl; 2706 spdk_bs_batch_t *batch; 2707 struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel); 2708 2709 /* 2710 * We allocated a cluster and we copied data to it. But now, we realized that we don't need 2711 * this cluster and we want to release it. We must ensure that we clear the data on this 2712 * cluster. 2713 * The cluster may later be re-allocated by a thick-provisioned blob for example. When 2714 * reading from this thick-provisioned blob before writing data, we should read zeroes. 2715 */ 2716 2717 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2718 cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl; 2719 cpl.u.blob_basic.cb_arg = ctx; 2720 2721 batch = bs_batch_open(ch, &cpl, ctx->blob); 2722 if (!batch) { 2723 blob_insert_cluster_clear_cpl(ctx, -ENOMEM); 2724 return; 2725 } 2726 2727 bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2728 bs_cluster_to_lba(ctx->blob->bs, 1)); 2729 bs_batch_close(batch); 2730 } 2731 2732 static void 2733 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2734 { 2735 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2736 2737 if (bserrno) { 2738 if (bserrno == -EEXIST) { 2739 /* The metadata insert failed because another thread 2740 * allocated the cluster first. Clear and free our cluster 2741 * but continue without error. 
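* The winning thread's mapping already covers this cluster, so releasing ours loses no data.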
*/ 2742 blob_insert_cluster_clear(ctx); 2743 return; 2744 } 2745 2746 blob_insert_cluster_revert(ctx); 2747 } 2748 2749 bs_sequence_finish(ctx->seq, bserrno); 2750 } 2751 2752 static void 2753 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2754 { 2755 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2756 uint32_t cluster_number; 2757 2758 if (bserrno) { 2759 /* The write failed, so jump to the final completion handler */ 2760 bs_sequence_finish(seq, bserrno); 2761 return; 2762 } 2763 2764 cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page); 2765 2766 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2767 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2768 } 2769 2770 static void 2771 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2772 { 2773 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2774 2775 if (bserrno != 0) { 2776 /* The read failed, so jump to the final completion handler */ 2777 bs_sequence_finish(seq, bserrno); 2778 return; 2779 } 2780 2781 /* Write whole cluster */ 2782 bs_sequence_write_dev(seq, ctx->buf, 2783 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2784 bs_cluster_to_lba(ctx->blob->bs, 1), 2785 blob_write_copy_cpl, ctx); 2786 } 2787 2788 static bool 2789 blob_can_copy(struct spdk_blob *blob, uint32_t cluster_start_page, uint64_t *base_lba) 2790 { 2791 uint64_t lba = bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page); 2792 2793 return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) && 2794 blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba); 2795 } 2796 2797 static void 2798 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba) 2799 { 2800 struct spdk_blob *blob = ctx->blob; 2801 uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz); 2802 2803 bs_sequence_copy_dev(ctx->seq, 2804 bs_cluster_to_lba(blob->bs, ctx->new_cluster), 2805 src_lba, 2806 lba_count, 2807 blob_write_copy_cpl, ctx); 2808 } 2809 2810 static void 2811 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2812 struct spdk_io_channel *_ch, 2813 uint64_t io_unit, spdk_bs_user_op_t *op) 2814 { 2815 struct spdk_bs_cpl cpl; 2816 struct spdk_bs_channel *ch; 2817 struct spdk_blob_copy_cluster_ctx *ctx; 2818 uint32_t cluster_start_page; 2819 uint32_t cluster_number; 2820 bool is_zeroes; 2821 bool can_copy; 2822 bool is_valid_range; 2823 uint64_t copy_src_lba; 2824 int rc; 2825 2826 ch = spdk_io_channel_get_ctx(_ch); 2827 2828 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2829 /* There are already operations pending. Queue this user op 2830 * and return because it will be re-executed when the outstanding 2831 * cluster allocation completes. */ 2832 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2833 return; 2834 } 2835 2836 /* Round the io_unit offset down to the first page in the cluster */ 2837 cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit); 2838 2839 /* Calculate which index in the metadata cluster array the corresponding 2840 * cluster is supposed to be at. 
*/ 2841 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2842 2843 ctx = calloc(1, sizeof(*ctx)); 2844 if (!ctx) { 2845 bs_user_op_abort(op, -ENOMEM); 2846 return; 2847 } 2848 2849 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2850 2851 ctx->blob = blob; 2852 ctx->page = cluster_start_page; 2853 ctx->new_cluster_page = ch->new_cluster_page; 2854 memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE); 2855 2856 /* Check if the cluster that we intend to do CoW for is valid for 2857 * the backing dev. For zeroes backing dev, it'll be always valid. 2858 * For other backing dev e.g. a snapshot, it could be invalid if 2859 * the blob has been resized after snapshot was taken. */ 2860 is_valid_range = blob->back_bs_dev->is_range_valid(blob->back_bs_dev, 2861 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2862 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2863 2864 can_copy = is_valid_range && blob_can_copy(blob, cluster_start_page, &copy_src_lba); 2865 2866 is_zeroes = is_valid_range && blob->back_bs_dev->is_zeroes(blob->back_bs_dev, 2867 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2868 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2869 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) { 2870 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2871 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2872 if (!ctx->buf) { 2873 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2874 blob->bs->cluster_sz); 2875 free(ctx); 2876 bs_user_op_abort(op, -ENOMEM); 2877 return; 2878 } 2879 } 2880 2881 spdk_spin_lock(&blob->bs->used_lock); 2882 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2883 false); 2884 spdk_spin_unlock(&blob->bs->used_lock); 2885 if (rc != 0) { 2886 spdk_free(ctx->buf); 2887 free(ctx); 2888 bs_user_op_abort(op, rc); 2889 return; 2890 } 2891 2892 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2893 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2894 cpl.u.blob_basic.cb_arg = ctx; 2895 2896 ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob); 2897 if (!ctx->seq) { 2898 spdk_spin_lock(&blob->bs->used_lock); 2899 bs_release_cluster(blob->bs, ctx->new_cluster); 2900 spdk_spin_unlock(&blob->bs->used_lock); 2901 spdk_free(ctx->buf); 2902 free(ctx); 2903 bs_user_op_abort(op, -ENOMEM); 2904 return; 2905 } 2906 2907 /* Queue the user op to block other incoming operations */ 2908 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2909 2910 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) { 2911 if (can_copy) { 2912 blob_copy(ctx, op, copy_src_lba); 2913 } else { 2914 /* Read cluster from backing device */ 2915 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2916 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2917 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2918 blob_write_copy, ctx); 2919 } 2920 2921 } else { 2922 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2923 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2924 } 2925 } 2926 2927 static inline bool 2928 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2929 uint64_t *lba, uint64_t *lba_count) 2930 { 2931 *lba_count = length; 2932 2933 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2934 assert(blob->back_bs_dev != NULL); 2935 *lba = bs_io_unit_to_back_dev_lba(blob, io_unit); 2936 *lba_count =
bs_io_unit_to_back_dev_lba(blob, *lba_count); 2937 return false; 2938 } else { 2939 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2940 return true; 2941 } 2942 } 2943 2944 struct op_split_ctx { 2945 struct spdk_blob *blob; 2946 struct spdk_io_channel *channel; 2947 uint64_t io_unit_offset; 2948 uint64_t io_units_remaining; 2949 void *curr_payload; 2950 enum spdk_blob_op_type op_type; 2951 spdk_bs_sequence_t *seq; 2952 bool in_submit_ctx; 2953 bool completed_in_submit_ctx; 2954 bool done; 2955 }; 2956 2957 static void 2958 blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2959 { 2960 struct op_split_ctx *ctx = cb_arg; 2961 struct spdk_blob *blob = ctx->blob; 2962 struct spdk_io_channel *ch = ctx->channel; 2963 enum spdk_blob_op_type op_type = ctx->op_type; 2964 uint8_t *buf; 2965 uint64_t offset; 2966 uint64_t length; 2967 uint64_t op_length; 2968 2969 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2970 bs_sequence_finish(ctx->seq, bserrno); 2971 if (ctx->in_submit_ctx) { 2972 /* Defer freeing of the ctx object, since it will be 2973 * accessed when this unwinds back to the submission 2974 * context. 2975 */ 2976 ctx->done = true; 2977 } else { 2978 free(ctx); 2979 } 2980 return; 2981 } 2982 2983 if (ctx->in_submit_ctx) { 2984 /* If this split operation completed in the context 2985 * of its submission, mark the flag and return immediately 2986 * to avoid recursion. 2987 */ 2988 ctx->completed_in_submit_ctx = true; 2989 return; 2990 } 2991 2992 while (true) { 2993 ctx->completed_in_submit_ctx = false; 2994 2995 offset = ctx->io_unit_offset; 2996 length = ctx->io_units_remaining; 2997 buf = ctx->curr_payload; 2998 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob, 2999 offset)); 3000 3001 /* Update length and payload for next operation */ 3002 ctx->io_units_remaining -= op_length; 3003 ctx->io_unit_offset += op_length; 3004 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 3005 ctx->curr_payload += op_length * blob->bs->io_unit_size; 3006 } 3007 3008 assert(!ctx->in_submit_ctx); 3009 ctx->in_submit_ctx = true; 3010 3011 switch (op_type) { 3012 case SPDK_BLOB_READ: 3013 spdk_blob_io_read(blob, ch, buf, offset, op_length, 3014 blob_request_submit_op_split_next, ctx); 3015 break; 3016 case SPDK_BLOB_WRITE: 3017 spdk_blob_io_write(blob, ch, buf, offset, op_length, 3018 blob_request_submit_op_split_next, ctx); 3019 break; 3020 case SPDK_BLOB_UNMAP: 3021 spdk_blob_io_unmap(blob, ch, offset, op_length, 3022 blob_request_submit_op_split_next, ctx); 3023 break; 3024 case SPDK_BLOB_WRITE_ZEROES: 3025 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 3026 blob_request_submit_op_split_next, ctx); 3027 break; 3028 case SPDK_BLOB_READV: 3029 case SPDK_BLOB_WRITEV: 3030 SPDK_ERRLOG("readv/write not valid\n"); 3031 bs_sequence_finish(ctx->seq, -EINVAL); 3032 free(ctx); 3033 return; 3034 } 3035 3036 #ifndef __clang_analyzer__ 3037 /* scan-build reports a false positive around accessing the ctx here. It 3038 * forms a path that recursively calls this function, but then says 3039 * "assuming ctx->in_submit_ctx is false", when that isn't possible. 3040 * This path does free(ctx), returns to here, and reports a use-after-free 3041 * bug. Wrapping this bit of code so that scan-build doesn't see it 3042 * works around the scan-build bug. 3043 */ 3044 assert(ctx->in_submit_ctx); 3045 ctx->in_submit_ctx = false; 3046 3047 /* If the operation completed immediately, loop back and submit the 3048 * next operation. 
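* Looping here rather than recursing keeps the stack depth bounded when many sub-operations complete synchronously.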
Otherwise we can return and the next split 3049 * operation will get submitted when this current operation is 3050 * later completed asynchronously. 3051 */ 3052 if (ctx->completed_in_submit_ctx) { 3053 continue; 3054 } else if (ctx->done) { 3055 free(ctx); 3056 } 3057 #endif 3058 break; 3059 } 3060 } 3061 3062 static void 3063 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 3064 void *payload, uint64_t offset, uint64_t length, 3065 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3066 { 3067 struct op_split_ctx *ctx; 3068 spdk_bs_sequence_t *seq; 3069 struct spdk_bs_cpl cpl; 3070 3071 assert(blob != NULL); 3072 3073 ctx = calloc(1, sizeof(struct op_split_ctx)); 3074 if (ctx == NULL) { 3075 cb_fn(cb_arg, -ENOMEM); 3076 return; 3077 } 3078 3079 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3080 cpl.u.blob_basic.cb_fn = cb_fn; 3081 cpl.u.blob_basic.cb_arg = cb_arg; 3082 3083 seq = bs_sequence_start_blob(ch, &cpl, blob); 3084 if (!seq) { 3085 free(ctx); 3086 cb_fn(cb_arg, -ENOMEM); 3087 return; 3088 } 3089 3090 ctx->blob = blob; 3091 ctx->channel = ch; 3092 ctx->curr_payload = payload; 3093 ctx->io_unit_offset = offset; 3094 ctx->io_units_remaining = length; 3095 ctx->op_type = op_type; 3096 ctx->seq = seq; 3097 3098 blob_request_submit_op_split_next(ctx, 0); 3099 } 3100 3101 static void 3102 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno) 3103 { 3104 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 3105 3106 if (bserrno) { 3107 bs_sequence_finish(ctx->seq, bserrno); 3108 free(ctx); 3109 return; 3110 } 3111 3112 blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num, 3113 ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx); 3114 } 3115 3116 static void 3117 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 3118 void *payload, uint64_t offset, uint64_t length, 3119 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3120 { 3121 struct spdk_bs_cpl cpl; 3122 uint64_t lba; 3123 uint64_t lba_count; 3124 bool is_allocated; 3125 3126 assert(blob != NULL); 3127 3128 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3129 cpl.u.blob_basic.cb_fn = cb_fn; 3130 cpl.u.blob_basic.cb_arg = cb_arg; 3131 3132 if (blob->frozen_refcnt) { 3133 /* This blob I/O is frozen */ 3134 spdk_bs_user_op_t *op; 3135 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3136 3137 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3138 if (!op) { 3139 cb_fn(cb_arg, -ENOMEM); 3140 return; 3141 } 3142 3143 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3144 3145 return; 3146 } 3147 3148 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3149 3150 switch (op_type) { 3151 case SPDK_BLOB_READ: { 3152 spdk_bs_batch_t *batch; 3153 3154 batch = bs_batch_open(_ch, &cpl, blob); 3155 if (!batch) { 3156 cb_fn(cb_arg, -ENOMEM); 3157 return; 3158 } 3159 3160 if (is_allocated) { 3161 /* Read from the blob */ 3162 bs_batch_read_dev(batch, payload, lba, lba_count); 3163 } else { 3164 /* Read from the backing block device */ 3165 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 3166 } 3167 3168 bs_batch_close(batch); 3169 break; 3170 } 3171 case SPDK_BLOB_WRITE: 3172 case SPDK_BLOB_WRITE_ZEROES: { 3173 if (is_allocated) { 3174 /* Write to the blob */ 3175 spdk_bs_batch_t *batch; 3176 3177 if (lba_count == 0) { 3178 cb_fn(cb_arg, 0); 3179 return; 3180 } 3181 3182 batch = bs_batch_open(_ch, &cpl, blob); 3183 if (!batch) 
{ 3184 cb_fn(cb_arg, -ENOMEM); 3185 return; 3186 } 3187 3188 if (op_type == SPDK_BLOB_WRITE) { 3189 bs_batch_write_dev(batch, payload, lba, lba_count); 3190 } else { 3191 bs_batch_write_zeroes_dev(batch, lba, lba_count); 3192 } 3193 3194 bs_batch_close(batch); 3195 } else { 3196 /* Queue this operation and allocate the cluster */ 3197 spdk_bs_user_op_t *op; 3198 3199 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3200 if (!op) { 3201 cb_fn(cb_arg, -ENOMEM); 3202 return; 3203 } 3204 3205 bs_allocate_and_copy_cluster(blob, _ch, offset, op); 3206 } 3207 break; 3208 } 3209 case SPDK_BLOB_UNMAP: { 3210 struct spdk_blob_free_cluster_ctx *ctx = NULL; 3211 spdk_bs_batch_t *batch; 3212 3213 /* if aligned with cluster release cluster */ 3214 if (spdk_blob_is_thin_provisioned(blob) && is_allocated && 3215 blob_backed_with_zeroes_dev(blob) && 3216 bs_io_units_per_cluster(blob) == length) { 3217 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3218 uint32_t cluster_start_page; 3219 uint32_t cluster_number; 3220 3221 assert(offset % bs_io_units_per_cluster(blob) == 0); 3222 3223 /* Round the io_unit offset down to the first page in the cluster */ 3224 cluster_start_page = bs_io_unit_to_cluster_start(blob, offset); 3225 3226 /* Calculate which index in the metadata cluster array the corresponding 3227 * cluster is supposed to be at. */ 3228 cluster_number = bs_io_unit_to_cluster_number(blob, offset); 3229 3230 ctx = calloc(1, sizeof(*ctx)); 3231 if (!ctx) { 3232 cb_fn(cb_arg, -ENOMEM); 3233 return; 3234 } 3235 /* When freeing a cluster the flow should be (in order): 3236 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak 3237 * old data) 3238 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the 3239 * cluster), update and sync metadata freeing the cluster 3240 * 3. 
Once metadata update is done, complete the user unmap request 3241 */ 3242 ctx->blob = blob; 3243 ctx->page = cluster_start_page; 3244 ctx->cluster_num = cluster_number; 3245 ctx->md_page = bs_channel->new_cluster_page; 3246 ctx->seq = bs_sequence_start_bs(_ch, &cpl); 3247 if (!ctx->seq) { 3248 free(ctx); 3249 cb_fn(cb_arg, -ENOMEM); 3250 return; 3251 } 3252 3253 if (blob->use_extent_table) { 3254 ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number); 3255 } 3256 3257 cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete; 3258 cpl.u.blob_basic.cb_arg = ctx; 3259 } 3260 3261 batch = bs_batch_open(_ch, &cpl, blob); 3262 if (!batch) { 3263 free(ctx); 3264 cb_fn(cb_arg, -ENOMEM); 3265 return; 3266 } 3267 3268 if (is_allocated) { 3269 bs_batch_unmap_dev(batch, lba, lba_count); 3270 } 3271 3272 bs_batch_close(batch); 3273 break; 3274 } 3275 case SPDK_BLOB_READV: 3276 case SPDK_BLOB_WRITEV: 3277 SPDK_ERRLOG("readv/write not valid\n"); 3278 cb_fn(cb_arg, -EINVAL); 3279 break; 3280 } 3281 } 3282 3283 static void 3284 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3285 void *payload, uint64_t offset, uint64_t length, 3286 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3287 { 3288 assert(blob != NULL); 3289 3290 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 3291 cb_fn(cb_arg, -EPERM); 3292 return; 3293 } 3294 3295 if (length == 0) { 3296 cb_fn(cb_arg, 0); 3297 return; 3298 } 3299 3300 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3301 cb_fn(cb_arg, -EINVAL); 3302 return; 3303 } 3304 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) { 3305 blob_request_submit_op_single(_channel, blob, payload, offset, length, 3306 cb_fn, cb_arg, op_type); 3307 } else { 3308 blob_request_submit_op_split(_channel, blob, payload, offset, length, 3309 cb_fn, cb_arg, op_type); 3310 } 3311 } 3312 3313 struct rw_iov_ctx { 3314 struct spdk_blob *blob; 3315 struct spdk_io_channel *channel; 3316 spdk_blob_op_complete cb_fn; 3317 void *cb_arg; 3318 bool read; 3319 int iovcnt; 3320 struct iovec *orig_iov; 3321 uint64_t io_unit_offset; 3322 uint64_t io_units_remaining; 3323 uint64_t io_units_done; 3324 struct spdk_blob_ext_io_opts *ext_io_opts; 3325 struct iovec iov[0]; 3326 }; 3327 3328 static void 3329 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3330 { 3331 assert(cb_arg == NULL); 3332 bs_sequence_finish(seq, bserrno); 3333 } 3334 3335 static void 3336 rw_iov_split_next(void *cb_arg, int bserrno) 3337 { 3338 struct rw_iov_ctx *ctx = cb_arg; 3339 struct spdk_blob *blob = ctx->blob; 3340 struct iovec *iov, *orig_iov; 3341 int iovcnt; 3342 size_t orig_iovoff; 3343 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 3344 uint64_t byte_count; 3345 3346 if (bserrno != 0 || ctx->io_units_remaining == 0) { 3347 ctx->cb_fn(ctx->cb_arg, bserrno); 3348 free(ctx); 3349 return; 3350 } 3351 3352 io_unit_offset = ctx->io_unit_offset; 3353 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 3354 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 3355 /* 3356 * Get index and offset into the original iov array for our current position in the I/O sequence. 3357 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 3358 * point to the current position in the I/O sequence. 
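* For example, if io_units_done covers all of the first iov and half of the second, the scan below leaves orig_iov at the second element with orig_iovoff at its midpoint.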
3359 */ 3360 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 3361 orig_iov = &ctx->orig_iov[0]; 3362 orig_iovoff = 0; 3363 while (byte_count > 0) { 3364 if (byte_count >= orig_iov->iov_len) { 3365 byte_count -= orig_iov->iov_len; 3366 orig_iov++; 3367 } else { 3368 orig_iovoff = byte_count; 3369 byte_count = 0; 3370 } 3371 } 3372 3373 /* 3374 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 3375 * bytes of this next I/O remain to be accounted for in the new iov array. 3376 */ 3377 byte_count = io_units_count * blob->bs->io_unit_size; 3378 iov = &ctx->iov[0]; 3379 iovcnt = 0; 3380 while (byte_count > 0) { 3381 assert(iovcnt < ctx->iovcnt); 3382 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 3383 iov->iov_base = orig_iov->iov_base + orig_iovoff; 3384 byte_count -= iov->iov_len; 3385 orig_iovoff = 0; 3386 orig_iov++; 3387 iov++; 3388 iovcnt++; 3389 } 3390 3391 ctx->io_unit_offset += io_units_count; 3392 ctx->io_units_remaining -= io_units_count; 3393 ctx->io_units_done += io_units_count; 3394 iov = &ctx->iov[0]; 3395 3396 if (ctx->read) { 3397 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3398 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3399 } else { 3400 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3401 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3402 } 3403 } 3404 3405 static void 3406 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3407 struct iovec *iov, int iovcnt, 3408 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 3409 struct spdk_blob_ext_io_opts *ext_io_opts) 3410 { 3411 struct spdk_bs_cpl cpl; 3412 3413 assert(blob != NULL); 3414 3415 if (!read && blob->data_ro) { 3416 cb_fn(cb_arg, -EPERM); 3417 return; 3418 } 3419 3420 if (length == 0) { 3421 cb_fn(cb_arg, 0); 3422 return; 3423 } 3424 3425 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3426 cb_fn(cb_arg, -EINVAL); 3427 return; 3428 } 3429 3430 /* 3431 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 3432 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 3433 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 3434 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 3435 * to allocate a separate iov array and split the I/O such that none of the resulting 3436 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 3437 * but since this case happens very infrequently, any performance impact will be negligible. 3438 * 3439 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 3440 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 3441 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 3442 * when the batch was completed, to allow for freeing the memory for the iov arrays. 
3443 */ 3444 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 3445 uint64_t lba_count; 3446 uint64_t lba; 3447 bool is_allocated; 3448 3449 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3450 cpl.u.blob_basic.cb_fn = cb_fn; 3451 cpl.u.blob_basic.cb_arg = cb_arg; 3452 3453 if (blob->frozen_refcnt) { 3454 /* This blob I/O is frozen */ 3455 enum spdk_blob_op_type op_type; 3456 spdk_bs_user_op_t *op; 3457 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 3458 3459 op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 3460 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 3461 if (!op) { 3462 cb_fn(cb_arg, -ENOMEM); 3463 return; 3464 } 3465 3466 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3467 3468 return; 3469 } 3470 3471 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3472 3473 if (read) { 3474 spdk_bs_sequence_t *seq; 3475 3476 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3477 if (!seq) { 3478 cb_fn(cb_arg, -ENOMEM); 3479 return; 3480 } 3481 3482 seq->ext_io_opts = ext_io_opts; 3483 3484 if (is_allocated) { 3485 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3486 } else { 3487 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3488 rw_iov_done, NULL); 3489 } 3490 } else { 3491 if (is_allocated) { 3492 spdk_bs_sequence_t *seq; 3493 3494 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3495 if (!seq) { 3496 cb_fn(cb_arg, -ENOMEM); 3497 return; 3498 } 3499 3500 seq->ext_io_opts = ext_io_opts; 3501 3502 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3503 } else { 3504 /* Queue this operation and allocate the cluster */ 3505 spdk_bs_user_op_t *op; 3506 3507 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3508 length); 3509 if (!op) { 3510 cb_fn(cb_arg, -ENOMEM); 3511 return; 3512 } 3513 3514 op->ext_io_opts = ext_io_opts; 3515 3516 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3517 } 3518 } 3519 } else { 3520 struct rw_iov_ctx *ctx; 3521 3522 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3523 if (ctx == NULL) { 3524 cb_fn(cb_arg, -ENOMEM); 3525 return; 3526 } 3527 3528 ctx->blob = blob; 3529 ctx->channel = _channel; 3530 ctx->cb_fn = cb_fn; 3531 ctx->cb_arg = cb_arg; 3532 ctx->read = read; 3533 ctx->orig_iov = iov; 3534 ctx->iovcnt = iovcnt; 3535 ctx->io_unit_offset = offset; 3536 ctx->io_units_remaining = length; 3537 ctx->io_units_done = 0; 3538 ctx->ext_io_opts = ext_io_opts; 3539 3540 rw_iov_split_next(ctx, 0); 3541 } 3542 } 3543 3544 static struct spdk_blob * 3545 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3546 { 3547 struct spdk_blob find; 3548 3549 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3550 return NULL; 3551 } 3552 3553 find.id = blobid; 3554 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3555 } 3556 3557 static void 3558 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3559 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3560 { 3561 assert(blob != NULL); 3562 *snapshot_entry = NULL; 3563 *clone_entry = NULL; 3564 3565 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3566 return; 3567 } 3568 3569 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3570 if ((*snapshot_entry)->id == blob->parent_id) { 3571 break; 3572 } 3573 } 3574 3575 if (*snapshot_entry != NULL) { 3576 TAILQ_FOREACH(*clone_entry, 
&(*snapshot_entry)->clones, link) { 3577 if ((*clone_entry)->id == blob->id) { 3578 break; 3579 } 3580 } 3581 3582 assert(*clone_entry != NULL); 3583 } 3584 } 3585 3586 static int 3587 bs_channel_create(void *io_device, void *ctx_buf) 3588 { 3589 struct spdk_blob_store *bs = io_device; 3590 struct spdk_bs_channel *channel = ctx_buf; 3591 struct spdk_bs_dev *dev; 3592 uint32_t max_ops = bs->max_channel_ops; 3593 uint32_t i; 3594 3595 dev = bs->dev; 3596 3597 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3598 if (!channel->req_mem) { 3599 return -1; 3600 } 3601 3602 TAILQ_INIT(&channel->reqs); 3603 3604 for (i = 0; i < max_ops; i++) { 3605 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3606 } 3607 3608 channel->bs = bs; 3609 channel->dev = dev; 3610 channel->dev_channel = dev->create_channel(dev); 3611 3612 if (!channel->dev_channel) { 3613 SPDK_ERRLOG("Failed to create device channel.\n"); 3614 free(channel->req_mem); 3615 return -1; 3616 } 3617 3618 channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_NUMA_ID_ANY, 3619 SPDK_MALLOC_DMA); 3620 if (!channel->new_cluster_page) { 3621 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3622 free(channel->req_mem); 3623 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3624 return -1; 3625 } 3626 3627 TAILQ_INIT(&channel->need_cluster_alloc); 3628 TAILQ_INIT(&channel->queued_io); 3629 RB_INIT(&channel->esnap_channels); 3630 3631 return 0; 3632 } 3633 3634 static void 3635 bs_channel_destroy(void *io_device, void *ctx_buf) 3636 { 3637 struct spdk_bs_channel *channel = ctx_buf; 3638 spdk_bs_user_op_t *op; 3639 3640 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3641 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3642 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3643 bs_user_op_abort(op, -EIO); 3644 } 3645 3646 while (!TAILQ_EMPTY(&channel->queued_io)) { 3647 op = TAILQ_FIRST(&channel->queued_io); 3648 TAILQ_REMOVE(&channel->queued_io, op, link); 3649 bs_user_op_abort(op, -EIO); 3650 } 3651 3652 blob_esnap_destroy_bs_channel(channel); 3653 3654 free(channel->req_mem); 3655 spdk_free(channel->new_cluster_page); 3656 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3657 } 3658 3659 static void 3660 bs_dev_destroy(void *io_device) 3661 { 3662 struct spdk_blob_store *bs = io_device; 3663 struct spdk_blob *blob, *blob_tmp; 3664 3665 bs->dev->destroy(bs->dev); 3666 3667 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3668 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3669 spdk_bit_array_clear(bs->open_blobids, blob->id); 3670 blob_free(blob); 3671 } 3672 3673 spdk_spin_destroy(&bs->used_lock); 3674 3675 spdk_bit_array_free(&bs->open_blobids); 3676 spdk_bit_array_free(&bs->used_blobids); 3677 spdk_bit_array_free(&bs->used_md_pages); 3678 spdk_bit_pool_free(&bs->used_clusters); 3679 /* 3680 * If this function is called for any reason except a successful unload, 3681 * the unload_cpl type will be NONE and this will be a nop. 
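* On a successful unload, the completion stored in unload_cpl fires here, after the io_device unregistration has destroyed every channel.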
3682 */ 3683 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3684 3685 free(bs); 3686 } 3687 3688 static int 3689 bs_blob_list_add(struct spdk_blob *blob) 3690 { 3691 spdk_blob_id snapshot_id; 3692 struct spdk_blob_list *snapshot_entry = NULL; 3693 struct spdk_blob_list *clone_entry = NULL; 3694 3695 assert(blob != NULL); 3696 3697 snapshot_id = blob->parent_id; 3698 if (snapshot_id == SPDK_BLOBID_INVALID || 3699 snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 3700 return 0; 3701 } 3702 3703 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3704 if (snapshot_entry == NULL) { 3705 /* Snapshot not found */ 3706 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3707 if (snapshot_entry == NULL) { 3708 return -ENOMEM; 3709 } 3710 snapshot_entry->id = snapshot_id; 3711 TAILQ_INIT(&snapshot_entry->clones); 3712 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3713 } else { 3714 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3715 if (clone_entry->id == blob->id) { 3716 break; 3717 } 3718 } 3719 } 3720 3721 if (clone_entry == NULL) { 3722 /* Clone not found */ 3723 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3724 if (clone_entry == NULL) { 3725 return -ENOMEM; 3726 } 3727 clone_entry->id = blob->id; 3728 TAILQ_INIT(&clone_entry->clones); 3729 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3730 snapshot_entry->clone_count++; 3731 } 3732 3733 return 0; 3734 } 3735 3736 static void 3737 bs_blob_list_remove(struct spdk_blob *blob) 3738 { 3739 struct spdk_blob_list *snapshot_entry = NULL; 3740 struct spdk_blob_list *clone_entry = NULL; 3741 3742 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3743 3744 if (snapshot_entry == NULL) { 3745 return; 3746 } 3747 3748 blob->parent_id = SPDK_BLOBID_INVALID; 3749 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3750 free(clone_entry); 3751 3752 snapshot_entry->clone_count--; 3753 } 3754 3755 static int 3756 bs_blob_list_free(struct spdk_blob_store *bs) 3757 { 3758 struct spdk_blob_list *snapshot_entry; 3759 struct spdk_blob_list *snapshot_entry_tmp; 3760 struct spdk_blob_list *clone_entry; 3761 struct spdk_blob_list *clone_entry_tmp; 3762 3763 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3764 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3765 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3766 free(clone_entry); 3767 } 3768 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3769 free(snapshot_entry); 3770 } 3771 3772 return 0; 3773 } 3774 3775 static void 3776 bs_free(struct spdk_blob_store *bs) 3777 { 3778 bs_blob_list_free(bs); 3779 3780 bs_unregister_md_thread(bs); 3781 spdk_io_device_unregister(bs, bs_dev_destroy); 3782 } 3783 3784 void 3785 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3786 { 3787 3788 if (!opts) { 3789 SPDK_ERRLOG("opts should not be NULL\n"); 3790 return; 3791 } 3792 3793 if (!opts_size) { 3794 SPDK_ERRLOG("opts_size should not be zero\n"); 3795 return; 3796 } 3797 3798 memset(opts, 0, opts_size); 3799 opts->opts_size = opts_size; 3800 3801 #define FIELD_OK(field) \ 3802 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3803 3804 #define SET_FIELD(field, value) \ 3805 if (FIELD_OK(field)) { \ 3806 opts->field = value; \ 3807 } \ 3808 3809 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3810 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3811 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3812
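/* FIELD_OK() compares offsetof(field) + sizeof(field) against the caller-supplied opts_size, so a caller compiled before a field (e.g. esnap_bs_dev_create) existed simply skips that default rather than writing past the end of its structure. */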
SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3813 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3814 3815 if (FIELD_OK(bstype)) { 3816 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3817 } 3818 3819 SET_FIELD(iter_cb_fn, NULL); 3820 SET_FIELD(iter_cb_arg, NULL); 3821 SET_FIELD(force_recover, false); 3822 SET_FIELD(esnap_bs_dev_create, NULL); 3823 SET_FIELD(esnap_ctx, NULL); 3824 3825 #undef FIELD_OK 3826 #undef SET_FIELD 3827 } 3828 3829 static int 3830 bs_opts_verify(struct spdk_bs_opts *opts) 3831 { 3832 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3833 opts->max_channel_ops == 0) { 3834 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3835 return -1; 3836 } 3837 3838 return 0; 3839 } 3840 3841 /* START spdk_bs_load */ 3842 3843 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3844 3845 struct spdk_bs_load_ctx { 3846 struct spdk_blob_store *bs; 3847 struct spdk_bs_super_block *super; 3848 3849 struct spdk_bs_md_mask *mask; 3850 bool in_page_chain; 3851 uint32_t page_index; 3852 uint32_t cur_page; 3853 struct spdk_blob_md_page *page; 3854 3855 uint64_t num_extent_pages; 3856 uint32_t *extent_page_num; 3857 struct spdk_blob_md_page *extent_pages; 3858 struct spdk_bit_array *used_clusters; 3859 3860 spdk_bs_sequence_t *seq; 3861 spdk_blob_op_with_handle_complete iter_cb_fn; 3862 void *iter_cb_arg; 3863 struct spdk_blob *blob; 3864 spdk_blob_id blobid; 3865 3866 bool force_recover; 3867 3868 /* These fields are used in the spdk_bs_dump path. */ 3869 bool dumping; 3870 FILE *fp; 3871 spdk_bs_dump_print_xattr print_xattr_fn; 3872 char xattr_name[4096]; 3873 }; 3874 3875 static int 3876 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3877 struct spdk_bs_load_ctx **_ctx) 3878 { 3879 struct spdk_blob_store *bs; 3880 struct spdk_bs_load_ctx *ctx; 3881 uint64_t dev_size; 3882 int rc; 3883 3884 dev_size = dev->blocklen * dev->blockcnt; 3885 if (dev_size < opts->cluster_sz) { 3886 /* Device size cannot be smaller than cluster size of blobstore */ 3887 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3888 dev_size, opts->cluster_sz); 3889 return -ENOSPC; 3890 } 3891 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 3892 /* Cluster size cannot be smaller than page size */ 3893 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3894 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3895 return -EINVAL; 3896 } 3897 bs = calloc(1, sizeof(struct spdk_blob_store)); 3898 if (!bs) { 3899 return -ENOMEM; 3900 } 3901 3902 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3903 if (!ctx) { 3904 free(bs); 3905 return -ENOMEM; 3906 } 3907 3908 ctx->bs = bs; 3909 ctx->iter_cb_fn = opts->iter_cb_fn; 3910 ctx->iter_cb_arg = opts->iter_cb_arg; 3911 ctx->force_recover = opts->force_recover; 3912 3913 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3914 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 3915 if (!ctx->super) { 3916 free(ctx); 3917 free(bs); 3918 return -ENOMEM; 3919 } 3920 3921 RB_INIT(&bs->open_blobs); 3922 TAILQ_INIT(&bs->snapshots); 3923 bs->dev = dev; 3924 bs->md_thread = spdk_get_thread(); 3925 assert(bs->md_thread != NULL); 3926 3927 /* 3928 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3929 * even multiple of the cluster size. 
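* The division below truncates, so any partial cluster at the end of the device is simply left unused.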
3930 */ 3931 bs->cluster_sz = opts->cluster_sz; 3932 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3933 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3934 if (!ctx->used_clusters) { 3935 spdk_free(ctx->super); 3936 free(ctx); 3937 free(bs); 3938 return -ENOMEM; 3939 } 3940 3941 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3942 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3943 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3944 } 3945 bs->num_free_clusters = bs->total_clusters; 3946 bs->io_unit_size = dev->blocklen; 3947 3948 bs->max_channel_ops = opts->max_channel_ops; 3949 bs->super_blob = SPDK_BLOBID_INVALID; 3950 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3951 bs->esnap_bs_dev_create = opts->esnap_bs_dev_create; 3952 bs->esnap_ctx = opts->esnap_ctx; 3953 3954 /* The metadata is assumed to be at least 1 page */ 3955 bs->used_md_pages = spdk_bit_array_create(1); 3956 bs->used_blobids = spdk_bit_array_create(0); 3957 bs->open_blobids = spdk_bit_array_create(0); 3958 3959 spdk_spin_init(&bs->used_lock); 3960 3961 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3962 sizeof(struct spdk_bs_channel), "blobstore"); 3963 rc = bs_register_md_thread(bs); 3964 if (rc == -1) { 3965 spdk_io_device_unregister(bs, NULL); 3966 spdk_spin_destroy(&bs->used_lock); 3967 spdk_bit_array_free(&bs->open_blobids); 3968 spdk_bit_array_free(&bs->used_blobids); 3969 spdk_bit_array_free(&bs->used_md_pages); 3970 spdk_bit_array_free(&ctx->used_clusters); 3971 spdk_free(ctx->super); 3972 free(ctx); 3973 free(bs); 3974 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3975 return -ENOMEM; 3976 } 3977 3978 *_ctx = ctx; 3979 *_bs = bs; 3980 return 0; 3981 } 3982 3983 static void 3984 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3985 { 3986 assert(bserrno != 0); 3987 3988 spdk_free(ctx->super); 3989 bs_sequence_finish(ctx->seq, bserrno); 3990 bs_free(ctx->bs); 3991 spdk_bit_array_free(&ctx->used_clusters); 3992 free(ctx); 3993 } 3994 3995 static void 3996 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3997 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3998 { 3999 /* Update the values in the super block */ 4000 super->super_blob = bs->super_blob; 4001 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 4002 super->crc = blob_md_page_calc_crc(super); 4003 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 4004 bs_byte_to_lba(bs, sizeof(*super)), 4005 cb_fn, cb_arg); 4006 } 4007 4008 static void 4009 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4010 { 4011 struct spdk_bs_load_ctx *ctx = arg; 4012 uint64_t mask_size, lba, lba_count; 4013 4014 /* Write out the used clusters mask */ 4015 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 4016 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4017 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4018 if (!ctx->mask) { 4019 bs_load_ctx_fail(ctx, -ENOMEM); 4020 return; 4021 } 4022 4023 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 4024 ctx->mask->length = ctx->bs->total_clusters; 4025 /* We could get here through the normal unload path, or through dirty 4026 * shutdown recovery. For the normal unload path, we use the mask from 4027 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 4028 * only the bit array from the load ctx. 
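* Either way the serialized form is the same: an spdk_bs_md_mask header
* (its type and a length in bits) followed by the raw bitmap, padded out
* to used_cluster_mask_len pages by the mask_size allocation above.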
4029 */ 4030 if (ctx->bs->used_clusters) { 4031 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 4032 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 4033 } else { 4034 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 4035 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 4036 } 4037 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4038 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4039 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4040 } 4041 4042 static void 4043 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4044 { 4045 struct spdk_bs_load_ctx *ctx = arg; 4046 uint64_t mask_size, lba, lba_count; 4047 4048 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 4049 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4050 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4051 if (!ctx->mask) { 4052 bs_load_ctx_fail(ctx, -ENOMEM); 4053 return; 4054 } 4055 4056 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 4057 ctx->mask->length = ctx->super->md_len; 4058 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 4059 4060 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4061 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4062 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4063 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4064 } 4065 4066 static void 4067 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4068 { 4069 struct spdk_bs_load_ctx *ctx = arg; 4070 uint64_t mask_size, lba, lba_count; 4071 4072 if (ctx->super->used_blobid_mask_len == 0) { 4073 /* 4074 * This is a pre-v3 on-disk format where the blobid mask does not get 4075 * written to disk. 
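* There is nothing to persist in that case; on the next load the zero
* used_blobid_mask_len makes bs_load_super_cpl() fall back to bs_recover(),
* which rebuilds the blobid bitmap by replaying the metadata pages.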
4076 */ 4077 cb_fn(seq, arg, 0); 4078 return; 4079 } 4080 4081 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 4082 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4083 SPDK_MALLOC_DMA); 4084 if (!ctx->mask) { 4085 bs_load_ctx_fail(ctx, -ENOMEM); 4086 return; 4087 } 4088 4089 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 4090 ctx->mask->length = ctx->super->md_len; 4091 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 4092 4093 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 4094 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4095 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4096 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4097 } 4098 4099 static void 4100 blob_set_thin_provision(struct spdk_blob *blob) 4101 { 4102 blob_verify_md_op(blob); 4103 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4104 blob->state = SPDK_BLOB_STATE_DIRTY; 4105 } 4106 4107 static void 4108 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 4109 { 4110 blob_verify_md_op(blob); 4111 blob->clear_method = clear_method; 4112 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 4113 blob->state = SPDK_BLOB_STATE_DIRTY; 4114 } 4115 4116 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 4117 4118 static void 4119 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 4120 { 4121 struct spdk_bs_load_ctx *ctx = cb_arg; 4122 spdk_blob_id id; 4123 int64_t page_num; 4124 4125 /* Iterate to the next blob (we can't use the spdk_bs_iter_next function since our 4126 * last blob has been removed) */ 4127 page_num = bs_blobid_to_page(ctx->blobid); 4128 page_num++; 4129 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 4130 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 4131 bs_load_iter(ctx, NULL, -ENOENT); 4132 return; 4133 } 4134 4135 id = bs_page_to_blobid(page_num); 4136 4137 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 4138 } 4139 4140 static void 4141 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 4142 { 4143 struct spdk_bs_load_ctx *ctx = cb_arg; 4144 4145 if (bserrno != 0) { 4146 SPDK_ERRLOG("Failed to close corrupted blob\n"); 4147 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4148 return; 4149 } 4150 4151 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 4152 } 4153 4154 static void 4155 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 4156 { 4157 struct spdk_bs_load_ctx *ctx = cb_arg; 4158 uint64_t i; 4159 4160 if (bserrno != 0) { 4161 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4162 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4163 return; 4164 } 4165 4166 /* Snapshot and clone have the same copy of the cluster map and extent pages 4167 * at this point. Let's clear both for the snapshot now, 4168 * so that they won't be cleared for the clone later when we remove the snapshot.
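* (Zeroing the snapshot's cluster and extent page arrays only drops its
* references; the physical clusters stay allocated and still back the clone.)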
4169 * Also set thin provision to pass data corruption check */ 4170 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 4171 ctx->blob->active.clusters[i] = 0; 4172 } 4173 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 4174 ctx->blob->active.extent_pages[i] = 0; 4175 } 4176 4177 ctx->blob->active.num_allocated_clusters = 0; 4178 4179 ctx->blob->md_ro = false; 4180 4181 blob_set_thin_provision(ctx->blob); 4182 4183 ctx->blobid = ctx->blob->id; 4184 4185 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 4186 } 4187 4188 static void 4189 bs_update_corrupted_blob(void *cb_arg, int bserrno) 4190 { 4191 struct spdk_bs_load_ctx *ctx = cb_arg; 4192 4193 if (bserrno != 0) { 4194 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4195 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4196 return; 4197 } 4198 4199 ctx->blob->md_ro = false; 4200 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 4201 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 4202 spdk_blob_set_read_only(ctx->blob); 4203 4204 if (ctx->iter_cb_fn) { 4205 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 4206 } 4207 bs_blob_list_add(ctx->blob); 4208 4209 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4210 } 4211 4212 static void 4213 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 4214 { 4215 struct spdk_bs_load_ctx *ctx = cb_arg; 4216 4217 if (bserrno != 0) { 4218 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 4219 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4220 return; 4221 } 4222 4223 if (blob->parent_id == ctx->blob->id) { 4224 /* Power failure occurred before updating clone (snapshot delete case) 4225 * or after updating clone (creating snapshot case) - keep snapshot */ 4226 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 4227 } else { 4228 /* Power failure occurred after updating clone (snapshot delete case) 4229 * or before updating clone (creating snapshot case) - remove snapshot */ 4230 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 4231 } 4232 } 4233 4234 static void 4235 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 4236 { 4237 struct spdk_bs_load_ctx *ctx = arg; 4238 const void *value; 4239 size_t len; 4240 int rc = 0; 4241 4242 if (bserrno == 0) { 4243 /* Examine blob if it is corrupted after power failure. Fix 4244 * the ones that can be fixed and remove any other corrupted 4245 * ones. If it is not corrupted just process it */ 4246 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 4247 if (rc != 0) { 4248 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 4249 if (rc != 0) { 4250 /* Not corrupted - process it and continue with iterating through blobs */ 4251 if (ctx->iter_cb_fn) { 4252 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 4253 } 4254 bs_blob_list_add(blob); 4255 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 4256 return; 4257 } 4258 4259 } 4260 4261 assert(len == sizeof(spdk_blob_id)); 4262 4263 ctx->blob = blob; 4264 4265 /* Open clone to check if we are able to fix this blob or should we remove it */ 4266 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 4267 return; 4268 } else if (bserrno == -ENOENT) { 4269 bserrno = 0; 4270 } else { 4271 /* 4272 * This case needs to be looked at further. Same problem 4273 * exists with applications that rely on explicit blob 4274 * iteration. We should just skip the blob that failed 4275 * to load and continue on to the next one. 
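* Until that is implemented, any such error stops the iteration here and the
* load completes below with bserrno propagated through the sequence.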
4276 */ 4277 SPDK_ERRLOG("Error iterating blobs\n"); 4278 } 4279 4280 ctx->iter_cb_fn = NULL; 4281 4282 spdk_free(ctx->super); 4283 spdk_free(ctx->mask); 4284 bs_sequence_finish(ctx->seq, bserrno); 4285 free(ctx); 4286 } 4287 4288 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 4289 4290 static void 4291 bs_load_complete(struct spdk_bs_load_ctx *ctx) 4292 { 4293 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters); 4294 if (ctx->dumping) { 4295 bs_dump_read_md_page(ctx->seq, ctx); 4296 return; 4297 } 4298 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx); 4299 } 4300 4301 static void 4302 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4303 { 4304 struct spdk_bs_load_ctx *ctx = cb_arg; 4305 int rc; 4306 4307 /* The type must be correct */ 4308 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 4309 4310 /* The length of the mask (in bits) must not be greater than 4311 * the length of the buffer (converted to bits) */ 4312 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 4313 4314 /* The length of the mask must be exactly equal to the size 4315 * (in pages) of the metadata region */ 4316 assert(ctx->mask->length == ctx->super->md_len); 4317 4318 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length); 4319 if (rc < 0) { 4320 spdk_free(ctx->mask); 4321 bs_load_ctx_fail(ctx, rc); 4322 return; 4323 } 4324 4325 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask); 4326 bs_load_complete(ctx); 4327 } 4328 4329 static void 4330 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4331 { 4332 struct spdk_bs_load_ctx *ctx = cb_arg; 4333 uint64_t lba, lba_count, mask_size; 4334 int rc; 4335 4336 if (bserrno != 0) { 4337 bs_load_ctx_fail(ctx, bserrno); 4338 return; 4339 } 4340 4341 /* The type must be correct */ 4342 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 4343 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4344 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 4345 struct spdk_blob_md_page) * 8)); 4346 /* 4347 * The length of the mask must be equal to or larger than the total number of clusters. It may be 4348 * larger than the total number of clusters due to a failed spdk_bs_grow.
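* For example, a grow interrupted by power failure may have persisted a
* used_clusters mask sized for the enlarged device before the super block
* size was updated, leaving mask->length greater than total_clusters until
* it is shrunk just below.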
4349 */ 4350 assert(ctx->mask->length >= ctx->bs->total_clusters); 4351 if (ctx->mask->length > ctx->bs->total_clusters) { 4352 SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n"); 4353 ctx->mask->length = ctx->bs->total_clusters; 4354 } 4355 4356 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length); 4357 if (rc < 0) { 4358 spdk_free(ctx->mask); 4359 bs_load_ctx_fail(ctx, rc); 4360 return; 4361 } 4362 4363 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask); 4364 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters); 4365 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 4366 4367 spdk_free(ctx->mask); 4368 4369 /* Read the used blobids mask */ 4370 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 4371 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4372 SPDK_MALLOC_DMA); 4373 if (!ctx->mask) { 4374 bs_load_ctx_fail(ctx, -ENOMEM); 4375 return; 4376 } 4377 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4378 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4379 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4380 bs_load_used_blobids_cpl, ctx); 4381 } 4382 4383 static void 4384 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4385 { 4386 struct spdk_bs_load_ctx *ctx = cb_arg; 4387 uint64_t lba, lba_count, mask_size; 4388 int rc; 4389 4390 if (bserrno != 0) { 4391 bs_load_ctx_fail(ctx, bserrno); 4392 return; 4393 } 4394 4395 /* The type must be correct */ 4396 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 4397 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4398 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 4399 8)); 4400 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 4401 if (ctx->mask->length != ctx->super->md_len) { 4402 SPDK_ERRLOG("mismatched md_len in used_pages mask: " 4403 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n", 4404 ctx->mask->length, ctx->super->md_len); 4405 assert(false); 4406 } 4407 4408 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length); 4409 if (rc < 0) { 4410 spdk_free(ctx->mask); 4411 bs_load_ctx_fail(ctx, rc); 4412 return; 4413 } 4414 4415 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4416 spdk_free(ctx->mask); 4417 4418 /* Read the used clusters mask */ 4419 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 4420 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4421 SPDK_MALLOC_DMA); 4422 if (!ctx->mask) { 4423 bs_load_ctx_fail(ctx, -ENOMEM); 4424 return; 4425 } 4426 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4427 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4428 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4429 bs_load_used_clusters_cpl, ctx); 4430 } 4431 4432 static void 4433 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx) 4434 { 4435 uint64_t lba, lba_count, mask_size; 4436 4437 /* Read the used pages mask */ 4438 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 4439 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4440 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4441 if (!ctx->mask) { 4442 bs_load_ctx_fail(ctx, -ENOMEM); 4443 return; 4444 } 4445 4446 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4447 lba_count = bs_page_to_lba(ctx->bs,
ctx->super->used_page_mask_len); 4448 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 4449 bs_load_used_pages_cpl, ctx); 4450 } 4451 4452 static int 4453 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 4454 { 4455 struct spdk_blob_store *bs = ctx->bs; 4456 struct spdk_blob_md_descriptor *desc; 4457 size_t cur_desc = 0; 4458 4459 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4460 while (cur_desc < sizeof(page->descriptors)) { 4461 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4462 if (desc->length == 0) { 4463 /* If padding and length are 0, this terminates the page */ 4464 break; 4465 } 4466 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4467 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4468 unsigned int i, j; 4469 unsigned int cluster_count = 0; 4470 uint32_t cluster_idx; 4471 4472 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4473 4474 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4475 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 4476 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 4477 /* 4478 * cluster_idx = 0 means an unallocated cluster - don't mark that 4479 * in the used cluster map. 4480 */ 4481 if (cluster_idx != 0) { 4482 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 4483 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 4484 if (bs->num_free_clusters == 0) { 4485 return -ENOSPC; 4486 } 4487 bs->num_free_clusters--; 4488 } 4489 cluster_count++; 4490 } 4491 } 4492 if (cluster_count == 0) { 4493 return -EINVAL; 4494 } 4495 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4496 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4497 uint32_t i; 4498 uint32_t cluster_count = 0; 4499 uint32_t cluster_idx; 4500 size_t cluster_idx_length; 4501 4502 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4503 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4504 4505 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4506 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4507 return -EINVAL; 4508 } 4509 4510 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4511 cluster_idx = desc_extent->cluster_idx[i]; 4512 /* 4513 * cluster_idx = 0 means an unallocated cluster - don't mark that 4514 * in the used cluster map. 
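* For instance, an extent page carrying cluster_idx[] = { 100, 0, 102 }
* marks clusters 100 and 102 as used and leaves the middle cluster thin.
* Note that the range check just below can never fire as written: a
* cluster_idx cannot be both below start_cluster_idx and at or above
* start_cluster_idx + cluster_count at the same time.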
4515 */ 4516 if (cluster_idx != 0) { 4517 if (cluster_idx < desc_extent->start_cluster_idx && 4518 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4519 return -EINVAL; 4520 } 4521 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4522 if (bs->num_free_clusters == 0) { 4523 return -ENOSPC; 4524 } 4525 bs->num_free_clusters--; 4526 } 4527 cluster_count++; 4528 } 4529 4530 if (cluster_count == 0) { 4531 return -EINVAL; 4532 } 4533 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4534 /* Skip this item */ 4535 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4536 /* Skip this item */ 4537 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4538 /* Skip this item */ 4539 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4540 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4541 uint32_t num_extent_pages = ctx->num_extent_pages; 4542 uint32_t i; 4543 size_t extent_pages_length; 4544 void *tmp; 4545 4546 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4547 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4548 4549 if (desc_extent_table->length == 0 || 4550 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4551 return -EINVAL; 4552 } 4553 4554 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4555 if (desc_extent_table->extent_page[i].page_idx != 0) { 4556 if (desc_extent_table->extent_page[i].num_pages != 1) { 4557 return -EINVAL; 4558 } 4559 num_extent_pages += 1; 4560 } 4561 } 4562 4563 if (num_extent_pages > 0) { 4564 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4565 if (tmp == NULL) { 4566 return -ENOMEM; 4567 } 4568 ctx->extent_page_num = tmp; 4569 4570 /* Extent table entries contain md page numbers for extent pages. 4571 * Zeroes represent unallocated extent pages, those are run-length-encoded. 4572 */ 4573 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4574 if (desc_extent_table->extent_page[i].page_idx != 0) { 4575 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4576 ctx->num_extent_pages += 1; 4577 } 4578 } 4579 } 4580 } else { 4581 /* Error */ 4582 return -EINVAL; 4583 } 4584 /* Advance to the next descriptor */ 4585 cur_desc += sizeof(*desc) + desc->length; 4586 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4587 break; 4588 } 4589 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4590 } 4591 return 0; 4592 } 4593 4594 static bool 4595 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4596 { 4597 uint32_t crc; 4598 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4599 size_t desc_len; 4600 4601 crc = blob_md_page_calc_crc(page); 4602 if (crc != page->crc) { 4603 return false; 4604 } 4605 4606 /* Extent page should always be of sequence num 0. */ 4607 if (page->sequence_num != 0) { 4608 return false; 4609 } 4610 4611 /* Descriptor type must be EXTENT_PAGE. */ 4612 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4613 return false; 4614 } 4615 4616 /* Descriptor length cannot exceed the page. */ 4617 desc_len = sizeof(*desc) + desc->length; 4618 if (desc_len > sizeof(page->descriptors)) { 4619 return false; 4620 } 4621 4622 /* It has to be the only descriptor in the page. 
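* If another descriptor header still fits behind this one, it must be
* zero-length padding; a non-zero length would mean a second descriptor
* follows, which extent pages do not allow.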
*/ 4623 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 4624 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 4625 if (desc->length != 0) { 4626 return false; 4627 } 4628 } 4629 4630 return true; 4631 } 4632 4633 static bool 4634 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 4635 { 4636 uint32_t crc; 4637 struct spdk_blob_md_page *page = ctx->page; 4638 4639 crc = blob_md_page_calc_crc(page); 4640 if (crc != page->crc) { 4641 return false; 4642 } 4643 4644 /* First page of a sequence should match the blobid. */ 4645 if (page->sequence_num == 0 && 4646 bs_page_to_blobid(ctx->cur_page) != page->id) { 4647 return false; 4648 } 4649 assert(bs_load_cur_extent_page_valid(page) == false); 4650 4651 return true; 4652 } 4653 4654 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx); 4655 4656 static void 4657 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4658 { 4659 struct spdk_bs_load_ctx *ctx = cb_arg; 4660 4661 if (bserrno != 0) { 4662 bs_load_ctx_fail(ctx, bserrno); 4663 return; 4664 } 4665 4666 bs_load_complete(ctx); 4667 } 4668 4669 static void 4670 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4671 { 4672 struct spdk_bs_load_ctx *ctx = cb_arg; 4673 4674 spdk_free(ctx->mask); 4675 ctx->mask = NULL; 4676 4677 if (bserrno != 0) { 4678 bs_load_ctx_fail(ctx, bserrno); 4679 return; 4680 } 4681 4682 bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl); 4683 } 4684 4685 static void 4686 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4687 { 4688 struct spdk_bs_load_ctx *ctx = cb_arg; 4689 4690 spdk_free(ctx->mask); 4691 ctx->mask = NULL; 4692 4693 if (bserrno != 0) { 4694 bs_load_ctx_fail(ctx, bserrno); 4695 return; 4696 } 4697 4698 bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl); 4699 } 4700 4701 static void 4702 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx) 4703 { 4704 bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl); 4705 } 4706 4707 static void 4708 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx) 4709 { 4710 uint64_t num_md_clusters; 4711 uint64_t i; 4712 4713 ctx->in_page_chain = false; 4714 4715 do { 4716 ctx->page_index++; 4717 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 4718 4719 if (ctx->page_index < ctx->super->md_len) { 4720 ctx->cur_page = ctx->page_index; 4721 bs_load_replay_cur_md_page(ctx); 4722 } else { 4723 /* Claim all of the clusters used by the metadata */ 4724 num_md_clusters = spdk_divide_round_up( 4725 ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster); 4726 for (i = 0; i < num_md_clusters; i++) { 4727 spdk_bit_array_set(ctx->used_clusters, i); 4728 } 4729 ctx->bs->num_free_clusters -= num_md_clusters; 4730 spdk_free(ctx->page); 4731 bs_load_write_used_md(ctx); 4732 } 4733 } 4734 4735 static void 4736 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4737 { 4738 struct spdk_bs_load_ctx *ctx = cb_arg; 4739 uint32_t page_num; 4740 uint64_t i; 4741 4742 if (bserrno != 0) { 4743 spdk_free(ctx->extent_pages); 4744 bs_load_ctx_fail(ctx, bserrno); 4745 return; 4746 } 4747 4748 for (i = 0; i < ctx->num_extent_pages; i++) { 4749 /* Extent pages are only read when present within the md chain. 4750 * Metadata integrity is broken if such a page is not a valid extent page.
*/ 4751 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4752 spdk_free(ctx->extent_pages); 4753 bs_load_ctx_fail(ctx, -EILSEQ); 4754 return; 4755 } 4756 4757 page_num = ctx->extent_page_num[i]; 4758 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4759 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4760 spdk_free(ctx->extent_pages); 4761 bs_load_ctx_fail(ctx, -EILSEQ); 4762 return; 4763 } 4764 } 4765 4766 spdk_free(ctx->extent_pages); 4767 free(ctx->extent_page_num); 4768 ctx->extent_page_num = NULL; 4769 ctx->num_extent_pages = 0; 4770 4771 bs_load_replay_md_chain_cpl(ctx); 4772 } 4773 4774 static void 4775 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4776 { 4777 spdk_bs_batch_t *batch; 4778 uint32_t page; 4779 uint64_t lba; 4780 uint64_t i; 4781 4782 ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0, 4783 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4784 if (!ctx->extent_pages) { 4785 bs_load_ctx_fail(ctx, -ENOMEM); 4786 return; 4787 } 4788 4789 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4790 4791 for (i = 0; i < ctx->num_extent_pages; i++) { 4792 page = ctx->extent_page_num[i]; 4793 assert(page < ctx->super->md_len); 4794 lba = bs_md_page_to_lba(ctx->bs, page); 4795 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4796 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE)); 4797 } 4798 4799 bs_batch_close(batch); 4800 } 4801 4802 static void 4803 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4804 { 4805 struct spdk_bs_load_ctx *ctx = cb_arg; 4806 uint32_t page_num; 4807 struct spdk_blob_md_page *page; 4808 4809 if (bserrno != 0) { 4810 bs_load_ctx_fail(ctx, bserrno); 4811 return; 4812 } 4813 4814 page_num = ctx->cur_page; 4815 page = ctx->page; 4816 if (bs_load_cur_md_page_valid(ctx) == true) { 4817 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4818 spdk_spin_lock(&ctx->bs->used_lock); 4819 bs_claim_md_page(ctx->bs, page_num); 4820 spdk_spin_unlock(&ctx->bs->used_lock); 4821 if (page->sequence_num == 0) { 4822 SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num); 4823 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4824 } 4825 if (bs_load_replay_md_parse_page(ctx, page)) { 4826 bs_load_ctx_fail(ctx, -EILSEQ); 4827 return; 4828 } 4829 if (page->next != SPDK_INVALID_MD_PAGE) { 4830 ctx->in_page_chain = true; 4831 ctx->cur_page = page->next; 4832 bs_load_replay_cur_md_page(ctx); 4833 return; 4834 } 4835 if (ctx->num_extent_pages != 0) { 4836 bs_load_replay_extent_pages(ctx); 4837 return; 4838 } 4839 } 4840 } 4841 bs_load_replay_md_chain_cpl(ctx); 4842 } 4843 4844 static void 4845 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4846 { 4847 uint64_t lba; 4848 4849 assert(ctx->cur_page < ctx->super->md_len); 4850 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4851 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4852 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4853 bs_load_replay_md_cpl, ctx); 4854 } 4855 4856 static void 4857 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4858 { 4859 ctx->page_index = 0; 4860 ctx->cur_page = 0; 4861 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4862 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4863 if (!ctx->page) { 4864 bs_load_ctx_fail(ctx, -ENOMEM); 4865 return; 4866 } 4867 bs_load_replay_cur_md_page(ctx); 4868 } 4869 4870 static void 4871 bs_recover(struct spdk_bs_load_ctx *ctx) 4872 { 4873 int rc; 4874 4875 SPDK_NOTICELOG("Performing recovery on blobstore\n"); 4876 rc = 
spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4877 if (rc < 0) { 4878 bs_load_ctx_fail(ctx, -ENOMEM); 4879 return; 4880 } 4881 4882 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4883 if (rc < 0) { 4884 bs_load_ctx_fail(ctx, -ENOMEM); 4885 return; 4886 } 4887 4888 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4889 if (rc < 0) { 4890 bs_load_ctx_fail(ctx, -ENOMEM); 4891 return; 4892 } 4893 4894 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4895 if (rc < 0) { 4896 bs_load_ctx_fail(ctx, -ENOMEM); 4897 return; 4898 } 4899 4900 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4901 bs_load_replay_md(ctx); 4902 } 4903 4904 static int 4905 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4906 { 4907 int rc; 4908 4909 if (ctx->super->size == 0) { 4910 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4911 } 4912 4913 if (ctx->super->io_unit_size == 0) { 4914 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4915 } 4916 4917 ctx->bs->clean = 1; 4918 ctx->bs->cluster_sz = ctx->super->cluster_size; 4919 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4920 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 4921 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) { 4922 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster); 4923 } 4924 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4925 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4926 if (rc < 0) { 4927 return -ENOMEM; 4928 } 4929 ctx->bs->md_start = ctx->super->md_start; 4930 ctx->bs->md_len = ctx->super->md_len; 4931 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4932 if (rc < 0) { 4933 return -ENOMEM; 4934 } 4935 4936 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4937 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4938 ctx->bs->super_blob = ctx->super->super_blob; 4939 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4940 4941 return 0; 4942 } 4943 4944 static void 4945 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4946 { 4947 struct spdk_bs_load_ctx *ctx = cb_arg; 4948 int rc; 4949 4950 rc = bs_super_validate(ctx->super, ctx->bs); 4951 if (rc != 0) { 4952 bs_load_ctx_fail(ctx, rc); 4953 return; 4954 } 4955 4956 rc = bs_parse_super(ctx); 4957 if (rc < 0) { 4958 bs_load_ctx_fail(ctx, rc); 4959 return; 4960 } 4961 4962 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4963 bs_recover(ctx); 4964 } else { 4965 bs_load_read_used_pages(ctx); 4966 } 4967 } 4968 4969 static inline int 4970 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4971 { 4972 4973 if (!src->opts_size) { 4974 SPDK_ERRLOG("opts_size should not be zero value\n"); 4975 return -1; 4976 } 4977 4978 #define FIELD_OK(field) \ 4979 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4980 4981 #define SET_FIELD(field) \ 4982 if (FIELD_OK(field)) { \ 4983 dst->field = src->field; \ 4984 } \ 4985 4986 SET_FIELD(cluster_sz); 4987 SET_FIELD(num_md_pages); 4988 SET_FIELD(max_md_ops); 4989 SET_FIELD(max_channel_ops); 4990 SET_FIELD(clear_method); 4991 4992 if (FIELD_OK(bstype)) { 4993 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 4994 } 4995 SET_FIELD(iter_cb_fn); 4996 SET_FIELD(iter_cb_arg); 4997 SET_FIELD(force_recover); 4998 SET_FIELD(esnap_bs_dev_create); 4999 
SET_FIELD(esnap_ctx); 5000 5001 dst->opts_size = src->opts_size; 5002 5003 /* You should not remove this statement, but need to update the assert statement 5004 * if you add a new field, and also add a corresponding SET_FIELD statement */ 5005 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size"); 5006 5007 #undef FIELD_OK 5008 #undef SET_FIELD 5009 5010 return 0; 5011 } 5012 5013 void 5014 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5015 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5016 { 5017 struct spdk_blob_store *bs; 5018 struct spdk_bs_cpl cpl; 5019 struct spdk_bs_load_ctx *ctx; 5020 struct spdk_bs_opts opts = {}; 5021 int err; 5022 5023 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 5024 5025 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5026 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 5027 dev->destroy(dev); 5028 cb_fn(cb_arg, NULL, -EINVAL); 5029 return; 5030 } 5031 5032 spdk_bs_opts_init(&opts, sizeof(opts)); 5033 if (o) { 5034 if (bs_opts_copy(o, &opts)) { 5035 return; 5036 } 5037 } 5038 5039 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 5040 dev->destroy(dev); 5041 cb_fn(cb_arg, NULL, -EINVAL); 5042 return; 5043 } 5044 5045 err = bs_alloc(dev, &opts, &bs, &ctx); 5046 if (err) { 5047 dev->destroy(dev); 5048 cb_fn(cb_arg, NULL, err); 5049 return; 5050 } 5051 5052 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5053 cpl.u.bs_handle.cb_fn = cb_fn; 5054 cpl.u.bs_handle.cb_arg = cb_arg; 5055 cpl.u.bs_handle.bs = bs; 5056 5057 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5058 if (!ctx->seq) { 5059 spdk_free(ctx->super); 5060 free(ctx); 5061 bs_free(bs); 5062 cb_fn(cb_arg, NULL, -ENOMEM); 5063 return; 5064 } 5065 5066 /* Read the super block */ 5067 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5068 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5069 bs_load_super_cpl, ctx); 5070 } 5071 5072 /* END spdk_bs_load */ 5073 5074 /* START spdk_bs_dump */ 5075 5076 static void 5077 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 5078 { 5079 spdk_free(ctx->super); 5080 5081 /* 5082 * We need to defer calling bs_call_cpl() until after 5083 * dev destruction, so tuck these away for later use. 
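* Setting seq->cpl.type to SPDK_BS_CPL_TYPE_NONE below keeps
* bs_sequence_finish() from running the completion directly; the stashed
* unload_cpl fires later, from the io_device unregister callback, once the
* dev has actually been destroyed.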
5084 */ 5085 ctx->bs->unload_err = bserrno; 5086 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5087 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5088 5089 bs_sequence_finish(seq, 0); 5090 bs_free(ctx->bs); 5091 free(ctx); 5092 } 5093 5094 static void 5095 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5096 { 5097 struct spdk_blob_md_descriptor_xattr *desc_xattr; 5098 uint32_t i; 5099 const char *type; 5100 5101 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 5102 5103 if (desc_xattr->length != 5104 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 5105 desc_xattr->name_length + desc_xattr->value_length) { /* Corrupt xattr descriptor; don't try to print it. */ fprintf(ctx->fp, "XATTR: descriptor length mismatch, skipping\n"); return; 5106 } 5107 5108 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 5109 ctx->xattr_name[desc_xattr->name_length] = '\0'; 5110 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5111 type = "XATTR"; 5112 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5113 type = "XATTR_INTERNAL"; 5114 } else { 5115 assert(false); 5116 type = "XATTR_?"; 5117 } 5118 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name); 5119 fprintf(ctx->fp, " value = \""); 5120 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 5121 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 5122 desc_xattr->value_length); 5123 fprintf(ctx->fp, "\"\n"); 5124 for (i = 0; i < desc_xattr->value_length; i++) { 5125 if (i % 16 == 0) { 5126 fprintf(ctx->fp, " "); 5127 } 5128 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 5129 if ((i + 1) % 16 == 0) { 5130 fprintf(ctx->fp, "\n"); 5131 } 5132 } 5133 if (i % 16 != 0) { 5134 fprintf(ctx->fp, "\n"); 5135 } 5136 } 5137 5138 struct type_flag_desc { 5139 uint64_t mask; 5140 uint64_t val; 5141 const char *name; 5142 }; 5143 5144 static void 5145 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags, 5146 struct type_flag_desc *desc, size_t numflags) 5147 { 5148 uint64_t covered = 0; 5149 size_t i; 5150 5151 for (i = 0; i < numflags; i++) { 5152 if ((desc[i].mask & flags) != desc[i].val) { 5153 continue; 5154 } 5155 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name); 5156 if (desc[i].mask != desc[i].val) { 5157 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")", 5158 desc[i].mask, desc[i].val); 5159 } 5160 fprintf(ctx->fp, "\n"); 5161 covered |= desc[i].mask; 5162 } 5163 if ((flags & ~covered) != 0) { 5164 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered); 5165 } 5166 } 5167 5168 static void 5169 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5170 { 5171 struct spdk_blob_md_descriptor_flags *type_desc; 5172 #define ADD_FLAG(f) { f, f, #f } 5173 #define ADD_MASK_VAL(m, v) { m, v, #v } 5174 static struct type_flag_desc invalid[] = { 5175 ADD_FLAG(SPDK_BLOB_THIN_PROV), 5176 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR), 5177 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE), 5178 }; 5179 static struct type_flag_desc data_ro[] = { 5180 ADD_FLAG(SPDK_BLOB_READ_ONLY), 5181 }; 5182 static struct type_flag_desc md_ro[] = { 5183 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT), 5184 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE), 5185 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP), 5186 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES), 5187 }; 5188 #undef ADD_FLAG 5189 #undef ADD_MASK_VAL 5190 5191 type_desc = (struct
spdk_blob_md_descriptor_flags *)desc; 5192 fprintf(ctx->fp, "Flags:\n"); 5193 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 5194 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 5195 SPDK_COUNTOF(invalid)); 5196 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 5197 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 5198 SPDK_COUNTOF(data_ro)); 5199 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 5200 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 5201 SPDK_COUNTOF(md_ro)); 5202 } 5203 5204 static void 5205 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5206 { 5207 struct spdk_blob_md_descriptor_extent_table *et_desc; 5208 uint64_t num_extent_pages; 5209 uint32_t et_idx; 5210 5211 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 5212 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 5213 sizeof(et_desc->extent_page[0]); 5214 5215 fprintf(ctx->fp, "Extent table:\n"); 5216 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 5217 if (et_desc->extent_page[et_idx].page_idx == 0) { 5218 /* Zeroes represent unallocated extent pages. */ 5219 continue; 5220 } 5221 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 5222 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 5223 et_desc->extent_page[et_idx].num_pages, 5224 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 5225 } 5226 } 5227 5228 static void 5229 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 5230 { 5231 uint32_t page_idx = ctx->cur_page; 5232 struct spdk_blob_md_page *page = ctx->page; 5233 struct spdk_blob_md_descriptor *desc; 5234 size_t cur_desc = 0; 5235 uint32_t crc; 5236 5237 fprintf(ctx->fp, "=========\n"); 5238 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 5239 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 5240 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 5241 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 5242 if (page->next == SPDK_INVALID_MD_PAGE) { 5243 fprintf(ctx->fp, "Next: None\n"); 5244 } else { 5245 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 5246 } 5247 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 5248 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 5249 fprintf(ctx->fp, " md"); 5250 } 5251 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 5252 fprintf(ctx->fp, " blob"); 5253 } 5254 fprintf(ctx->fp, "\n"); 5255 5256 crc = blob_md_page_calc_crc(page); 5257 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 5258 5259 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 5260 while (cur_desc < sizeof(page->descriptors)) { 5261 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 5262 if (desc->length == 0) { 5263 /* If padding and length are 0, this terminates the page */ 5264 break; 5265 } 5266 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 5267 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 5268 unsigned int i; 5269 5270 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 5271 5272 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 5273 if (desc_extent_rle->extents[i].cluster_idx != 0) { 5274 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5275 desc_extent_rle->extents[i].cluster_idx); 5276 } else { 5277 fprintf(ctx->fp, "Unallocated Extent - "); 5278 } 5279 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 5280 fprintf(ctx->fp, "\n"); 5281 } 5282 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 5283 struct spdk_blob_md_descriptor_extent_page *desc_extent; 5284 unsigned int i; 5285 5286 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 5287 5288 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 5289 if (desc_extent->cluster_idx[i] != 0) { 5290 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5291 desc_extent->cluster_idx[i]); 5292 } else { 5293 fprintf(ctx->fp, "Unallocated Extent"); 5294 } 5295 fprintf(ctx->fp, "\n"); 5296 } 5297 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5298 bs_dump_print_xattr(ctx, desc); 5299 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5300 bs_dump_print_xattr(ctx, desc); 5301 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 5302 bs_dump_print_type_flags(ctx, desc); 5303 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 5304 bs_dump_print_extent_table(ctx, desc); 5305 } else { 5306 /* Error */ 5307 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 5308 } 5309 /* Advance to the next descriptor */ 5310 cur_desc += sizeof(*desc) + desc->length; 5311 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 5312 break; 5313 } 5314 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 5315 } 5316 } 5317 5318 static void 5319 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5320 { 5321 struct spdk_bs_load_ctx *ctx = cb_arg; 5322 5323 if (bserrno != 0) { 5324 bs_dump_finish(seq, ctx, bserrno); 5325 return; 5326 } 5327 5328 if (ctx->page->id != 0) { 5329 bs_dump_print_md_page(ctx); 5330 } 5331 5332 ctx->cur_page++; 5333 5334 if (ctx->cur_page < ctx->super->md_len) { 5335 bs_dump_read_md_page(seq, ctx); 5336 } else { 5337 spdk_free(ctx->page); 5338 bs_dump_finish(seq, ctx, 0); 5339 } 5340 } 5341 5342 static void 5343 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 5344 { 5345 struct spdk_bs_load_ctx *ctx = cb_arg; 5346 uint64_t lba; 5347 5348 assert(ctx->cur_page < ctx->super->md_len); 5349 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 5350 bs_sequence_read_dev(seq, ctx->page, lba, 5351 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 5352 bs_dump_read_md_page_cpl, ctx); 5353 } 5354 5355 static void 5356 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5357 { 5358 struct spdk_bs_load_ctx *ctx = cb_arg; 5359 int rc; 5360 5361 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 5362 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5363 sizeof(ctx->super->signature)) != 0) { 5364 fprintf(ctx->fp, "(Mismatch)\n"); 5365 bs_dump_finish(seq, ctx, bserrno); 5366 return; 5367 } else { 5368 fprintf(ctx->fp, "(OK)\n"); 5369 } 5370 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 5371 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 5372 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 5373 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 5374 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 5375 fprintf(ctx->fp, "Super Blob ID: "); 5376 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 5377 fprintf(ctx->fp, "(None)\n"); 5378 } else { 5379 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 5380 } 5381 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 5382 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 5383 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 5384 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 5385 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 5386 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 5387 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 5388 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 5389 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 5390 5391 ctx->cur_page = 0; 5392 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 5393 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5394 if (!ctx->page) { 5395 bs_dump_finish(seq, ctx, -ENOMEM); 5396 return; 5397 } 5398 5399 rc = bs_parse_super(ctx); 5400 if (rc < 0) { 5401 bs_load_ctx_fail(ctx, rc); 5402 return; 5403 } 5404 5405 bs_load_read_used_pages(ctx); 5406 } 5407 5408 void 5409 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 5410 spdk_bs_op_complete cb_fn, void *cb_arg) 5411 { 5412 struct spdk_blob_store *bs; 5413 struct spdk_bs_cpl cpl; 5414 struct spdk_bs_load_ctx *ctx; 5415 struct spdk_bs_opts opts = {}; 5416 int err; 5417 5418 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 5419 5420 spdk_bs_opts_init(&opts, sizeof(opts)); 5421 5422 err = bs_alloc(dev, &opts, &bs, &ctx); 5423 if (err) { 5424 dev->destroy(dev); 5425 cb_fn(cb_arg, err); 5426 return; 5427 } 5428 5429 ctx->dumping = true; 5430 ctx->fp = fp; 5431 ctx->print_xattr_fn = print_xattr_fn; 5432 5433 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5434 cpl.u.bs_basic.cb_fn = cb_fn; 5435 cpl.u.bs_basic.cb_arg = cb_arg; 5436 5437 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5438 if (!ctx->seq) { 5439 spdk_free(ctx->super); 5440 free(ctx); 5441 bs_free(bs); 5442 cb_fn(cb_arg, -ENOMEM); 5443 return; 5444 } 5445 5446 /* Read the super block */ 5447 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5448 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5449 bs_dump_super_cpl, ctx); 5450 } 5451 5452 /* END spdk_bs_dump */ 5453 5454 /* START spdk_bs_init */ 5455 5456 static void 5457 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5458 { 5459 struct spdk_bs_load_ctx *ctx = cb_arg; 5460 5461 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 5462 spdk_free(ctx->super); 5463 free(ctx); 5464 5465 bs_sequence_finish(seq, bserrno); 5466 } 5467 5468 static void 5469 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5470 { 5471 struct spdk_bs_load_ctx *ctx = cb_arg; 5472 5473 /* Write super block */ 5474 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5475 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5476 bs_init_persist_super_cpl, ctx); 5477 } 5478 5479 void 5480 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5481 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5482 { 5483 struct spdk_bs_load_ctx *ctx; 5484 struct spdk_blob_store *bs; 5485 struct spdk_bs_cpl cpl; 5486 spdk_bs_sequence_t *seq; 5487 spdk_bs_batch_t *batch; 5488 uint64_t num_md_lba; 5489 uint64_t num_md_pages; 5490 uint64_t num_md_clusters; 5491 uint64_t max_used_cluster_mask_len; 5492 uint32_t i; 5493 struct spdk_bs_opts opts = {}; 5494 int rc; 5495 uint64_t lba, lba_count; 5496 5497 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5498 5499 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5500 SPDK_ERRLOG("unsupported dev block length of %d\n", 5501 dev->blocklen); 5502 dev->destroy(dev); 5503 cb_fn(cb_arg, NULL, -EINVAL); 5504 return; 5505 } 5506 5507 spdk_bs_opts_init(&opts, sizeof(opts)); 5508 if (o) { 5509 if (bs_opts_copy(o, &opts)) { 5510 return; 5511 } 5512 } 5513 5514 if (bs_opts_verify(&opts) != 0) { 5515 dev->destroy(dev); 5516 cb_fn(cb_arg, NULL, -EINVAL); 5517 return; 5518 } 5519 5520 rc = bs_alloc(dev, &opts, &bs, &ctx); 5521 if (rc) { 5522 dev->destroy(dev); 5523 cb_fn(cb_arg, NULL, rc); 5524 return; 5525 } 5526 5527 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5528 /* By default, allocate 1 page per cluster. 5529 * Technically, this over-allocates metadata 5530 * because more metadata will reduce the number 5531 * of usable clusters. This can be addressed with 5532 * more complex math in the future. 5533 */ 5534 bs->md_len = bs->total_clusters; 5535 } else { 5536 bs->md_len = opts.num_md_pages; 5537 } 5538 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5539 if (rc < 0) { 5540 spdk_free(ctx->super); 5541 free(ctx); 5542 bs_free(bs); 5543 cb_fn(cb_arg, NULL, -ENOMEM); 5544 return; 5545 } 5546 5547 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5548 if (rc < 0) { 5549 spdk_free(ctx->super); 5550 free(ctx); 5551 bs_free(bs); 5552 cb_fn(cb_arg, NULL, -ENOMEM); 5553 return; 5554 } 5555 5556 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5557 if (rc < 0) { 5558 spdk_free(ctx->super); 5559 free(ctx); 5560 bs_free(bs); 5561 cb_fn(cb_arg, NULL, -ENOMEM); 5562 return; 5563 } 5564 5565 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5566 sizeof(ctx->super->signature)); 5567 ctx->super->version = SPDK_BS_VERSION; 5568 ctx->super->length = sizeof(*ctx->super); 5569 ctx->super->super_blob = bs->super_blob; 5570 ctx->super->clean = 0; 5571 ctx->super->cluster_size = bs->cluster_sz; 5572 ctx->super->io_unit_size = bs->io_unit_size; 5573 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5574 5575 /* Calculate how many pages the metadata consumes at the front 5576 * of the disk. 5577 */ 5578 5579 /* The super block uses 1 page */ 5580 num_md_pages = 1; 5581 5582 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5583 * up to the nearest page, plus a header. 
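* As a rough worked example (illustrative numbers): with md_len = 65536
* pages the bitmap needs 65536 / 8 = 8192 bytes, and adding the
* spdk_bs_md_mask header then rounding up to 4 KiB pages gives a
* used_page_mask_len of 3.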
5584 */ 5585 ctx->super->used_page_mask_start = num_md_pages; 5586 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5587 spdk_divide_round_up(bs->md_len, 8), 5588 SPDK_BS_PAGE_SIZE); 5589 num_md_pages += ctx->super->used_page_mask_len; 5590 5591 /* The used_clusters mask requires 1 bit per cluster, rounded 5592 * up to the nearest page, plus a header. 5593 */ 5594 ctx->super->used_cluster_mask_start = num_md_pages; 5595 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5596 spdk_divide_round_up(bs->total_clusters, 8), 5597 SPDK_BS_PAGE_SIZE); 5598 /* The blobstore might be extended, then the used_cluster bitmap will need more space. 5599 * Here we calculate the max clusters we can support according to the 5600 * num_md_pages (bs->md_len). 5601 */ 5602 max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5603 spdk_divide_round_up(bs->md_len, 8), 5604 SPDK_BS_PAGE_SIZE); 5605 max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len, 5606 ctx->super->used_cluster_mask_len); 5607 num_md_pages += max_used_cluster_mask_len; 5608 5609 /* The used_blobids mask requires 1 bit per metadata page, rounded 5610 * up to the nearest page, plus a header. 5611 */ 5612 ctx->super->used_blobid_mask_start = num_md_pages; 5613 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5614 spdk_divide_round_up(bs->md_len, 8), 5615 SPDK_BS_PAGE_SIZE); 5616 num_md_pages += ctx->super->used_blobid_mask_len; 5617 5618 /* The metadata region size was chosen above */ 5619 ctx->super->md_start = bs->md_start = num_md_pages; 5620 ctx->super->md_len = bs->md_len; 5621 num_md_pages += bs->md_len; 5622 5623 num_md_lba = bs_page_to_lba(bs, num_md_pages); 5624 5625 ctx->super->size = dev->blockcnt * dev->blocklen; 5626 5627 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 5628 5629 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 5630 if (num_md_clusters > bs->total_clusters) { 5631 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 5632 "please decrease number of pages reserved for metadata " 5633 "or increase cluster size.\n"); 5634 spdk_free(ctx->super); 5635 spdk_bit_array_free(&ctx->used_clusters); 5636 free(ctx); 5637 bs_free(bs); 5638 cb_fn(cb_arg, NULL, -ENOMEM); 5639 return; 5640 } 5641 /* Claim all of the clusters used by the metadata */ 5642 for (i = 0; i < num_md_clusters; i++) { 5643 spdk_bit_array_set(ctx->used_clusters, i); 5644 } 5645 5646 bs->num_free_clusters -= num_md_clusters; 5647 bs->total_data_clusters = bs->num_free_clusters; 5648 5649 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5650 cpl.u.bs_handle.cb_fn = cb_fn; 5651 cpl.u.bs_handle.cb_arg = cb_arg; 5652 cpl.u.bs_handle.bs = bs; 5653 5654 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5655 if (!seq) { 5656 spdk_free(ctx->super); 5657 free(ctx); 5658 bs_free(bs); 5659 cb_fn(cb_arg, NULL, -ENOMEM); 5660 return; 5661 } 5662 5663 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx); 5664 5665 /* Clear metadata space */ 5666 bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 5667 5668 lba = num_md_lba; 5669 lba_count = ctx->bs->dev->blockcnt - lba; 5670 switch (opts.clear_method) { 5671 case BS_CLEAR_WITH_UNMAP: 5672 /* Trim data clusters */ 5673 bs_batch_unmap_dev(batch, lba, lba_count); 5674 break; 5675 case BS_CLEAR_WITH_WRITE_ZEROES: 5676 /* Write_zeroes to data clusters */ 5677 bs_batch_write_zeroes_dev(batch, lba, lba_count); 5678 break; 5679 case 

/* START spdk_bs_destroy */

static void
bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;

	/*
	 * We need to defer calling bs_call_cpl() until after
	 * dev destruction, so tuck these away for later use.
	 */
	bs->unload_err = bserrno;
	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;

	bs_sequence_finish(seq, bserrno);

	bs_free(bs);
	free(ctx);
}

void
spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
		void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;
	struct spdk_bs_load_ctx *ctx;

	SPDK_DEBUGLOG(blob, "Destroying blobstore\n");

	if (!RB_EMPTY(&bs->open_blobs)) {
		SPDK_ERRLOG("Blobstore still has open blobs\n");
		cb_fn(cb_arg, -EBUSY);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = cb_fn;
	cpl.u.bs_basic.cb_arg = cb_arg;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->bs = bs;

	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!seq) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Write zeroes to the super block */
	bs_sequence_write_zeroes_dev(seq,
				     bs_page_to_lba(bs, 0),
				     bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
				     bs_destroy_trim_cpl, ctx);
}

/* END spdk_bs_destroy */
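
/*
 * Usage sketch (destroy_done is a hypothetical callback): spdk_bs_destroy()
 * zeroes the super block, so the blobstore can no longer be loaded from the
 * device; contrast with spdk_bs_unload() below, which persists state for a
 * later load. All blobs must be closed first or the call fails with -EBUSY.
 *
 *	static void
 *	destroy_done(void *cb_arg, int bserrno)
 *	{
 *		assert(bserrno == 0);
 *	}
 *
 *	spdk_bs_destroy(bs, destroy_done, NULL);
 */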

/* START spdk_bs_unload */

static void
bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno)
{
	spdk_bs_sequence_t *seq = ctx->seq;

	spdk_free(ctx->super);

	/*
	 * We need to defer calling bs_call_cpl() until after
	 * dev destruction, so tuck these away for later use.
	 */
	ctx->bs->unload_err = bserrno;
	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;

	bs_sequence_finish(seq, bserrno);

	bs_free(ctx->bs);
	free(ctx);
}

static void
bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	bs_unload_finish(ctx, bserrno);
}

static void
bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);

	if (bserrno != 0) {
		bs_unload_finish(ctx, bserrno);
		return;
	}

	ctx->super->clean = 1;

	bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx);
}

static void
bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		bs_unload_finish(ctx, bserrno);
		return;
	}

	bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl);
}

static void
bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		bs_unload_finish(ctx, bserrno);
		return;
	}

	bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl);
}

static void
bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	int rc;

	if (bserrno != 0) {
		bs_unload_finish(ctx, bserrno);
		return;
	}

	rc = bs_super_validate(ctx->super, ctx->bs);
	if (rc != 0) {
		bs_unload_finish(ctx, rc);
		return;
	}

	bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl);
}

void
spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	struct spdk_bs_load_ctx *ctx;

	SPDK_DEBUGLOG(blob, "Syncing blobstore\n");

	/*
	 * If external snapshot channels are being destroyed while the blobstore is unloaded, the
	 * unload is deferred until after the channel destruction completes.
	 */
	if (bs->esnap_channels_unloading != 0) {
		if (bs->esnap_unload_cb_fn != NULL) {
			SPDK_ERRLOG("Blobstore unload in progress\n");
			cb_fn(cb_arg, -EBUSY);
			return;
		}
		SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32
			      " esnap clones are unloading\n", bs->esnap_channels_unloading);
		bs->esnap_unload_cb_fn = cb_fn;
		bs->esnap_unload_cb_arg = cb_arg;
		return;
	}
	if (bs->esnap_unload_cb_fn != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n");
		assert(bs->esnap_unload_cb_fn == cb_fn);
		assert(bs->esnap_unload_cb_arg == cb_arg);
		bs->esnap_unload_cb_fn = NULL;
		bs->esnap_unload_cb_arg = NULL;
	}

	if (!RB_EMPTY(&bs->open_blobs)) {
		SPDK_ERRLOG("Blobstore still has open blobs\n");
		cb_fn(cb_arg, -EBUSY);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->bs = bs;

	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
				  SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->super) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = cb_fn;
	cpl.u.bs_basic.cb_arg = cb_arg;

	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!ctx->seq) {
		spdk_free(ctx->super);
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Read super block */
	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_unload_read_super_cpl, ctx);
}

/* END spdk_bs_unload */
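
/*
 * Unload ordering sketch: the completions above run as a fixed chain --
 * read super -> write used_md pages -> write used_blobids -> write
 * used_clusters -> write super with clean = 1. Only after the super block
 * is marked clean does bs_unload_finish() release the device, so a crash
 * anywhere in the chain leaves the on-disk state recoverable by the next
 * load.
 */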

/* START spdk_bs_set_super */

struct spdk_bs_set_super_ctx {
	struct spdk_blob_store *bs;
	struct spdk_bs_super_block *super;
};

static void
bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_set_super_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
	}

	spdk_free(ctx->super);

	bs_sequence_finish(seq, bserrno);

	free(ctx);
}

static void
bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_set_super_ctx *ctx = cb_arg;
	int rc;

	if (bserrno != 0) {
		SPDK_ERRLOG("Unable to read super block of blobstore\n");
		spdk_free(ctx->super);
		bs_sequence_finish(seq, bserrno);
		free(ctx);
		return;
	}

	rc = bs_super_validate(ctx->super, ctx->bs);
	if (rc != 0) {
		SPDK_ERRLOG("Not a valid super block\n");
		spdk_free(ctx->super);
		bs_sequence_finish(seq, rc);
		free(ctx);
		return;
	}

	bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx);
}

void
spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
		  spdk_bs_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;
	struct spdk_bs_set_super_ctx *ctx;

	SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n");

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->bs = bs;

	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
				  SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->super) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = cb_fn;
	cpl.u.bs_basic.cb_arg = cb_arg;

	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!seq) {
		spdk_free(ctx->super);
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	bs->super_blob = blobid;

	/* Read super block */
	bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_set_super_read_cpl, ctx);
}

/* END spdk_bs_set_super */
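
/*
 * Usage sketch (callback names hypothetical): the "super blob" is an
 * application-chosen bookmark persisted in the super block, typically the
 * root of the application's own metadata. Pairing the two calls:
 *
 *	spdk_bs_set_super(bs, my_root_blob_id, set_super_done, NULL);
 *	...
 *	spdk_bs_get_super(bs, get_super_done, NULL);
 *
 * spdk_bs_get_super() completes with -ENOENT if no super blob was ever set.
 */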

void
spdk_bs_get_super(struct spdk_blob_store *bs,
		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	if (bs->super_blob == SPDK_BLOBID_INVALID) {
		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
	} else {
		cb_fn(cb_arg, bs->super_blob, 0);
	}
}

uint64_t
spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
{
	return bs->cluster_sz;
}

uint64_t
spdk_bs_get_page_size(struct spdk_blob_store *bs)
{
	return SPDK_BS_PAGE_SIZE;
}

uint64_t
spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
{
	return bs->io_unit_size;
}

uint64_t
spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
{
	return bs->num_free_clusters;
}

uint64_t
spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
{
	return bs->total_data_clusters;
}

static int
bs_register_md_thread(struct spdk_blob_store *bs)
{
	bs->md_channel = spdk_get_io_channel(bs);
	if (!bs->md_channel) {
		SPDK_ERRLOG("Failed to get IO channel.\n");
		return -1;
	}

	return 0;
}

static int
bs_unregister_md_thread(struct spdk_blob_store *bs)
{
	spdk_put_io_channel(bs->md_channel);

	return 0;
}

spdk_blob_id
spdk_blob_get_id(struct spdk_blob *blob)
{
	assert(blob != NULL);

	return blob->id;
}

uint64_t
spdk_blob_get_num_pages(struct spdk_blob *blob)
{
	assert(blob != NULL);

	return bs_cluster_to_page(blob->bs, blob->active.num_clusters);
}

uint64_t
spdk_blob_get_num_io_units(struct spdk_blob *blob)
{
	assert(blob != NULL);

	return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs);
}

uint64_t
spdk_blob_get_num_clusters(struct spdk_blob *blob)
{
	assert(blob != NULL);

	return blob->active.num_clusters;
}

uint64_t
spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob)
{
	assert(blob != NULL);

	return blob->active.num_allocated_clusters;
}

static uint64_t
blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated)
{
	uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob);

	while (offset < blob_io_unit_num) {
		if (bs_io_unit_is_allocated(blob, offset) == is_allocated) {
			return offset;
		}

		offset += bs_num_io_units_to_cluster_boundary(blob, offset);
	}

	return UINT64_MAX;
}

uint64_t
spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset)
{
	return blob_find_io_unit(blob, offset, true);
}

uint64_t
spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset)
{
	return blob_find_io_unit(blob, offset, false);
}
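
/*
 * Sketch: deriving capacity numbers from the getters above (illustrative
 * only, all results in bytes).
 *
 *	uint64_t free_bytes = spdk_bs_free_cluster_count(bs) *
 *			      spdk_bs_get_cluster_size(bs);
 *	uint64_t data_bytes = spdk_bs_total_data_cluster_count(bs) *
 *			      spdk_bs_get_cluster_size(bs);
 *
 * Thin-provisioned blobs report their logical size via
 * spdk_blob_get_num_clusters() and their actual backing footprint via
 * spdk_blob_get_num_allocated_clusters().
 */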

/* START spdk_bs_create_blob */

static void
bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;
	uint32_t page_idx = bs_blobid_to_page(blob->id);

	if (bserrno != 0) {
		spdk_spin_lock(&blob->bs->used_lock);
		spdk_bit_array_clear(blob->bs->used_blobids, page_idx);
		bs_release_md_page(blob->bs, page_idx);
		spdk_spin_unlock(&blob->bs->used_lock);
	}

	blob_free(blob);

	bs_sequence_finish(seq, bserrno);
}

static int
blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
		bool internal)
{
	uint64_t i;
	size_t value_len = 0;
	int rc;
	const void *value = NULL;

	if (xattrs->count > 0 && xattrs->get_value == NULL) {
		return -EINVAL;
	}
	for (i = 0; i < xattrs->count; i++) {
		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
		if (value == NULL || value_len == 0) {
			return -EINVAL;
		}
		rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
		if (rc < 0) {
			return rc;
		}
	}
	return 0;
}

static void
blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst)
{
#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		dst->field = src->field; \
	} \

	SET_FIELD(num_clusters);
	SET_FIELD(thin_provision);
	SET_FIELD(clear_method);

	if (FIELD_OK(xattrs)) {
		memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs));
	}

	SET_FIELD(use_extent_table);
	SET_FIELD(esnap_id);
	SET_FIELD(esnap_id_len);

	dst->opts_size = src->opts_size;

	/* Do not remove this statement. If a new field is added, update this
	 * assert and add a corresponding SET_FIELD statement above. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}
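
/*
 * Sketch of the caller-side pattern that FIELD_OK/SET_FIELD above support:
 * callers declare the options on their stack, initialize them with
 * spdk_blob_opts_init() (which records opts_size), then override fields.
 * Because every field copy is guarded by opts_size, an application compiled
 * against an older, smaller struct layout remains safe with a newer library.
 * create_done below is a hypothetical spdk_blob_op_with_id_complete.
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 16;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_done, NULL);
 */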

static void
bs_create_blob(struct spdk_blob_store *bs,
	       const struct spdk_blob_opts *opts,
	       const struct spdk_blob_xattr_opts *internal_xattrs,
	       spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_blob *blob;
	uint32_t page_idx;
	struct spdk_bs_cpl cpl;
	struct spdk_blob_opts opts_local;
	struct spdk_blob_xattr_opts internal_xattrs_default;
	spdk_bs_sequence_t *seq;
	spdk_blob_id id;
	int rc;

	assert(spdk_get_thread() == bs->md_thread);

	spdk_spin_lock(&bs->used_lock);
	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
	if (page_idx == UINT32_MAX) {
		spdk_spin_unlock(&bs->used_lock);
		cb_fn(cb_arg, 0, -ENOMEM);
		return;
	}
	spdk_bit_array_set(bs->used_blobids, page_idx);
	bs_claim_md_page(bs, page_idx);
	spdk_spin_unlock(&bs->used_lock);

	id = bs_page_to_blobid(page_idx);

	SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx);

	spdk_blob_opts_init(&opts_local, sizeof(opts_local));
	if (opts) {
		blob_opts_copy(opts, &opts_local);
	}

	blob = blob_alloc(bs, id);
	if (!blob) {
		rc = -ENOMEM;
		goto error;
	}

	blob->use_extent_table = opts_local.use_extent_table;
	if (blob->use_extent_table) {
		blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE;
	}

	if (!internal_xattrs) {
		blob_xattrs_init(&internal_xattrs_default);
		internal_xattrs = &internal_xattrs_default;
	}

	rc = blob_set_xattrs(blob, &opts_local.xattrs, false);
	if (rc < 0) {
		goto error;
	}

	rc = blob_set_xattrs(blob, internal_xattrs, true);
	if (rc < 0) {
		goto error;
	}

	if (opts_local.thin_provision) {
		blob_set_thin_provision(blob);
	}

	blob_set_clear_method(blob, opts_local.clear_method);

	if (opts_local.esnap_id != NULL) {
		if (opts_local.esnap_id_len > UINT16_MAX) {
			SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n",
				    opts_local.esnap_id_len);
			rc = -EINVAL;
			goto error;
		}
		blob_set_thin_provision(blob);
		blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
		rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID,
				    opts_local.esnap_id, opts_local.esnap_id_len, true);
		if (rc != 0) {
			goto error;
		}
	}

	rc = blob_resize(blob, opts_local.num_clusters);
	if (rc < 0) {
		goto error;
	}
	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	cpl.u.blobid.cb_fn = cb_fn;
	cpl.u.blobid.cb_arg = cb_arg;
	cpl.u.blobid.blobid = blob->id;

	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!seq) {
		rc = -ENOMEM;
		goto error;
	}

	blob_persist(seq, blob, bs_create_blob_cpl, blob);
	return;

error:
	SPDK_ERRLOG("Failed to create blob: %s, size in clusters: %" PRIu64 "\n",
		    spdk_strerror(rc), opts_local.num_clusters);
	if (blob != NULL) {
		blob_free(blob);
	}
	spdk_spin_lock(&bs->used_lock);
	spdk_bit_array_clear(bs->used_blobids, page_idx);
	bs_release_md_page(bs, page_idx);
	spdk_spin_unlock(&bs->used_lock);
	cb_fn(cb_arg, 0, rc);
}

void
spdk_bs_create_blob(struct spdk_blob_store *bs,
		    spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
}

void
spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
}

/* END spdk_bs_create_blob */
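
/*
 * Sketch of the spdk_blob_xattr_opts contract consumed by bs_create_blob():
 * for each name, the get_value callback hands back a pointer/length pair,
 * and a NULL value or zero length fails the create with -EINVAL (see
 * blob_set_xattrs() above). Hypothetical example:
 *
 *	static void
 *	my_get_value(void *ctx, const char *name, const void **value, size_t *value_len)
 *	{
 *		static const char owner[] = "logical-volume-1";
 *
 *		*value = owner;
 *		*value_len = sizeof(owner);
 *	}
 *
 *	char *names[] = { "owner" };
 *
 *	opts.xattrs.count = 1;
 *	opts.xattrs.names = names;
 *	opts.xattrs.ctx = NULL;
 *	opts.xattrs.get_value = my_get_value;
 */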

/* START blob_cleanup */

struct spdk_clone_snapshot_ctx {
	struct spdk_bs_cpl cpl;
	int bserrno;
	bool frozen;

	struct spdk_io_channel *channel;

	/* Current cluster for inflate operation */
	uint64_t cluster;

	/* For inflation, force allocation of all unallocated clusters and remove
	 * thin-provisioning. Otherwise, only decouple the parent and keep the clone thin. */
	bool allocate_all;

	struct {
		spdk_blob_id id;
		struct spdk_blob *blob;
		bool md_ro;
	} original;
	struct {
		spdk_blob_id id;
		struct spdk_blob *blob;
	} new;

	/* xattrs specified for snapshot/clones only. They have no impact on
	 * the original blob's xattrs. */
	const struct spdk_blob_xattr_opts *xattrs;
};

static void
bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
	struct spdk_bs_cpl *cpl = &ctx->cpl;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	switch (cpl->type) {
	case SPDK_BS_CPL_TYPE_BLOBID:
		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
		break;
	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
		break;
	default:
		SPDK_UNREACHABLE();
		break;
	}

	free(ctx);
}

static void
bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	ctx->original.id = origblob->id;
	origblob->locked_operation_in_progress = false;

	/* Revert md_ro to original state */
	origblob->md_ro = ctx->original.md_ro;

	spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx);
}

static void
bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	if (ctx->frozen) {
		/* Unfreeze any outstanding I/O */
		blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx);
	} else {
		bs_snapshot_unfreeze_cpl(ctx, 0);
	}
}

static void
bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno)
{
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	ctx->new.id = newblob->id;
	spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
}

/* END blob_cleanup */

/* START spdk_bs_create_snapshot */

static void
bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	uint64_t *cluster_temp;
	uint64_t num_allocated_clusters_temp;
	uint32_t *extent_page_temp;

	cluster_temp = blob1->active.clusters;
	blob1->active.clusters = blob2->active.clusters;
	blob2->active.clusters = cluster_temp;

	num_allocated_clusters_temp = blob1->active.num_allocated_clusters;
	blob1->active.num_allocated_clusters = blob2->active.num_allocated_clusters;
	blob2->active.num_allocated_clusters = num_allocated_clusters_temp;

	extent_page_temp = blob1->active.extent_pages;
	blob1->active.extent_pages = blob2->active.extent_pages;
	blob2->active.extent_pages = extent_page_temp;
}

/* Copies an internal xattr */
static int
bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name)
{
	const void *val = NULL;
	size_t len;
	int bserrno;

	bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true);
	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name);
		return bserrno;
	}

	bserrno = blob_set_xattr(toblob, name, val, len, true);
	if (bserrno != 0) {
		SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n",
			    name, toblob->id);
		return bserrno;
	}
	return 0;
}

static void
bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		bs_snapshot_swap_cluster_maps(newblob, origblob);
		if (blob_is_esnap_clone(newblob)) {
			bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID);
			origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
		}
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
	bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	bs_blob_list_add(ctx->original.blob);

	spdk_blob_set_read_only(newblob);

	/* sync snapshot metadata */
	spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		/* return cluster map back to original */
		bs_snapshot_swap_cluster_maps(newblob, origblob);

		/* Newblob md sync failed. Valid clusters are only present in origblob.
		 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster
		 * map should have occurred. Newblob needs to be reverted to its
		 * thin-provisioned state at creation to properly close.
		 */
		blob_set_thin_provision(newblob);
		assert(spdk_mem_all_zero(newblob->active.clusters,
					 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
		assert(spdk_mem_all_zero(newblob->active.extent_pages,
					 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));

		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
		return;
	}

	/* Set internal xattr for snapshot id */
	bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
	if (bserrno != 0) {
		/* return cluster map back to original */
		bs_snapshot_swap_cluster_maps(newblob, origblob);
		blob_set_thin_provision(newblob);
		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
		return;
	}

	/* Create new back_bs_dev for snapshot */
	origblob->back_bs_dev = bs_create_blob_bs_dev(newblob);
	if (origblob->back_bs_dev == NULL) {
		/* return cluster map back to original */
		bs_snapshot_swap_cluster_maps(newblob, origblob);
		blob_set_thin_provision(newblob);
		bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
		return;
	}

	/* Remove the xattr that references an external snapshot */
	if (blob_is_esnap_clone(origblob)) {
		origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
		bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
		if (bserrno != 0) {
			if (bserrno == -ENOENT) {
				SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID
					    " xattr to remove\n", origblob->id);
				assert(false);
			} else {
				/* return cluster map back to original */
				bs_snapshot_swap_cluster_maps(newblob, origblob);
				blob_set_thin_provision(newblob);
				bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
				return;
			}
		}
	}

	bs_blob_list_remove(origblob);
	origblob->parent_id = newblob->id;
	/* set clone blob as thin provisioned */
	blob_set_thin_provision(origblob);

	bs_blob_list_add(newblob);

	/* sync clone metadata */
	spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx);
}

static void
bs_snapshot_freeze_cpl(void *cb_arg, int rc)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;
	int bserrno;

	if (rc != 0) {
		bs_clone_snapshot_newblob_cleanup(ctx, rc);
		return;
	}

	ctx->frozen = true;

	if (blob_is_esnap_clone(origblob)) {
		/* Clean up any channels associated with the original blob id because
		 * future I/O will be performed using the snapshot blob_id.
		 */
		blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL);
	}
	if (newblob->back_bs_dev) {
		blob_back_bs_destroy(newblob);
	}
	/* set new back_bs_dev for snapshot */
	newblob->back_bs_dev = origblob->back_bs_dev;
	/* Set invalid flags from origblob */
	newblob->invalid_flags = origblob->invalid_flags;

	/* inherit parent from original blob if set */
	newblob->parent_id = origblob->parent_id;
	switch (origblob->parent_id) {
	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
		bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID);
		if (bserrno != 0) {
			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
			return;
		}
		break;
	case SPDK_BLOBID_INVALID:
		break;
	default:
		/* Set internal xattr for snapshot id */
		bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT,
					 &origblob->parent_id, sizeof(spdk_blob_id), true);
		if (bserrno != 0) {
			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
			return;
		}
	}

	/* swap cluster maps */
	bs_snapshot_swap_cluster_maps(newblob, origblob);

	/* Set the clear method on the new blob to match the original. */
	blob_set_clear_method(newblob, origblob->clear_method);

	/* sync snapshot metadata */
	spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx);
}

static void
bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = _blob;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.blob = newblob;
	assert(spdk_blob_is_thin_provisioned(newblob));
	assert(spdk_mem_all_zero(newblob->active.clusters,
				 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
	assert(spdk_mem_all_zero(newblob->active.extent_pages,
				 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));

	blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx);
}

static void
bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.id = blobid;
	ctx->cpl.u.blobid.blobid = blobid;

	spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx);
}

static void
bs_xattr_snapshot(void *arg, const char *name,
		  const void **value, size_t *value_len)
{
	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);

	struct spdk_blob *blob = (struct spdk_blob *)arg;
	*value = &blob->id;
	*value_len = sizeof(blob->id);
}

static void
bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };

	if (bserrno != 0) {
		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;

	if (_blob->data_ro || _blob->md_ro) {
		SPDK_DEBUGLOG(blob, "Cannot create snapshot from read-only blob with id 0x%"
			      PRIx64 "\n", _blob->id);
		ctx->bserrno = -EINVAL;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	spdk_blob_opts_init(&opts, sizeof(opts));
	blob_xattrs_init(&internal_xattrs);

	/* Make the new blob the same size as the original blob,
	 * but do not allocate clusters. */
	opts.thin_provision = true;
	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
	opts.use_extent_table = _blob->use_extent_table;

	/* If there are any xattrs specified for snapshot, set them now */
	if (ctx->xattrs) {
		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
	}
	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
	internal_xattrs.count = 1;
	internal_xattrs.ctx = _blob;
	internal_xattrs.names = xattrs_names;
	internal_xattrs.get_value = bs_xattr_snapshot;

	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
		       bs_snapshot_newblob_create_cpl, ctx);
}

void
spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
			const struct spdk_blob_xattr_opts *snapshot_xattrs,
			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
		return;
	}
	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	ctx->cpl.u.blobid.cb_fn = cb_fn;
	ctx->cpl.u.blobid.cb_arg = cb_arg;
	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
	ctx->bserrno = 0;
	ctx->frozen = false;
	ctx->original.id = blobid;
	ctx->xattrs = snapshot_xattrs;

	spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx);
}
/* END spdk_bs_create_snapshot */
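
/*
 * Flow sketch for spdk_bs_create_snapshot(): open the origin; create a
 * thin-provisioned blob of equal logical size; freeze I/O on the origin;
 * hand the origin's cluster map and back_bs_dev to the snapshot; re-point
 * the origin at the snapshot as a thin-provisioned clone; sync both blobs'
 * metadata; unfreeze. A hypothetical caller:
 *
 *	spdk_bs_create_snapshot(bs, blob_id, NULL, snapshot_done, NULL);
 *
 * where snapshot_done() is an spdk_blob_op_with_id_complete that receives
 * the new snapshot's blob id.
 */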

/* START spdk_bs_create_clone */

static void
bs_xattr_clone(void *arg, const char *name,
	       const void **value, size_t *value_len)
{
	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);

	struct spdk_blob *blob = (struct spdk_blob *)arg;
	*value = &blob->id;
	*value_len = sizeof(blob->id);
}

static void
bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *clone = _blob;

	ctx->new.blob = clone;
	bs_blob_list_add(clone);

	spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;

	ctx->cpl.u.blobid.blobid = blobid;
	spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx);
}

static void
bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	char *xattr_names[] = { BLOB_SNAPSHOT };

	if (bserrno != 0) {
		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;
	ctx->original.md_ro = _blob->md_ro;

	if (!_blob->data_ro || !_blob->md_ro) {
		SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	spdk_blob_opts_init(&opts, sizeof(opts));
	blob_xattrs_init(&internal_xattrs);

	opts.thin_provision = true;
	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
	opts.use_extent_table = _blob->use_extent_table;
	if (ctx->xattrs) {
		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
	}

	/* Set internal xattr BLOB_SNAPSHOT */
	internal_xattrs.count = 1;
	internal_xattrs.ctx = _blob;
	internal_xattrs.names = xattr_names;
	internal_xattrs.get_value = bs_xattr_clone;

	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
		       bs_clone_newblob_create_cpl, ctx);
}

void
spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
		     const struct spdk_blob_xattr_opts *clone_xattrs,
		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	ctx->cpl.u.blobid.cb_fn = cb_fn;
	ctx->cpl.u.blobid.cb_arg = cb_arg;
	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
	ctx->bserrno = 0;
	ctx->xattrs = clone_xattrs;
	ctx->original.id = blobid;

	spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx);
}

/* END spdk_bs_create_clone */
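
/*
 * Note the asymmetry with snapshots: a clone can only be taken from a blob
 * that is already fully read-only (data_ro and md_ro), i.e. from a snapshot,
 * so no I/O freeze is needed. Sketch:
 *
 *	spdk_bs_create_clone(bs, snapshot_id, NULL, clone_done, NULL);
 */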

/* START spdk_bs_inflate_blob */

static void
bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	/* Temporarily override md_ro flag for MD modification */
	_blob->md_ro = false;

	bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true);
	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	assert(_parent != NULL);

	bs_blob_list_remove(_blob);
	_blob->parent_id = _parent->id;

	blob_back_bs_destroy(_blob);
	_blob->back_bs_dev = bs_create_blob_bs_dev(_parent);
	bs_blob_list_add(_blob);

	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx)
{
	struct spdk_blob *_blob = ctx->original.blob;
	struct spdk_blob *_parent;

	if (ctx->allocate_all) {
		/* remove thin provisioning */
		bs_blob_list_remove(_blob);
		if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
			blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
			_blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
		} else {
			blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
		}
		_blob->invalid_flags &= ~SPDK_BLOB_THIN_PROV;
		blob_back_bs_destroy(_blob);
		_blob->parent_id = SPDK_BLOBID_INVALID;
	} else {
		/* For now, esnap clones always have allocate_all set. */
		assert(!blob_is_esnap_clone(_blob));

		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
			/* We must change the parent of the inflated blob */
			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
					  bs_inflate_blob_set_parent_cpl, ctx);
			return;
		}

		bs_blob_list_remove(_blob);
		_blob->parent_id = SPDK_BLOBID_INVALID;
		blob_back_bs_destroy(_blob);
		_blob->back_bs_dev = bs_create_zeroes_dev();
	}

	/* Temporarily override md_ro flag for MD modification */
	_blob->md_ro = false;
	blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
	_blob->state = SPDK_BLOB_STATE_DIRTY;

	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
}

/* Check if cluster needs allocation */
static inline bool
bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
{
	struct spdk_blob_bs_dev *b;

	assert(blob != NULL);

	if (blob->active.clusters[cluster] != 0) {
		/* Cluster is already allocated */
		return false;
	}

	if (blob->parent_id == SPDK_BLOBID_INVALID) {
		/* Blob has no parent blob */
		return allocate_all;
	}

	if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
		return true;
	}

	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	return (allocate_all || b->blob->active.clusters[cluster] != 0);
}
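
/*
 * Decision summary for bs_cluster_needs_allocation() (restating the code
 * above): an already-allocated cluster is never copied; with no parent, a
 * cluster is allocated only when allocate_all is set (inflate); with an
 * external snapshot parent, every unallocated cluster is copied; with a
 * regular snapshot parent, a cluster is copied when allocate_all is set or
 * when the parent actually has data there (decouple).
 */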

static void
bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;
	struct spdk_bs_cpl cpl;
	spdk_bs_user_op_t *op;
	uint64_t offset;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
		if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
			break;
		}
	}

	if (ctx->cluster < _blob->active.num_clusters) {
		offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);

		/* We may safely increment the cluster before copying */
		ctx->cluster++;

		/* Use a dummy 0B read as a context for cluster copy */
		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
		cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
		cpl.u.blob_basic.cb_arg = ctx;

		op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
				      NULL, 0, offset, 0);
		if (!op) {
			bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
			return;
		}

		bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
	} else {
		bs_inflate_blob_done(ctx);
	}
}

static void
bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	uint64_t clusters_needed;
	uint64_t i;

	if (bserrno != 0) {
		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;
	ctx->original.md_ro = _blob->md_ro;

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	switch (_blob->parent_id) {
	case SPDK_BLOBID_INVALID:
		if (!ctx->allocate_all) {
			/* This blob has no parent, so we cannot decouple it. */
			SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
			bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
			return;
		}
		break;
	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
		/*
		 * It would be better to rely on back_bs_dev->is_zeroes() to determine which
		 * clusters require allocation. Until there is a blobstore consumer that
		 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes(), it
		 * is not worth the effort.
		 */
		ctx->allocate_all = true;
		break;
	default:
		break;
	}

	if (spdk_blob_is_thin_provisioned(_blob) == false) {
		/* This blob is not thin provisioned, so there is no need to inflate. */
		bs_clone_snapshot_origblob_cleanup(ctx, 0);
		return;
	}

	/* Do two passes - one to verify that we can obtain enough clusters
	 * and another to actually claim them.
	 */
	clusters_needed = 0;
	for (i = 0; i < _blob->active.num_clusters; i++) {
		if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
			clusters_needed++;
		}
	}

	if (clusters_needed > _blob->bs->num_free_clusters) {
		/* Not enough free clusters. Cannot satisfy the request. */
		bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
		return;
	}

	ctx->cluster = 0;
	bs_inflate_blob_touch_next(ctx, 0);
}

static void
bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
		spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
	ctx->bserrno = 0;
	ctx->original.id = blobid;
	ctx->channel = channel;
	ctx->allocate_all = allocate_all;

	spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx);
}

void
spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
}

void
spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
}
/* END spdk_bs_inflate_blob */
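
/*
 * The two public entry points differ only in allocate_all:
 * spdk_bs_inflate_blob() copies every cluster and drops thin provisioning,
 * leaving a fully independent blob; spdk_bs_blob_decouple_parent() copies
 * only clusters the immediate parent owns, keeps the blob thin, and
 * re-parents it to the grandparent snapshot (or to no parent). Sketch:
 *
 *	spdk_bs_inflate_blob(bs, ch, blob_id, inflate_done, NULL);
 *	spdk_bs_blob_decouple_parent(bs, ch, blob_id, decouple_done, NULL);
 */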

/* START spdk_bs_blob_shallow_copy */

struct shallow_copy_ctx {
	struct spdk_bs_cpl cpl;
	int bserrno;

	/* Blob source for copy */
	struct spdk_blob_store *bs;
	spdk_blob_id blobid;
	struct spdk_blob *blob;
	struct spdk_io_channel *blob_channel;

	/* Destination device for copy */
	struct spdk_bs_dev *ext_dev;
	struct spdk_io_channel *ext_channel;

	/* Current cluster for copy operation */
	uint64_t cluster;

	/* Buffer for blob reading */
	uint8_t *read_buff;

	/* Struct for external device writing */
	struct spdk_bs_dev_cb_args ext_args;

	/* Actual number of copied clusters */
	uint64_t copied_clusters_count;

	/* Status callback for updates about the ongoing operation */
	spdk_blob_shallow_copy_status status_cb;

	/* Argument passed to function status_cb */
	void *status_cb_arg;
};

static void
bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno)
{
	struct shallow_copy_ctx *ctx = cb_arg;
	struct spdk_bs_cpl *cpl = &ctx->cpl;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, cleanup error %d\n", ctx->blob->id, bserrno);
		ctx->bserrno = bserrno;
	}

	ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel);
	spdk_free(ctx->read_buff);

	cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);

	free(ctx);
}

static void
bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
{
	struct shallow_copy_ctx *ctx = cb_arg;
	struct spdk_blob *_blob = ctx->blob;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, ext dev write error %d\n", ctx->blob->id, bserrno);
		ctx->bserrno = bserrno;
		_blob->locked_operation_in_progress = false;
		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}

	ctx->cluster++;
	if (ctx->status_cb) {
		ctx->copied_clusters_count++;
		ctx->status_cb(ctx->copied_clusters_count, ctx->status_cb_arg);
	}

	bs_shallow_copy_cluster_find_next(ctx);
}

static void
bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno)
{
	struct shallow_copy_ctx *ctx = cb_arg;
	struct spdk_bs_dev *ext_dev = ctx->ext_dev;
	struct spdk_blob *_blob = ctx->blob;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob read error %d\n", ctx->blob->id, bserrno);
		ctx->bserrno = bserrno;
		_blob->locked_operation_in_progress = false;
		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}

	ctx->ext_args.channel = ctx->ext_channel;
	ctx->ext_args.cb_fn = bs_shallow_copy_bdev_write_cpl;
	ctx->ext_args.cb_arg = ctx;

	ext_dev->write(ext_dev, ctx->ext_channel, ctx->read_buff,
		       bs_cluster_to_lba(_blob->bs, ctx->cluster),
		       bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
		       &ctx->ext_args);
}

static void
bs_shallow_copy_cluster_find_next(void *cb_arg)
{
	struct shallow_copy_ctx *ctx = cb_arg;
	struct spdk_blob *_blob = ctx->blob;

	while (ctx->cluster < _blob->active.num_clusters) {
		if (_blob->active.clusters[ctx->cluster] != 0) {
			break;
		}

		ctx->cluster++;
	}

	if (ctx->cluster < _blob->active.num_clusters) {
		blob_request_submit_op_single(ctx->blob_channel, _blob, ctx->read_buff,
					      bs_cluster_to_lba(_blob->bs, ctx->cluster),
					      bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
					      bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ);
	} else {
		_blob->locked_operation_in_progress = false;
		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
	}
}

static void
bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct shallow_copy_ctx *ctx = cb_arg;
	struct spdk_bs_dev *ext_dev = ctx->ext_dev;
	uint32_t blob_block_size;
	uint64_t blob_total_size;

	if (bserrno != 0) {
		SPDK_ERRLOG("Shallow copy blob open error %d\n", bserrno);
		ctx->bserrno = bserrno;
		bs_shallow_copy_cleanup_finish(ctx, 0);
		return;
	}

	if (!spdk_blob_is_read_only(_blob)) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob must be read only\n", _blob->id);
		ctx->bserrno = -EPERM;
		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}

	blob_block_size = _blob->bs->dev->blocklen;
	blob_total_size = spdk_blob_get_num_clusters(_blob) * spdk_bs_get_cluster_size(_blob->bs);

	if (blob_total_size > ext_dev->blockcnt * ext_dev->blocklen) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device must be at least "
			    "as large as the blob\n", _blob->id);
		ctx->bserrno = -EINVAL;
		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}

	if (blob_block_size % ext_dev->blocklen != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device block size is not "
			    "compatible with blobstore block size\n", _blob->id);
		ctx->bserrno = -EINVAL;
		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}

	ctx->blob = _blob;

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "blob 0x%" PRIx64 " shallow copy - another operation in progress\n", _blob->id);
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	ctx->cluster = 0;
	bs_shallow_copy_cluster_find_next(ctx);
}

int
spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
			  spdk_blob_id blobid, struct spdk_bs_dev *ext_dev,
			  spdk_blob_shallow_copy_status status_cb_fn, void *status_cb_arg,
			  spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct shallow_copy_ctx *ctx;
	struct spdk_io_channel *ext_channel;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		return -ENOMEM;
	}

	ctx->bs = bs;
	ctx->blobid = blobid;
	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
	ctx->bserrno = 0;
	ctx->blob_channel = channel;
	ctx->status_cb = status_cb_fn;
	ctx->status_cb_arg = status_cb_arg;
	ctx->read_buff = spdk_malloc(bs->cluster_sz, bs->dev->blocklen, NULL,
				     SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->read_buff) {
		free(ctx);
		return -ENOMEM;
	}

	ext_channel = ext_dev->create_channel(ext_dev);
	if (!ext_channel) {
		spdk_free(ctx->read_buff);
		free(ctx);
		return -ENOMEM;
	}
	ctx->ext_dev = ext_dev;
	ctx->ext_channel = ext_channel;

	spdk_bs_open_blob(ctx->bs, ctx->blobid, bs_shallow_copy_blob_open_cpl, ctx);

	return 0;
}
/* END spdk_bs_blob_shallow_copy */
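
/*
 * Shallow copy walks only the clusters the blob itself owns (clusters
 * inherited from a parent are skipped) and writes each one at the same
 * offset on ext_dev, so the destination layout mirrors the source. Sketch
 * with hypothetical callbacks:
 *
 *	static void
 *	copy_status(uint64_t copied_clusters, void *cb_arg)
 *	{
 *		printf("copied %" PRIu64 " clusters\n", copied_clusters);
 *	}
 *
 *	rc = spdk_bs_blob_shallow_copy(bs, ch, blob_id, ext_dev,
 *				       copy_status, NULL, copy_done, NULL);
 *
 * A negative return value means the operation never started (e.g. -ENOMEM).
 */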

/* START spdk_bs_blob_set_parent */

struct set_parent_ctx {
	struct spdk_blob_store *bs;
	int bserrno;
	spdk_bs_op_complete cb_fn;
	void *cb_arg;

	struct spdk_blob *blob;
	bool blob_md_ro;

	struct blob_parent parent;
};

static void
bs_set_parent_cleanup_finish(void *cb_arg, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;

	assert(ctx != NULL);

	if (bserrno != 0) {
		SPDK_ERRLOG("blob set parent finish error %d\n", bserrno);
		if (ctx->bserrno == 0) {
			ctx->bserrno = bserrno;
		}
	}

	ctx->cb_fn(ctx->cb_arg, ctx->bserrno);

	free(ctx);
}

static void
bs_set_parent_close_snapshot(void *cb_arg, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;

	if (ctx->bserrno != 0) {
		spdk_blob_close(ctx->parent.u.snapshot.blob, bs_set_parent_cleanup_finish, ctx);
		return;
	}

	if (bserrno != 0) {
		SPDK_ERRLOG("blob close error %d\n", bserrno);
		ctx->bserrno = bserrno;
	}

	bs_set_parent_cleanup_finish(ctx, ctx->bserrno);
}

static void
bs_set_parent_close_blob(void *cb_arg, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob *snapshot = ctx->parent.u.snapshot.blob;

	if (bserrno != 0 && ctx->bserrno == 0) {
		SPDK_ERRLOG("error %d in metadata sync\n", bserrno);
		ctx->bserrno = bserrno;
	}

	/* Revert md_ro to original state */
	blob->md_ro = ctx->blob_md_ro;

	blob->locked_operation_in_progress = false;
	snapshot->locked_operation_in_progress = false;

	spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
}

static void
bs_set_parent_set_back_bs_dev_done(void *cb_arg, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno != 0) {
		SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno);
		ctx->bserrno = bserrno;
		bs_set_parent_close_blob(ctx, bserrno);
		return;
	}

	spdk_blob_sync_md(blob, bs_set_parent_close_blob, ctx);
}

static int
bs_set_parent_refs(struct spdk_blob *blob, struct blob_parent *parent)
{
	int rc;

	bs_blob_list_remove(blob);

	rc = blob_set_xattr(blob, BLOB_SNAPSHOT, &parent->u.snapshot.id, sizeof(spdk_blob_id), true);
	if (rc != 0) {
		SPDK_ERRLOG("error %d setting snapshot xattr\n", rc);
		return rc;
	}
	blob->parent_id = parent->u.snapshot.id;

	if (blob_is_esnap_clone(blob)) {
		/* Remove the xattr that references the external snapshot */
		blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
		blob_remove_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
	}

	bs_blob_list_add(blob);

	return 0;
}

static void
bs_set_parent_snapshot_open_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_bs_dev *back_bs_dev;

	if (bserrno != 0) {
		SPDK_ERRLOG("snapshot open error %d\n", bserrno);
		ctx->bserrno = bserrno;
		spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx);
		return;
	}

	ctx->parent.u.snapshot.blob = snapshot;
	ctx->parent.u.snapshot.id = snapshot->id;

	if (!spdk_blob_is_snapshot(snapshot)) {
		SPDK_ERRLOG("parent blob is not a snapshot\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
		return;
	}

	if (blob->active.num_clusters != snapshot->active.num_clusters) {
		SPDK_ERRLOG("parent blob has a different number of clusters than the child\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
		return;
	}

	if (blob->locked_operation_in_progress || snapshot->locked_operation_in_progress) {
		SPDK_ERRLOG("cannot set parent of blob, another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
		return;
	}

	blob->locked_operation_in_progress = true;
	snapshot->locked_operation_in_progress = true;

	/* Temporarily override md_ro flag for MD modification */
	blob->md_ro = false;

	back_bs_dev = bs_create_blob_bs_dev(snapshot);

	blob_set_back_bs_dev(blob, back_bs_dev, bs_set_parent_refs, &ctx->parent,
			     bs_set_parent_set_back_bs_dev_done,
			     ctx);
}

static void
bs_set_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob open error %d\n", bserrno);
		ctx->bserrno = bserrno;
		bs_set_parent_cleanup_finish(ctx, 0);
		return;
	}

	if (!spdk_blob_is_thin_provisioned(blob)) {
		SPDK_ERRLOG("blob is not thin-provisioned\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx);
		return;
	}

	ctx->blob = blob;
	ctx->blob_md_ro = blob->md_ro;

	spdk_bs_open_blob(ctx->bs, ctx->parent.u.snapshot.id, bs_set_parent_snapshot_open_cpl, ctx);
}

void
spdk_bs_blob_set_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id,
			spdk_blob_id snapshot_id, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct set_parent_ctx *ctx;

	if (snapshot_id == SPDK_BLOBID_INVALID) {
		SPDK_ERRLOG("snapshot id not valid\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	if (blob_id == snapshot_id) {
		SPDK_ERRLOG("blob id and snapshot id cannot be the same\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	if (spdk_blob_get_parent_snapshot(bs, blob_id) == snapshot_id) {
		SPDK_NOTICELOG("snapshot is already the parent of blob\n");
		cb_fn(cb_arg, -EEXIST);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->bs = bs;
	ctx->parent.u.snapshot.id = snapshot_id;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->bserrno = 0;

	spdk_bs_open_blob(bs, blob_id, bs_set_parent_blob_open_cpl, ctx);
}
/* END spdk_bs_blob_set_parent */
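
/*
 * Precondition summary for spdk_bs_blob_set_parent() (restating the checks
 * above): the child must be thin provisioned, the parent must be a snapshot
 * with the same number of clusters, and neither may have another locked
 * operation in flight. On success the child's unallocated clusters are
 * served by the new parent. Sketch:
 *
 *	spdk_bs_blob_set_parent(bs, blob_id, snapshot_id, set_parent_done, NULL);
 */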
-EINVAL); 7625 return; 7626 } 7627 7628 if (blob_id == snapshot_id) { 7629 SPDK_ERRLOG("blob id and snapshot id cannot be the same\n"); 7630 cb_fn(cb_arg, -EINVAL); 7631 return; 7632 } 7633 7634 if (spdk_blob_get_parent_snapshot(bs, blob_id) == snapshot_id) { 7635 SPDK_NOTICELOG("snapshot is already the parent of blob\n"); 7636 cb_fn(cb_arg, -EEXIST); 7637 return; 7638 } 7639 7640 ctx = calloc(1, sizeof(*ctx)); 7641 if (!ctx) { 7642 cb_fn(cb_arg, -ENOMEM); 7643 return; 7644 } 7645 7646 ctx->bs = bs; 7647 ctx->parent.u.snapshot.id = snapshot_id; 7648 ctx->cb_fn = cb_fn; 7649 ctx->cb_arg = cb_arg; 7650 ctx->bserrno = 0; 7651 7652 spdk_bs_open_blob(bs, blob_id, bs_set_parent_blob_open_cpl, ctx); 7653 } 7654 /* END spdk_bs_blob_set_parent */ 7655 7656 /* START spdk_bs_blob_set_external_parent */ 7657 7658 static void 7659 bs_set_external_parent_cleanup_finish(void *cb_arg, int bserrno) 7660 { 7661 struct set_parent_ctx *ctx = cb_arg; 7662 7663 if (bserrno != 0) { 7664 SPDK_ERRLOG("blob set external parent finish error %d\n", bserrno); 7665 if (ctx->bserrno == 0) { 7666 ctx->bserrno = bserrno; 7667 } 7668 } 7669 7670 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7671 7672 free(ctx->parent.u.esnap.id); 7673 free(ctx); 7674 } 7675 7676 static void 7677 bs_set_external_parent_close_blob(void *cb_arg, int bserrno) 7678 { 7679 struct set_parent_ctx *ctx = cb_arg; 7680 struct spdk_blob *blob = ctx->blob; 7681 7682 if (bserrno != 0 && ctx->bserrno == 0) { 7683 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7684 ctx->bserrno = bserrno; 7685 } 7686 7687 /* Revert md_ro to original state */ 7688 blob->md_ro = ctx->blob_md_ro; 7689 7690 blob->locked_operation_in_progress = false; 7691 7692 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7693 } 7694 7695 static void 7696 bs_set_external_parent_unfrozen(void *cb_arg, int bserrno) 7697 { 7698 struct set_parent_ctx *ctx = cb_arg; 7699 struct spdk_blob *blob = ctx->blob; 7700 7701 if (bserrno != 0) { 7702 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7703 ctx->bserrno = bserrno; 7704 bs_set_external_parent_close_blob(ctx, bserrno); 7705 return; 7706 } 7707 7708 spdk_blob_sync_md(blob, bs_set_external_parent_close_blob, ctx); 7709 } 7710 7711 static int 7712 bs_set_external_parent_refs(struct spdk_blob *blob, struct blob_parent *parent) 7713 { 7714 int rc; 7715 7716 bs_blob_list_remove(blob); 7717 7718 if (spdk_blob_is_clone(blob)) { 7719 /* Remove the xattr that references the snapshot */ 7720 blob->parent_id = SPDK_BLOBID_INVALID; 7721 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 7722 } 7723 7724 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, parent->u.esnap.id, 7725 parent->u.esnap.id_len, true); 7726 if (rc != 0) { 7727 SPDK_ERRLOG("error %d setting external snapshot xattr\n", rc); 7728 return rc; 7729 } 7730 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 7731 blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 7732 7733 bs_blob_list_add(blob); 7734 7735 return 0; 7736 } 7737 7738 static void 7739 bs_set_external_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7740 { 7741 struct set_parent_ctx *ctx = cb_arg; 7742 const void *esnap_id; 7743 size_t esnap_id_len; 7744 int rc; 7745 7746 if (bserrno != 0) { 7747 SPDK_ERRLOG("blob open error %d\n", bserrno); 7748 ctx->bserrno = bserrno; 7749 bs_set_parent_cleanup_finish(ctx, 0); 7750 return; 7751 } 7752 7753 ctx->blob = blob; 7754 ctx->blob_md_ro = blob->md_ro; 7755 7756 rc = spdk_blob_get_esnap_id(blob, &esnap_id, &esnap_id_len); 7757 if (rc == 0 && 
esnap_id != NULL && esnap_id_len == ctx->parent.u.esnap.id_len && 7758 memcmp(esnap_id, ctx->parent.u.esnap.id, esnap_id_len) == 0) { 7759 SPDK_ERRLOG("external snapshot is already the parent of blob\n"); 7760 ctx->bserrno = -EEXIST; 7761 goto error; 7762 } 7763 7764 if (!spdk_blob_is_thin_provisioned(blob)) { 7765 SPDK_ERRLOG("blob is not thin-provisioned\n"); 7766 ctx->bserrno = -EINVAL; 7767 goto error; 7768 } 7769 7770 if (blob->locked_operation_in_progress) { 7771 SPDK_ERRLOG("cannot set external parent of blob, another operation in progress\n"); 7772 ctx->bserrno = -EBUSY; 7773 goto error; 7774 } 7775 7776 blob->locked_operation_in_progress = true; 7777 7778 /* Temporarily override md_ro flag for MD modification */ 7779 blob->md_ro = false; 7780 7781 blob_set_back_bs_dev(blob, ctx->parent.u.esnap.back_bs_dev, bs_set_external_parent_refs, 7782 &ctx->parent, bs_set_external_parent_unfrozen, ctx); 7783 return; 7784 7785 error: 7786 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7787 } 7788 7789 void 7790 spdk_bs_blob_set_external_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id, 7791 struct spdk_bs_dev *esnap_bs_dev, const void *esnap_id, 7792 uint32_t esnap_id_len, spdk_blob_op_complete cb_fn, void *cb_arg) 7793 { 7794 struct set_parent_ctx *ctx; 7795 uint64_t esnap_dev_size, cluster_sz; 7796 7797 if (sizeof(blob_id) == esnap_id_len && memcmp(&blob_id, esnap_id, sizeof(blob_id)) == 0) { 7798 SPDK_ERRLOG("blob id and external snapshot id cannot be the same\n"); 7799 cb_fn(cb_arg, -EINVAL); 7800 return; 7801 } 7802 7803 esnap_dev_size = esnap_bs_dev->blockcnt * esnap_bs_dev->blocklen; 7804 cluster_sz = spdk_bs_get_cluster_size(bs); 7805 if ((esnap_dev_size % cluster_sz) != 0) { 7806 SPDK_ERRLOG("Esnap device size %" PRIu64 " is not an integer multiple of " 7807 "cluster size %" PRIu64 "\n", esnap_dev_size, cluster_sz); 7808 cb_fn(cb_arg, -EINVAL); 7809 return; 7810 } 7811 7812 ctx = calloc(1, sizeof(*ctx)); 7813 if (!ctx) { 7814 cb_fn(cb_arg, -ENOMEM); 7815 return; 7816 } 7817 7818 ctx->parent.u.esnap.id = calloc(1, esnap_id_len); 7819 if (!ctx->parent.u.esnap.id) { 7820 free(ctx); 7821 cb_fn(cb_arg, -ENOMEM); 7822 return; 7823 } 7824 7825 ctx->bs = bs; 7826 ctx->parent.u.esnap.back_bs_dev = esnap_bs_dev; 7827 memcpy(ctx->parent.u.esnap.id, esnap_id, esnap_id_len); 7828 ctx->parent.u.esnap.id_len = esnap_id_len; 7829 ctx->cb_fn = cb_fn; 7830 ctx->cb_arg = cb_arg; 7831 ctx->bserrno = 0; 7832 7833 spdk_bs_open_blob(bs, blob_id, bs_set_external_parent_blob_open_cpl, ctx); 7834 } 7835 /* END spdk_bs_blob_set_external_parent */ 7836 7837 /* START spdk_blob_resize */ 7838 struct spdk_bs_resize_ctx { 7839 spdk_blob_op_complete cb_fn; 7840 void *cb_arg; 7841 struct spdk_blob *blob; 7842 uint64_t sz; 7843 int rc; 7844 }; 7845 7846 static void 7847 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 7848 { 7849 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7850 7851 if (rc != 0) { 7852 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 7853 } 7854 7855 if (ctx->rc != 0) { 7856 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 7857 rc = ctx->rc; 7858 } 7859 7860 ctx->blob->locked_operation_in_progress = false; 7861 7862 ctx->cb_fn(ctx->cb_arg, rc); 7863 free(ctx); 7864 } 7865 7866 static void 7867 bs_resize_freeze_cpl(void *cb_arg, int rc) 7868 { 7869 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7870 7871 if (rc != 0) { 7872 ctx->blob->locked_operation_in_progress = false; 7873 ctx->cb_fn(ctx->cb_arg, rc); 7874 free(ctx); 7875 
return; 7876 } 7877 7878 ctx->rc = blob_resize(ctx->blob, ctx->sz); 7879 7880 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 7881 } 7882 7883 void 7884 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 7885 { 7886 struct spdk_bs_resize_ctx *ctx; 7887 7888 blob_verify_md_op(blob); 7889 7890 SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz); 7891 7892 if (blob->md_ro) { 7893 cb_fn(cb_arg, -EPERM); 7894 return; 7895 } 7896 7897 if (sz == blob->active.num_clusters) { 7898 cb_fn(cb_arg, 0); 7899 return; 7900 } 7901 7902 if (blob->locked_operation_in_progress) { 7903 cb_fn(cb_arg, -EBUSY); 7904 return; 7905 } 7906 7907 ctx = calloc(1, sizeof(*ctx)); 7908 if (!ctx) { 7909 cb_fn(cb_arg, -ENOMEM); 7910 return; 7911 } 7912 7913 blob->locked_operation_in_progress = true; 7914 ctx->cb_fn = cb_fn; 7915 ctx->cb_arg = cb_arg; 7916 ctx->blob = blob; 7917 ctx->sz = sz; 7918 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 7919 } 7920 7921 /* END spdk_blob_resize */ 7922 7923 7924 /* START spdk_bs_delete_blob */ 7925 7926 static void 7927 bs_delete_close_cpl(void *cb_arg, int bserrno) 7928 { 7929 spdk_bs_sequence_t *seq = cb_arg; 7930 7931 bs_sequence_finish(seq, bserrno); 7932 } 7933 7934 static void 7935 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7936 { 7937 struct spdk_blob *blob = cb_arg; 7938 7939 if (bserrno != 0) { 7940 /* 7941 * We already removed this blob from the blobstore tailq, so 7942 * we need to free it here since this is the last reference 7943 * to it. 7944 */ 7945 blob_free(blob); 7946 bs_delete_close_cpl(seq, bserrno); 7947 return; 7948 } 7949 7950 /* 7951 * This will immediately decrement the ref_count and call 7952 * the completion routine since the metadata state is clean. 7953 * By calling spdk_blob_close, we reduce the number of call 7954 * points into code that touches the blob->open_ref count 7955 * and the blobstore's blob list. 
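 * Note: the blob was already removed from open_blobids and the open_blobs
 * tree in bs_delete_open_cpl(), and active.num_pages was set to 0 in
 * bs_delete_blob_finish(), so blob_close_cpl() skips that removal.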
7956 */ 7957 spdk_blob_close(blob, bs_delete_close_cpl, seq); 7958 } 7959 7960 struct delete_snapshot_ctx { 7961 struct spdk_blob_list *parent_snapshot_entry; 7962 struct spdk_blob *snapshot; 7963 struct spdk_blob_md_page *page; 7964 bool snapshot_md_ro; 7965 struct spdk_blob *clone; 7966 bool clone_md_ro; 7967 spdk_blob_op_with_handle_complete cb_fn; 7968 void *cb_arg; 7969 int bserrno; 7970 uint32_t next_extent_page; 7971 }; 7972 7973 static void 7974 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 7975 { 7976 struct delete_snapshot_ctx *ctx = cb_arg; 7977 7978 if (bserrno != 0) { 7979 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 7980 } 7981 7982 assert(ctx != NULL); 7983 7984 if (bserrno != 0 && ctx->bserrno == 0) { 7985 ctx->bserrno = bserrno; 7986 } 7987 7988 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 7989 spdk_free(ctx->page); 7990 free(ctx); 7991 } 7992 7993 static void 7994 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 7995 { 7996 struct delete_snapshot_ctx *ctx = cb_arg; 7997 7998 if (bserrno != 0) { 7999 ctx->bserrno = bserrno; 8000 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 8001 } 8002 8003 if (ctx->bserrno != 0) { 8004 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 8005 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 8006 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 8007 } 8008 8009 ctx->snapshot->locked_operation_in_progress = false; 8010 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8011 8012 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 8013 } 8014 8015 static void 8016 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 8017 { 8018 struct delete_snapshot_ctx *ctx = cb_arg; 8019 8020 ctx->clone->locked_operation_in_progress = false; 8021 ctx->clone->md_ro = ctx->clone_md_ro; 8022 8023 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8024 } 8025 8026 static void 8027 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 8028 { 8029 struct delete_snapshot_ctx *ctx = cb_arg; 8030 8031 if (bserrno) { 8032 ctx->bserrno = bserrno; 8033 delete_snapshot_cleanup_clone(ctx, 0); 8034 return; 8035 } 8036 8037 ctx->clone->locked_operation_in_progress = false; 8038 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 8039 } 8040 8041 static void 8042 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 8043 { 8044 struct delete_snapshot_ctx *ctx = cb_arg; 8045 struct spdk_blob_list *parent_snapshot_entry = NULL; 8046 struct spdk_blob_list *snapshot_entry = NULL; 8047 struct spdk_blob_list *clone_entry = NULL; 8048 struct spdk_blob_list *snapshot_clone_entry = NULL; 8049 8050 if (bserrno) { 8051 SPDK_ERRLOG("Failed to sync MD on blob\n"); 8052 ctx->bserrno = bserrno; 8053 delete_snapshot_cleanup_clone(ctx, 0); 8054 return; 8055 } 8056 8057 /* Get snapshot entry for the snapshot we want to remove */ 8058 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 8059 8060 assert(snapshot_entry != NULL); 8061 8062 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 8063 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8064 assert(clone_entry != NULL); 8065 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 8066 snapshot_entry->clone_count--; 8067 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 8068 8069 switch (ctx->snapshot->parent_id) { 8070 case SPDK_BLOBID_INVALID: 8071 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 8072 /* No parent snapshot - just remove clone 
entry */ 8073 free(clone_entry); 8074 break; 8075 default: 8076 /* This snapshot is at the same time a clone of another snapshot - we need to 8077 * update parent snapshot (remove current clone, add new one inherited from 8078 * the snapshot that is being removed) */ 8079 8080 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8081 * snapshot that we are removing */ 8082 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 8083 &snapshot_clone_entry); 8084 8085 /* Switch clone entry in parent snapshot */ 8086 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 8087 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 8088 free(snapshot_clone_entry); 8089 } 8090 8091 /* Restore md_ro flags */ 8092 ctx->clone->md_ro = ctx->clone_md_ro; 8093 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8094 8095 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 8096 } 8097 8098 static void 8099 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 8100 { 8101 struct delete_snapshot_ctx *ctx = cb_arg; 8102 uint64_t i; 8103 8104 ctx->snapshot->md_ro = false; 8105 8106 if (bserrno) { 8107 SPDK_ERRLOG("Failed to sync MD on clone\n"); 8108 ctx->bserrno = bserrno; 8109 8110 /* Restore snapshot to previous state */ 8111 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8112 if (bserrno != 0) { 8113 delete_snapshot_cleanup_clone(ctx, bserrno); 8114 return; 8115 } 8116 8117 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8118 return; 8119 } 8120 8121 /* Clear cluster map entries for snapshot */ 8122 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8123 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 8124 if (ctx->snapshot->active.clusters[i] != 0) { 8125 ctx->snapshot->active.num_allocated_clusters--; 8126 } 8127 ctx->snapshot->active.clusters[i] = 0; 8128 } 8129 } 8130 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 8131 i < ctx->clone->active.num_extent_pages; i++) { 8132 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 8133 ctx->snapshot->active.extent_pages[i] = 0; 8134 } 8135 } 8136 8137 blob_set_thin_provision(ctx->snapshot); 8138 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 8139 8140 if (ctx->parent_snapshot_entry != NULL) { 8141 ctx->snapshot->back_bs_dev = NULL; 8142 } 8143 8144 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 8145 } 8146 8147 static void 8148 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 8149 { 8150 int bserrno; 8151 8152 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 8153 blob_back_bs_destroy(ctx->clone); 8154 8155 /* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... 
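 * Three cases follow: the removed snapshot was itself an esnap clone (the
 * external snapshot xattr is copied to the clone), it had a regular parent
 * snapshot (the clone is re-parented to that snapshot), or it had no parent
 * (the clone gets SPDK_BLOBID_INVALID and a zeroes dev).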
*/ 8156 if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 8157 bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot, 8158 BLOB_EXTERNAL_SNAPSHOT_ID); 8159 if (bserrno != 0) { 8160 ctx->bserrno = bserrno; 8161 8162 /* Restore snapshot to previous state */ 8163 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8164 if (bserrno != 0) { 8165 delete_snapshot_cleanup_clone(ctx, bserrno); 8166 return; 8167 } 8168 8169 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8170 return; 8171 } 8172 ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 8173 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8174 /* Do not delete the external snapshot along with this snapshot */ 8175 ctx->snapshot->back_bs_dev = NULL; 8176 ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 8177 } else if (ctx->parent_snapshot_entry != NULL) { 8178 /* ...to parent snapshot */ 8179 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 8180 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8181 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 8182 sizeof(spdk_blob_id), 8183 true); 8184 } else { 8185 /* ...to blobid invalid and zeroes dev */ 8186 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 8187 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 8188 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 8189 } 8190 8191 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 8192 } 8193 8194 static void 8195 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 8196 { 8197 struct delete_snapshot_ctx *ctx = cb_arg; 8198 uint32_t *extent_page; 8199 uint64_t i; 8200 8201 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 8202 i < ctx->clone->active.num_extent_pages; i++) { 8203 if (ctx->snapshot->active.extent_pages[i] == 0) { 8204 /* No extent page to use from snapshot */ 8205 continue; 8206 } 8207 8208 extent_page = &ctx->clone->active.extent_pages[i]; 8209 if (*extent_page == 0) { 8210 /* Copy extent page from snapshot when clone did not have a matching one */ 8211 *extent_page = ctx->snapshot->active.extent_pages[i]; 8212 continue; 8213 } 8214 8215 /* Clone and snapshot both contain partially filled matching extent pages. 8216 * Update the clone extent page in place with cluster map containing the mix of both. 
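 * blob_write_extent_page() below is asynchronous; this function is passed as
 * its completion callback and is re-entered, resuming the scan from
 * ctx->next_extent_page.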
*/ 8217 ctx->next_extent_page = i + 1; 8218 memset(ctx->page, 0, SPDK_BS_PAGE_SIZE); 8219 8220 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page, 8221 delete_snapshot_update_extent_pages, ctx); 8222 return; 8223 } 8224 delete_snapshot_update_extent_pages_cpl(ctx); 8225 } 8226 8227 static void 8228 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 8229 { 8230 struct delete_snapshot_ctx *ctx = cb_arg; 8231 uint64_t i; 8232 8233 /* Temporarily override md_ro flag for clone for MD modification */ 8234 ctx->clone_md_ro = ctx->clone->md_ro; 8235 ctx->clone->md_ro = false; 8236 8237 if (bserrno) { 8238 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 8239 ctx->bserrno = bserrno; 8240 delete_snapshot_cleanup_clone(ctx, 0); 8241 return; 8242 } 8243 8244 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 8245 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8246 if (ctx->clone->active.clusters[i] == 0) { 8247 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 8248 if (ctx->clone->active.clusters[i] != 0) { 8249 ctx->clone->active.num_allocated_clusters++; 8250 } 8251 } 8252 } 8253 ctx->next_extent_page = 0; 8254 delete_snapshot_update_extent_pages(ctx, 0); 8255 } 8256 8257 static void 8258 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno) 8259 { 8260 struct delete_snapshot_ctx *ctx = cb_arg; 8261 8262 if (bserrno != 0) { 8263 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n", 8264 blob->id, bserrno); 8265 /* That error should not stop us from syncing metadata. */ 8266 } 8267 8268 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8269 } 8270 8271 static void 8272 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 8273 { 8274 struct delete_snapshot_ctx *ctx = cb_arg; 8275 8276 if (bserrno) { 8277 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 8278 ctx->bserrno = bserrno; 8279 delete_snapshot_cleanup_clone(ctx, 0); 8280 return; 8281 } 8282 8283 /* Temporarily override md_ro flag for snapshot for MD modification */ 8284 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 8285 ctx->snapshot->md_ro = false; 8286 8287 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 8288 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 8289 sizeof(spdk_blob_id), true); 8290 if (ctx->bserrno != 0) { 8291 delete_snapshot_cleanup_clone(ctx, 0); 8292 return; 8293 } 8294 8295 if (blob_is_esnap_clone(ctx->snapshot)) { 8296 blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false, 8297 delete_snapshot_esnap_channels_destroyed_cb, 8298 ctx); 8299 return; 8300 } 8301 8302 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8303 } 8304 8305 static void 8306 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 8307 { 8308 struct delete_snapshot_ctx *ctx = cb_arg; 8309 8310 if (bserrno) { 8311 SPDK_ERRLOG("Failed to open clone\n"); 8312 ctx->bserrno = bserrno; 8313 delete_snapshot_cleanup_snapshot(ctx, 0); 8314 return; 8315 } 8316 8317 ctx->clone = clone; 8318 8319 if (clone->locked_operation_in_progress) { 8320 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 8321 ctx->bserrno = -EBUSY; 8322 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8323 return; 8324 } 8325 8326 
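/* Lock the clone against other operations for the duration of the deletion,
 * then freeze its I/O before the cluster maps are merged.
 */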
clone->locked_operation_in_progress = true; 8327 8328 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 8329 } 8330 8331 static void 8332 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 8333 { 8334 struct spdk_blob_list *snapshot_entry = NULL; 8335 struct spdk_blob_list *clone_entry = NULL; 8336 struct spdk_blob_list *snapshot_clone_entry = NULL; 8337 8338 /* Get snapshot entry for the snapshot we want to remove */ 8339 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 8340 8341 assert(snapshot_entry != NULL); 8342 8343 /* Get clone of the snapshot (at this point there can be only one clone) */ 8344 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8345 assert(snapshot_entry->clone_count == 1); 8346 assert(clone_entry != NULL); 8347 8348 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8349 * snapshot that we are removing */ 8350 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 8351 &snapshot_clone_entry); 8352 8353 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 8354 } 8355 8356 static void 8357 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 8358 { 8359 spdk_bs_sequence_t *seq = cb_arg; 8360 struct spdk_blob_list *snapshot_entry = NULL; 8361 uint32_t page_num; 8362 8363 if (bserrno) { 8364 SPDK_ERRLOG("Failed to remove blob\n"); 8365 bs_sequence_finish(seq, bserrno); 8366 return; 8367 } 8368 8369 /* Remove snapshot from the list */ 8370 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8371 if (snapshot_entry != NULL) { 8372 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 8373 free(snapshot_entry); 8374 } 8375 8376 page_num = bs_blobid_to_page(blob->id); 8377 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 8378 blob->state = SPDK_BLOB_STATE_DIRTY; 8379 blob->active.num_pages = 0; 8380 blob_resize(blob, 0); 8381 8382 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 8383 } 8384 8385 static int 8386 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 8387 { 8388 struct spdk_blob_list *snapshot_entry = NULL; 8389 struct spdk_blob_list *clone_entry = NULL; 8390 struct spdk_blob *clone = NULL; 8391 bool has_one_clone = false; 8392 8393 /* Check if this is a snapshot with clones */ 8394 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8395 if (snapshot_entry != NULL) { 8396 if (snapshot_entry->clone_count > 1) { 8397 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 8398 return -EBUSY; 8399 } else if (snapshot_entry->clone_count == 1) { 8400 has_one_clone = true; 8401 } 8402 } 8403 8404 /* Check if someone has this blob open (besides this delete context): 8405 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 8406 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 8407 * and that is ok, because we will update it accordingly */ 8408 if (blob->open_ref <= 2 && has_one_clone) { 8409 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8410 assert(clone_entry != NULL); 8411 clone = blob_lookup(blob->bs, clone_entry->id); 8412 8413 if (blob->open_ref == 2 && clone == NULL) { 8414 /* Clone is closed and someone else opened this blob */ 8415 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8416 return -EBUSY; 8417 } 8418 8419 *update_clone = true; 8420 return 0; 8421 } 8422 8423 if (blob->open_ref > 1) { 8424 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8425 return 
-EBUSY; 8426 } 8427 8428 assert(has_one_clone == false); 8429 *update_clone = false; 8430 return 0; 8431 } 8432 8433 static void 8434 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 8435 { 8436 spdk_bs_sequence_t *seq = cb_arg; 8437 8438 bs_sequence_finish(seq, -ENOMEM); 8439 } 8440 8441 static void 8442 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 8443 { 8444 spdk_bs_sequence_t *seq = cb_arg; 8445 struct delete_snapshot_ctx *ctx; 8446 bool update_clone = false; 8447 8448 if (bserrno != 0) { 8449 bs_sequence_finish(seq, bserrno); 8450 return; 8451 } 8452 8453 blob_verify_md_op(blob); 8454 8455 ctx = calloc(1, sizeof(*ctx)); 8456 if (ctx == NULL) { 8457 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 8458 return; 8459 } 8460 8461 ctx->snapshot = blob; 8462 ctx->cb_fn = bs_delete_blob_finish; 8463 ctx->cb_arg = seq; 8464 8465 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 8466 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 8467 if (ctx->bserrno) { 8468 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8469 return; 8470 } 8471 8472 if (blob->locked_operation_in_progress) { 8473 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 8474 ctx->bserrno = -EBUSY; 8475 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8476 return; 8477 } 8478 8479 blob->locked_operation_in_progress = true; 8480 8481 /* 8482 * Remove the blob from the blob_store list now, to ensure it does not 8483 * get returned after this point by blob_lookup(). 8484 */ 8485 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 8486 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 8487 8488 if (update_clone) { 8489 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 8490 if (!ctx->page) { 8491 ctx->bserrno = -ENOMEM; 8492 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8493 return; 8494 } 8495 /* This blob is a snapshot with active clone - update clone first */ 8496 update_clone_on_snapshot_deletion(blob, ctx); 8497 } else { 8498 /* This blob does not have any clones - just remove it */ 8499 bs_blob_list_remove(blob); 8500 bs_delete_blob_finish(seq, blob, 0); 8501 free(ctx); 8502 } 8503 } 8504 8505 void 8506 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8507 spdk_blob_op_complete cb_fn, void *cb_arg) 8508 { 8509 struct spdk_bs_cpl cpl; 8510 spdk_bs_sequence_t *seq; 8511 8512 SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid); 8513 8514 assert(spdk_get_thread() == bs->md_thread); 8515 8516 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8517 cpl.u.blob_basic.cb_fn = cb_fn; 8518 cpl.u.blob_basic.cb_arg = cb_arg; 8519 8520 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8521 if (!seq) { 8522 cb_fn(cb_arg, -ENOMEM); 8523 return; 8524 } 8525 8526 spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq); 8527 } 8528 8529 /* END spdk_bs_delete_blob */ 8530 8531 /* START spdk_bs_open_blob */ 8532 8533 static void 8534 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8535 { 8536 struct spdk_blob *blob = cb_arg; 8537 struct spdk_blob *existing; 8538 8539 if (bserrno != 0) { 8540 blob_free(blob); 8541 seq->cpl.u.blob_handle.blob = NULL; 8542 bs_sequence_finish(seq, bserrno); 8543 return; 8544 } 8545 8546 existing = blob_lookup(blob->bs, blob->id); 8547 if (existing) { 8548 blob_free(blob); 8549 existing->open_ref++; 8550 seq->cpl.u.blob_handle.blob = existing; 8551 bs_sequence_finish(seq, 0); 8552 return; 8553 } 8554 8555 
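/* First open of this blob: take the initial reference and register the blob
 * so that subsequent blob_lookup() calls find it.
 */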
blob->open_ref++; 8556 8557 spdk_bit_array_set(blob->bs->open_blobids, blob->id); 8558 RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob); 8559 8560 bs_sequence_finish(seq, bserrno); 8561 } 8562 8563 static inline void 8564 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst) 8565 { 8566 #define FIELD_OK(field) \ 8567 offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size 8568 8569 #define SET_FIELD(field) \ 8570 if (FIELD_OK(field)) { \ 8571 dst->field = src->field; \ 8572 } \ 8573 8574 SET_FIELD(clear_method); 8575 SET_FIELD(esnap_ctx); 8576 8577 dst->opts_size = src->opts_size; 8578 8579 /* You should not remove this statement, but need to update the assert statement 8580 * if you add a new field, and also add a corresponding SET_FIELD statement */ 8581 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size"); 8582 8583 #undef FIELD_OK 8584 #undef SET_FIELD 8585 } 8586 8587 static void 8588 bs_open_blob(struct spdk_blob_store *bs, 8589 spdk_blob_id blobid, 8590 struct spdk_blob_open_opts *opts, 8591 spdk_blob_op_with_handle_complete cb_fn, 8592 void *cb_arg) 8593 { 8594 struct spdk_blob *blob; 8595 struct spdk_bs_cpl cpl; 8596 struct spdk_blob_open_opts opts_local; 8597 spdk_bs_sequence_t *seq; 8598 uint32_t page_num; 8599 8600 SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid); 8601 assert(spdk_get_thread() == bs->md_thread); 8602 8603 page_num = bs_blobid_to_page(blobid); 8604 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 8605 /* Invalid blobid */ 8606 cb_fn(cb_arg, NULL, -ENOENT); 8607 return; 8608 } 8609 8610 blob = blob_lookup(bs, blobid); 8611 if (blob) { 8612 blob->open_ref++; 8613 cb_fn(cb_arg, blob, 0); 8614 return; 8615 } 8616 8617 blob = blob_alloc(bs, blobid); 8618 if (!blob) { 8619 cb_fn(cb_arg, NULL, -ENOMEM); 8620 return; 8621 } 8622 8623 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 8624 if (opts) { 8625 blob_open_opts_copy(opts, &opts_local); 8626 } 8627 8628 blob->clear_method = opts_local.clear_method; 8629 8630 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 8631 cpl.u.blob_handle.cb_fn = cb_fn; 8632 cpl.u.blob_handle.cb_arg = cb_arg; 8633 cpl.u.blob_handle.blob = blob; 8634 cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx; 8635 8636 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8637 if (!seq) { 8638 blob_free(blob); 8639 cb_fn(cb_arg, NULL, -ENOMEM); 8640 return; 8641 } 8642 8643 blob_load(seq, blob, bs_open_blob_cpl, blob); 8644 } 8645 8646 void 8647 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8648 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8649 { 8650 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 8651 } 8652 8653 void 8654 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 8655 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8656 { 8657 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 8658 } 8659 8660 /* END spdk_bs_open_blob */ 8661 8662 /* START spdk_blob_set_read_only */ 8663 int 8664 spdk_blob_set_read_only(struct spdk_blob *blob) 8665 { 8666 blob_verify_md_op(blob); 8667 8668 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 8669 8670 blob->state = SPDK_BLOB_STATE_DIRTY; 8671 return 0; 8672 } 8673 /* END spdk_blob_set_read_only */ 8674 8675 /* START spdk_blob_sync_md */ 8676 8677 static void 8678 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8679 { 8680 struct spdk_blob *blob = cb_arg; 8681 8682 if (bserrno == 0 && 
(blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 8683 blob->data_ro = true; 8684 blob->md_ro = true; 8685 } 8686 8687 bs_sequence_finish(seq, bserrno); 8688 } 8689 8690 static void 8691 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8692 { 8693 struct spdk_bs_cpl cpl; 8694 spdk_bs_sequence_t *seq; 8695 8696 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8697 cpl.u.blob_basic.cb_fn = cb_fn; 8698 cpl.u.blob_basic.cb_arg = cb_arg; 8699 8700 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8701 if (!seq) { 8702 cb_fn(cb_arg, -ENOMEM); 8703 return; 8704 } 8705 8706 blob_persist(seq, blob, blob_sync_md_cpl, blob); 8707 } 8708 8709 void 8710 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8711 { 8712 blob_verify_md_op(blob); 8713 8714 SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id); 8715 8716 if (blob->md_ro) { 8717 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 8718 cb_fn(cb_arg, 0); 8719 return; 8720 } 8721 8722 blob_sync_md(blob, cb_fn, cb_arg); 8723 } 8724 8725 /* END spdk_blob_sync_md */ 8726 8727 struct spdk_blob_cluster_op_ctx { 8728 struct spdk_thread *thread; 8729 struct spdk_blob *blob; 8730 uint32_t cluster_num; /* cluster index in blob */ 8731 uint32_t cluster; /* cluster on disk */ 8732 uint32_t extent_page; /* extent page on disk */ 8733 struct spdk_blob_md_page *page; /* preallocated extent page */ 8734 int rc; 8735 spdk_blob_op_complete cb_fn; 8736 void *cb_arg; 8737 }; 8738 8739 static void 8740 blob_op_cluster_msg_cpl(void *arg) 8741 { 8742 struct spdk_blob_cluster_op_ctx *ctx = arg; 8743 8744 ctx->cb_fn(ctx->cb_arg, ctx->rc); 8745 free(ctx); 8746 } 8747 8748 static void 8749 blob_op_cluster_msg_cb(void *arg, int bserrno) 8750 { 8751 struct spdk_blob_cluster_op_ctx *ctx = arg; 8752 8753 ctx->rc = bserrno; 8754 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8755 } 8756 8757 static void 8758 blob_insert_new_ep_cb(void *arg, int bserrno) 8759 { 8760 struct spdk_blob_cluster_op_ctx *ctx = arg; 8761 uint32_t *extent_page; 8762 8763 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8764 *extent_page = ctx->extent_page; 8765 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8766 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8767 } 8768 8769 struct spdk_blob_write_extent_page_ctx { 8770 struct spdk_blob_store *bs; 8771 8772 uint32_t extent; 8773 struct spdk_blob_md_page *page; 8774 }; 8775 8776 static void 8777 blob_free_cluster_msg_cb(void *arg, int bserrno) 8778 { 8779 struct spdk_blob_cluster_op_ctx *ctx = arg; 8780 8781 spdk_spin_lock(&ctx->blob->bs->used_lock); 8782 bs_release_cluster(ctx->blob->bs, ctx->cluster); 8783 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8784 8785 ctx->rc = bserrno; 8786 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8787 } 8788 8789 static void 8790 blob_free_cluster_update_ep_cb(void *arg, int bserrno) 8791 { 8792 struct spdk_blob_cluster_op_ctx *ctx = arg; 8793 8794 if (bserrno != 0 || ctx->blob->bs->clean == 0) { 8795 blob_free_cluster_msg_cb(ctx, bserrno); 8796 return; 8797 } 8798 8799 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8800 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8801 } 8802 8803 static void 8804 blob_free_cluster_free_ep_cb(void *arg, int bserrno) 8805 { 8806 struct spdk_blob_cluster_op_ctx *ctx = arg; 8807 8808 spdk_spin_lock(&ctx->blob->bs->used_lock); 8809 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8810 bs_release_md_page(ctx->blob->bs, 
ctx->extent_page); 8811 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8812 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8813 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8814 } 8815 8816 static void 8817 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8818 { 8819 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8820 8821 free(ctx); 8822 bs_sequence_finish(seq, bserrno); 8823 } 8824 8825 static void 8826 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8827 { 8828 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8829 8830 if (bserrno != 0) { 8831 blob_persist_extent_page_cpl(seq, ctx, bserrno); 8832 return; 8833 } 8834 bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent), 8835 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 8836 blob_persist_extent_page_cpl, ctx); 8837 } 8838 8839 static void 8840 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 8841 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 8842 { 8843 struct spdk_blob_write_extent_page_ctx *ctx; 8844 spdk_bs_sequence_t *seq; 8845 struct spdk_bs_cpl cpl; 8846 8847 ctx = calloc(1, sizeof(*ctx)); 8848 if (!ctx) { 8849 cb_fn(cb_arg, -ENOMEM); 8850 return; 8851 } 8852 ctx->bs = blob->bs; 8853 ctx->extent = extent; 8854 ctx->page = page; 8855 8856 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8857 cpl.u.blob_basic.cb_fn = cb_fn; 8858 cpl.u.blob_basic.cb_arg = cb_arg; 8859 8860 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8861 if (!seq) { 8862 free(ctx); 8863 cb_fn(cb_arg, -ENOMEM); 8864 return; 8865 } 8866 8867 assert(page); 8868 page->next = SPDK_INVALID_MD_PAGE; 8869 page->id = blob->id; 8870 page->sequence_num = 0; 8871 8872 blob_serialize_extent_page(blob, cluster_num, page); 8873 8874 page->crc = blob_md_page_calc_crc(page); 8875 8876 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true); 8877 8878 bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx); 8879 } 8880 8881 static void 8882 blob_insert_cluster_msg(void *arg) 8883 { 8884 struct spdk_blob_cluster_op_ctx *ctx = arg; 8885 uint32_t *extent_page; 8886 8887 ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 8888 if (ctx->rc != 0) { 8889 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8890 return; 8891 } 8892 8893 if (ctx->blob->use_extent_table == false) { 8894 /* Extent table is not used, proceed with sync of md that will only use extents_rle. */ 8895 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8896 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8897 return; 8898 } 8899 8900 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8901 if (*extent_page == 0) { 8902 /* Extent page requires allocation. 8903 * It was already claimed in the used_md_pages map and placed in ctx. */ 8904 assert(ctx->extent_page != 0); 8905 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8906 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 8907 blob_insert_new_ep_cb, ctx); 8908 } else { 8909 /* It is possible for original thread to allocate extent page for 8910 * different cluster in the same extent page. In such case proceed with 8911 * updating the existing extent page, but release the additional one. 
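 * The extra page was claimed in the used_md_pages map before this message
 * was sent to the md thread, so it is released here under bs->used_lock.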
*/
8912 if (ctx->extent_page != 0) {
8913 spdk_spin_lock(&ctx->blob->bs->used_lock);
8914 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8915 bs_release_md_page(ctx->blob->bs, ctx->extent_page);
8916 spdk_spin_unlock(&ctx->blob->bs->used_lock);
8917 ctx->extent_page = 0;
8918 }
8919 /* Extent page already allocated.
8920 * Every cluster allocation requires just an update of a single extent page. */
8921 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
8922 blob_op_cluster_msg_cb, ctx);
8923 }
8924 }
8925
8926 static void
8927 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
8928 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page,
8929 spdk_blob_op_complete cb_fn, void *cb_arg)
8930 {
8931 struct spdk_blob_cluster_op_ctx *ctx;
8932
8933 ctx = calloc(1, sizeof(*ctx));
8934 if (ctx == NULL) {
8935 cb_fn(cb_arg, -ENOMEM);
8936 return;
8937 }
8938
8939 ctx->thread = spdk_get_thread();
8940 ctx->blob = blob;
8941 ctx->cluster_num = cluster_num;
8942 ctx->cluster = cluster;
8943 ctx->extent_page = extent_page;
8944 ctx->page = page;
8945 ctx->cb_fn = cb_fn;
8946 ctx->cb_arg = cb_arg;
8947
8948 spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx);
8949 }
8950
8951 static void
8952 blob_free_cluster_msg(void *arg)
8953 {
8954 struct spdk_blob_cluster_op_ctx *ctx = arg;
8955 uint32_t *extent_page;
8956 uint32_t start_cluster_idx;
8957 bool free_extent_page = true;
8958 size_t i;
8959
8960 ctx->cluster = bs_lba_to_cluster(ctx->blob->bs, ctx->blob->active.clusters[ctx->cluster_num]);
8961
8962 /* There were concurrent unmaps to the same cluster; only release the cluster on the first one */
8963 if (ctx->cluster == 0) {
8964 blob_op_cluster_msg_cb(ctx, 0);
8965 return;
8966 }
8967
8968 ctx->blob->active.clusters[ctx->cluster_num] = 0;
8969 if (ctx->cluster != 0) {
8970 ctx->blob->active.num_allocated_clusters--;
8971 }
8972
8973 if (ctx->blob->use_extent_table == false) {
8974 /* Extent table is not used, proceed with sync of md that will only use extents_rle.
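 * The cluster itself is returned to the used_clusters pool under used_lock
 * before the metadata sync is issued.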
*/
8975 spdk_spin_lock(&ctx->blob->bs->used_lock);
8976 bs_release_cluster(ctx->blob->bs, ctx->cluster);
8977 spdk_spin_unlock(&ctx->blob->bs->used_lock);
8978 ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8979 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8980 return;
8981 }
8982
8983 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8984
8985 /* There shouldn't be parallel release operations on the same cluster */
8986 assert(*extent_page == ctx->extent_page);
8987
8988 start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
8989 for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) {
8990 if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) {
8991 free_extent_page = false;
8992 break;
8993 }
8994 }
8995
8996 if (free_extent_page) {
8997 assert(ctx->extent_page != 0);
8998 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8999 ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0;
9000 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
9001 blob_free_cluster_free_ep_cb, ctx);
9002 } else {
9003 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
9004 blob_free_cluster_update_ep_cb, ctx);
9005 }
9006 }
9007
9008
9009 static void
9010 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page,
9011 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
9012 {
9013 struct spdk_blob_cluster_op_ctx *ctx;
9014
9015 ctx = calloc(1, sizeof(*ctx));
9016 if (ctx == NULL) {
9017 cb_fn(cb_arg, -ENOMEM);
9018 return;
9019 }
9020
9021 ctx->thread = spdk_get_thread();
9022 ctx->blob = blob;
9023 ctx->cluster_num = cluster_num;
9024 ctx->extent_page = extent_page;
9025 ctx->page = page;
9026 ctx->cb_fn = cb_fn;
9027 ctx->cb_arg = cb_arg;
9028
9029 spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx);
9030 }
9031
9032 /* START spdk_blob_close */
9033
9034 static void
9035 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9036 {
9037 struct spdk_blob *blob = cb_arg;
9038
9039 if (bserrno == 0) {
9040 blob->open_ref--;
9041 if (blob->open_ref == 0) {
9042 /*
9043 * Blobs with active.num_pages == 0 are deleted blobs.
9044 * These blobs are removed from the blob_store list
9045 * when the deletion process starts - so don't try to
9046 * remove them again.
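 * (bs_delete_open_cpl() cleared open_blobids and removed the blob from the
 * open_blobs tree before the persist sequence started.)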
9047 */ 9048 if (blob->active.num_pages > 0) { 9049 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 9050 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 9051 } 9052 blob_free(blob); 9053 } 9054 } 9055 9056 bs_sequence_finish(seq, bserrno); 9057 } 9058 9059 static void 9060 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno) 9061 { 9062 spdk_bs_sequence_t *seq = cb_arg; 9063 9064 if (bserrno != 0) { 9065 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n", 9066 blob->id, bserrno); 9067 bs_sequence_finish(seq, bserrno); 9068 return; 9069 } 9070 9071 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n", 9072 blob->id, spdk_thread_get_name(spdk_get_thread())); 9073 9074 /* Sync metadata */ 9075 blob_persist(seq, blob, blob_close_cpl, blob); 9076 } 9077 9078 void 9079 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 9080 { 9081 struct spdk_bs_cpl cpl; 9082 spdk_bs_sequence_t *seq; 9083 9084 blob_verify_md_op(blob); 9085 9086 SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id); 9087 9088 if (blob->open_ref == 0) { 9089 cb_fn(cb_arg, -EBADF); 9090 return; 9091 } 9092 9093 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 9094 cpl.u.blob_basic.cb_fn = cb_fn; 9095 cpl.u.blob_basic.cb_arg = cb_arg; 9096 9097 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 9098 if (!seq) { 9099 cb_fn(cb_arg, -ENOMEM); 9100 return; 9101 } 9102 9103 if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) { 9104 blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq); 9105 return; 9106 } 9107 9108 /* Sync metadata */ 9109 blob_persist(seq, blob, blob_close_cpl, blob); 9110 } 9111 9112 /* END spdk_blob_close */ 9113 9114 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 9115 { 9116 return spdk_get_io_channel(bs); 9117 } 9118 9119 void 9120 spdk_bs_free_io_channel(struct spdk_io_channel *channel) 9121 { 9122 blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel)); 9123 spdk_put_io_channel(channel); 9124 } 9125 9126 void 9127 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 9128 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9129 { 9130 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9131 SPDK_BLOB_UNMAP); 9132 } 9133 9134 void 9135 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 9136 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9137 { 9138 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9139 SPDK_BLOB_WRITE_ZEROES); 9140 } 9141 9142 void 9143 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 9144 void *payload, uint64_t offset, uint64_t length, 9145 spdk_blob_op_complete cb_fn, void *cb_arg) 9146 { 9147 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9148 SPDK_BLOB_WRITE); 9149 } 9150 9151 void 9152 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 9153 void *payload, uint64_t offset, uint64_t length, 9154 spdk_blob_op_complete cb_fn, void *cb_arg) 9155 { 9156 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9157 SPDK_BLOB_READ); 9158 } 9159 9160 void 9161 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 9162 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9163 spdk_blob_op_complete cb_fn, void *cb_arg) 
9164 { 9165 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 9166 } 9167 9168 void 9169 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 9170 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9171 spdk_blob_op_complete cb_fn, void *cb_arg) 9172 { 9173 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 9174 } 9175 9176 void 9177 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9178 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9179 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9180 { 9181 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 9182 io_opts); 9183 } 9184 9185 void 9186 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9187 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9188 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9189 { 9190 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 9191 io_opts); 9192 } 9193 9194 struct spdk_bs_iter_ctx { 9195 int64_t page_num; 9196 struct spdk_blob_store *bs; 9197 9198 spdk_blob_op_with_handle_complete cb_fn; 9199 void *cb_arg; 9200 }; 9201 9202 static void 9203 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 9204 { 9205 struct spdk_bs_iter_ctx *ctx = cb_arg; 9206 struct spdk_blob_store *bs = ctx->bs; 9207 spdk_blob_id id; 9208 9209 if (bserrno == 0) { 9210 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 9211 free(ctx); 9212 return; 9213 } 9214 9215 ctx->page_num++; 9216 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 9217 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 9218 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 9219 free(ctx); 9220 return; 9221 } 9222 9223 id = bs_page_to_blobid(ctx->page_num); 9224 9225 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 9226 } 9227 9228 void 9229 spdk_bs_iter_first(struct spdk_blob_store *bs, 9230 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9231 { 9232 struct spdk_bs_iter_ctx *ctx; 9233 9234 ctx = calloc(1, sizeof(*ctx)); 9235 if (!ctx) { 9236 cb_fn(cb_arg, NULL, -ENOMEM); 9237 return; 9238 } 9239 9240 ctx->page_num = -1; 9241 ctx->bs = bs; 9242 ctx->cb_fn = cb_fn; 9243 ctx->cb_arg = cb_arg; 9244 9245 bs_iter_cpl(ctx, NULL, -1); 9246 } 9247 9248 static void 9249 bs_iter_close_cpl(void *cb_arg, int bserrno) 9250 { 9251 struct spdk_bs_iter_ctx *ctx = cb_arg; 9252 9253 bs_iter_cpl(ctx, NULL, -1); 9254 } 9255 9256 void 9257 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 9258 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9259 { 9260 struct spdk_bs_iter_ctx *ctx; 9261 9262 assert(blob != NULL); 9263 9264 ctx = calloc(1, sizeof(*ctx)); 9265 if (!ctx) { 9266 cb_fn(cb_arg, NULL, -ENOMEM); 9267 return; 9268 } 9269 9270 ctx->page_num = bs_blobid_to_page(blob->id); 9271 ctx->bs = bs; 9272 ctx->cb_fn = cb_fn; 9273 ctx->cb_arg = cb_arg; 9274 9275 /* Close the existing blob */ 9276 spdk_blob_close(blob, bs_iter_close_cpl, ctx); 9277 } 9278 9279 static int 9280 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 9281 uint16_t value_len, bool internal) 9282 { 9283 struct spdk_xattr_tailq *xattrs; 9284 struct spdk_xattr *xattr; 9285 size_t desc_size; 9286 void *tmp; 9287 9288 blob_verify_md_op(blob); 9289 9290 if 
(blob->md_ro) {
9291 return -EPERM;
9292 }
9293
9294 desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
9295 if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
9296 SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page (max %zu)\n", name,
9297 desc_size, SPDK_BS_MAX_DESC_SIZE);
9298 return -ENOMEM;
9299 }
9300
9301 if (internal) {
9302 xattrs = &blob->xattrs_internal;
9303 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
9304 } else {
9305 xattrs = &blob->xattrs;
9306 }
9307
9308 TAILQ_FOREACH(xattr, xattrs, link) {
9309 if (!strcmp(name, xattr->name)) {
9310 tmp = malloc(value_len);
9311 if (!tmp) {
9312 return -ENOMEM;
9313 }
9314
9315 free(xattr->value);
9316 xattr->value_len = value_len;
9317 xattr->value = tmp;
9318 memcpy(xattr->value, value, value_len);
9319
9320 blob->state = SPDK_BLOB_STATE_DIRTY;
9321
9322 return 0;
9323 }
9324 }
9325
9326 xattr = calloc(1, sizeof(*xattr));
9327 if (!xattr) {
9328 return -ENOMEM;
9329 }
9330
9331 xattr->name = strdup(name);
9332 if (!xattr->name) {
9333 free(xattr);
9334 return -ENOMEM;
9335 }
9336
9337 xattr->value_len = value_len;
9338 xattr->value = malloc(value_len);
9339 if (!xattr->value) {
9340 free(xattr->name);
9341 free(xattr);
9342 return -ENOMEM;
9343 }
9344 memcpy(xattr->value, value, value_len);
9345 TAILQ_INSERT_TAIL(xattrs, xattr, link);
9346
9347 blob->state = SPDK_BLOB_STATE_DIRTY;
9348
9349 return 0;
9350 }
9351
9352 int
9353 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
9354 uint16_t value_len)
9355 {
9356 return blob_set_xattr(blob, name, value, value_len, false);
9357 }
9358
9359 static int
9360 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
9361 {
9362 struct spdk_xattr_tailq *xattrs;
9363 struct spdk_xattr *xattr;
9364
9365 blob_verify_md_op(blob);
9366
9367 if (blob->md_ro) {
9368 return -EPERM;
9369 }
9370 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
9371
9372 TAILQ_FOREACH(xattr, xattrs, link) {
9373 if (!strcmp(name, xattr->name)) {
9374 TAILQ_REMOVE(xattrs, xattr, link);
9375 free(xattr->value);
9376 free(xattr->name);
9377 free(xattr);
9378
9379 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
9380 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
9381 }
9382 blob->state = SPDK_BLOB_STATE_DIRTY;
9383
9384 return 0;
9385 }
9386 }
9387
9388 return -ENOENT;
9389 }
9390
9391 int
9392 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
9393 {
9394 return blob_remove_xattr(blob, name, false);
9395 }
9396
9397 static int
9398 blob_get_xattr_value(struct spdk_blob *blob, const char *name,
9399 const void **value, size_t *value_len, bool internal)
9400 {
9401 struct spdk_xattr *xattr;
9402 struct spdk_xattr_tailq *xattrs;
9403
9404 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
9405
9406 TAILQ_FOREACH(xattr, xattrs, link) {
9407 if (!strcmp(name, xattr->name)) {
9408 *value = xattr->value;
9409 *value_len = xattr->value_len;
9410 return 0;
9411 }
9412 }
9413 return -ENOENT;
9414 }
9415
9416 int
9417 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
9418 const void **value, size_t *value_len)
9419 {
9420 blob_verify_md_op(blob);
9421
9422 return blob_get_xattr_value(blob, name, value, value_len, false);
9423 }
9424
9425 struct spdk_xattr_names {
9426 uint32_t count;
9427 const char *names[0];
9428 };
9429
9430 static int
9431 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
9432 {
9433 struct spdk_xattr *xattr;
9434 int count = 0;
9435
9436 TAILQ_FOREACH(xattr, xattrs, link) {
9437 count++;
9438 }
9439
9440 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
9441 if (*names == NULL) {
9442 return -ENOMEM;
9443 }
9444
9445 TAILQ_FOREACH(xattr, xattrs, link) {
9446 (*names)->names[(*names)->count++] = xattr->name;
9447 }
9448
9449 return 0;
9450 }
9451
9452 int
9453 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
9454 {
9455 blob_verify_md_op(blob);
9456
9457 return blob_get_xattr_names(&blob->xattrs, names);
9458 }
9459
9460 uint32_t
9461 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
9462 {
9463 assert(names != NULL);
9464
9465 return names->count;
9466 }
9467
9468 const char *
9469 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
9470 {
9471 if (index >= names->count) {
9472 return NULL;
9473 }
9474
9475 return names->names[index];
9476 }
9477
9478 void
9479 spdk_xattr_names_free(struct spdk_xattr_names *names)
9480 {
9481 free(names);
9482 }
9483
9484 struct spdk_bs_type
9485 spdk_bs_get_bstype(struct spdk_blob_store *bs)
9486 {
9487 return bs->bstype;
9488 }
9489
9490 void
9491 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
9492 {
9493 memcpy(&bs->bstype, &bstype, sizeof(bstype));
9494 }
9495
9496 bool
9497 spdk_blob_is_read_only(struct spdk_blob *blob)
9498 {
9499 assert(blob != NULL);
9500 return (blob->data_ro || blob->md_ro);
9501 }
9502
9503 bool
9504 spdk_blob_is_snapshot(struct spdk_blob *blob)
9505 {
9506 struct spdk_blob_list *snapshot_entry;
9507
9508 assert(blob != NULL);
9509
9510 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
9511 if (snapshot_entry == NULL) {
9512 return false;
9513 }
9514
9515 return true;
9516 }
9517
9518 bool
9519 spdk_blob_is_clone(struct spdk_blob *blob)
9520 {
9521 assert(blob != NULL);
9522
9523 if (blob->parent_id != SPDK_BLOBID_INVALID &&
9524 blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
9525 assert(spdk_blob_is_thin_provisioned(blob));
9526 return true;
9527 }
9528
9529 return false;
9530 }
9531
9532 bool
9533 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
9534 {
9535 assert(blob != NULL);
9536 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
9537 }
9538
9539 bool
9540 spdk_blob_is_esnap_clone(const struct spdk_blob *blob)
9541 {
9542 return blob_is_esnap_clone(blob);
9543 }
9544
9545 static void
9546 blob_update_clear_method(struct spdk_blob *blob)
9547 {
9548 enum blob_clear_method stored_cm;
9549
9550 assert(blob != NULL);
9551
9552 /* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
9553 * in metadata previously. If something other than the default was
9554 * specified, ignore stored value and use what was passed in.
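 * For example, a blob whose metadata stores BLOB_CLEAR_WITH_UNMAP resolves
 * to unmap when opened with BLOB_CLEAR_WITH_DEFAULT, while opening it with
 * BLOB_CLEAR_WITH_WRITE_ZEROES keeps write-zeroes and logs a warning.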
static void
blob_update_clear_method(struct spdk_blob *blob)
{
	enum blob_clear_method stored_cm;

	assert(blob != NULL);

	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
	 * in metadata previously. If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
	 */
	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);

	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
		blob->clear_method = stored_cm;
	} else if (blob->clear_method != stored_cm) {
		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
			     blob->clear_method, stored_cm);
	}
}

spdk_blob_id
spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
			if (clone_entry->id == blob_id) {
				return snapshot_entry->id;
			}
		}
	}

	return SPDK_BLOBID_INVALID;
}

int
spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
		     size_t *count)
{
	struct spdk_blob_list *snapshot_entry, *clone_entry;
	size_t n;

	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
	if (snapshot_entry == NULL) {
		*count = 0;
		return 0;
	}

	if (ids == NULL || *count < snapshot_entry->clone_count) {
		*count = snapshot_entry->clone_count;
		return -ENOMEM;
	}
	*count = snapshot_entry->clone_count;

	n = 0;
	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
		ids[n++] = clone_entry->id;
	}

	return 0;
}

static void
bs_load_grow_continue(struct spdk_bs_load_ctx *ctx)
{
	int rc;

	if (ctx->super->size == 0) {
		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	}

	if (ctx->super->io_unit_size == 0) {
		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
	}

	/* Parse the super block */
	ctx->bs->clean = 1;
	ctx->bs->cluster_sz = ctx->super->cluster_size;
	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
	}
	ctx->bs->io_unit_size = ctx->super->io_unit_size;
	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
	if (rc < 0) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}
	ctx->bs->md_start = ctx->super->md_start;
	ctx->bs->md_len = ctx->super->md_len;
	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
	if (rc < 0) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
	ctx->bs->super_blob = ctx->super->super_blob;
	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));

	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
		SPDK_ERRLOG("Cannot grow an unclean blobstore, please load it normally to clean it.\n");
		bs_load_ctx_fail(ctx, -EIO);
		return;
	} else {
		bs_load_read_used_pages(ctx);
	}
}

static void
bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}
	bs_load_grow_continue(ctx);
}

static void
bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	spdk_free(ctx->mask);

	bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
			      bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
			      bs_load_grow_super_write_cpl, ctx);
}

static void
bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint64_t lba, lba_count;
	uint64_t dev_size;
	uint64_t total_clusters;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	/* The type must be correct */
	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	/* The length of the mask (in bits) must not be greater than
	 * the length of the buffer (converted to bits) */
	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
					     struct spdk_blob_md_page) * 8));
	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	total_clusters = dev_size / ctx->super->cluster_size;
	ctx->mask->length = total_clusters;

	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
	bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count,
			      bs_load_grow_used_clusters_write_cpl, ctx);
}

static void
bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx)
{
	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
	uint64_t lba, lba_count, mask_size;

	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	total_clusters = dev_size / ctx->super->cluster_size;
	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
				spdk_divide_round_up(total_clusters, 8),
				SPDK_BS_PAGE_SIZE);
	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
	/* No need to grow, or no space to grow */
	if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) {
		SPDK_DEBUGLOG(blob, "No grow\n");
		bs_load_grow_continue(ctx);
		return;
	}

	SPDK_DEBUGLOG(blob, "Resize blobstore\n");

	ctx->super->size = dev_size;
	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
	ctx->super->crc = blob_md_page_calc_crc(ctx->super);

	mask_size = used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY,
				 SPDK_MALLOC_DMA);
	if (!ctx->mask) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}
	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
			     bs_load_grow_used_clusters_read_cpl, ctx);
}

static void
bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	int rc;

	rc = bs_super_validate(ctx->super, ctx->bs);
	if (rc != 0) {
		bs_load_ctx_fail(ctx, rc);
		return;
	}

	bs_load_try_to_grow(ctx);
}
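/*
 * Worked example of the sizing math in bs_load_try_to_grow() above, with
 * hypothetical numbers: a 1 TiB device with a 1 MiB cluster_size yields
 * total_clusters = 1048576, so the cluster bitmask needs 131072 bytes plus
 * the struct spdk_bs_md_mask header; with 4 KiB metadata pages,
 * used_cluster_mask_len rounds up to 33 pages. Growing is only attempted
 * when that many pages fit between used_cluster_mask_start and
 * used_blobid_mask_start in the metadata region.
 */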
struct spdk_bs_grow_ctx {
	struct spdk_blob_store *bs;
	struct spdk_bs_super_block *super;

	struct spdk_bit_pool *new_used_clusters;
	struct spdk_bs_md_mask *new_used_clusters_mask;

	spdk_bs_sequence_t *seq;
};

static void
bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno)
{
	if (bserrno != 0) {
		spdk_bit_pool_free(&ctx->new_used_clusters);
	}

	bs_sequence_finish(ctx->seq, bserrno);
	free(ctx->new_used_clusters_mask);
	spdk_free(ctx->super);
	free(ctx);
}

static void
bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_grow_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;
	uint64_t total_clusters;

	if (bserrno != 0) {
		bs_grow_live_done(ctx, bserrno);
		return;
	}

	/*
	 * The blobstore is not clean until it is unloaded; for now only the super
	 * block is up to date. This is similar to the state right after blobstore
	 * init, before bs_write_used_md() has executed.
	 * When cleanly unloaded, the used md pages will be written out.
	 * In case of an unclean shutdown, loading the blobstore will go through the
	 * recovery path, correctly filling out used_clusters with the new size and
	 * writing it out.
	 */
	bs->clean = 0;

	/* Reverting super->size past this point is complex; avoid any error paths
	 * that would require doing so. */
	spdk_spin_lock(&bs->used_lock);

	total_clusters = ctx->super->size / ctx->super->cluster_size;

	assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters));
	spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask);

	assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters));
	spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask);

	spdk_bit_pool_free(&bs->used_clusters);
	bs->used_clusters = ctx->new_used_clusters;

	bs->total_clusters = total_clusters;
	bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up(
					  bs->md_start + bs->md_len, bs->pages_per_cluster);

	bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters);
	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
	spdk_spin_unlock(&bs->used_lock);

	bs_grow_live_done(ctx, 0);
}
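/*
 * Minimal sketch of the bit-pool swap performed above, under the assumption
 * that a pool grows from old_sz to new_sz bits: the old pool's allocation
 * mask is serialized into a zeroed buffer large enough for the new pool and
 * then loaded into the new pool, so every previously allocated bit stays
 * allocated.
 *
 *	struct spdk_bit_pool *new_pool = spdk_bit_pool_create(new_sz);
 *
 *	spdk_bit_pool_store_mask(old_pool, mask_buf);	// mask_buf is zero-filled
 *	spdk_bit_pool_load_mask(new_pool, mask_buf);
 *	spdk_bit_pool_free(&old_pool);
 */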
static void
bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_grow_ctx *ctx = cb_arg;
	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
	int rc;

	if (bserrno != 0) {
		bs_grow_live_done(ctx, bserrno);
		return;
	}

	rc = bs_super_validate(ctx->super, ctx->bs);
	if (rc != 0) {
		bs_grow_live_done(ctx, rc);
		return;
	}

	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	total_clusters = dev_size / ctx->super->cluster_size;
	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
				spdk_divide_round_up(total_clusters, 8),
				SPDK_BS_PAGE_SIZE);
	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
	/* Only check dev_size here: it can change while total_clusters stays the same. */
	if (dev_size == ctx->super->size) {
		SPDK_DEBUGLOG(blob, "No need to grow blobstore\n");
		bs_grow_live_done(ctx, 0);
		return;
	}
	/*
	 * The blobstore cannot be shrunk, so fail if:
	 * - the new device size is smaller than the size recorded in the super block
	 * - the new total number of clusters is smaller than the used_clusters bit pool
	 * - there is not enough metadata space for used_cluster_mask to be written out
	 */
	if (dev_size < ctx->super->size ||
	    total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) ||
	    used_cluster_mask_len > max_used_cluster_mask) {
		SPDK_DEBUGLOG(blob, "No space to grow blobstore\n");
		bs_grow_live_done(ctx, -ENOSPC);
		return;
	}

	SPDK_DEBUGLOG(blob, "Resizing blobstore\n");

	ctx->new_used_clusters_mask = calloc(1, total_clusters);
	if (!ctx->new_used_clusters_mask) {
		bs_grow_live_done(ctx, -ENOMEM);
		return;
	}
	ctx->new_used_clusters = spdk_bit_pool_create(total_clusters);
	if (!ctx->new_used_clusters) {
		bs_grow_live_done(ctx, -ENOMEM);
		return;
	}

	ctx->super->clean = 0;
	ctx->super->size = dev_size;
	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
	bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx);
}

void
spdk_bs_grow_live(struct spdk_blob_store *bs,
		  spdk_bs_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	struct spdk_bs_grow_ctx *ctx;

	assert(spdk_get_thread() == bs->md_thread);

	SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev);

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = cb_fn;
	cpl.u.bs_basic.cb_arg = cb_arg;

	ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->bs = bs;

	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
				  SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->super) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!ctx->seq) {
		spdk_free(ctx->super);
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Read the super block */
	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_grow_live_load_super_cpl, ctx);
}
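/*
 * Illustrative sketch: growing a loaded blobstore in place after its
 * underlying device has been expanded. Must run on the metadata thread;
 * "bs" is an assumed loaded blobstore handle.
 *
 *	static void
 *	grow_done(void *cb_arg, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *
 *		if (bserrno == 0) {
 *			printf("free clusters: %" PRIu64 "\n",
 *			       spdk_bs_free_cluster_count(bs));
 *		}
 *	}
 *
 *	spdk_bs_grow_live(bs, grow_done, bs);
 */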
void
spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_cpl cpl;
	struct spdk_bs_load_ctx *ctx;
	struct spdk_bs_opts opts = {};
	int err;

	SPDK_DEBUGLOG(blob, "Growing blobstore from dev %p\n", dev);

	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
		SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen);
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, -EINVAL);
		return;
	}

	spdk_bs_opts_init(&opts, sizeof(opts));
	if (o) {
		if (bs_opts_copy(o, &opts)) {
			return;
		}
	}

	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, -EINVAL);
		return;
	}

	err = bs_alloc(dev, &opts, &bs, &ctx);
	if (err) {
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, err);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
	cpl.u.bs_handle.cb_fn = cb_fn;
	cpl.u.bs_handle.cb_arg = cb_arg;
	cpl.u.bs_handle.bs = bs;

	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!ctx->seq) {
		spdk_free(ctx->super);
		free(ctx);
		bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	/* Read the super block */
	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_grow_load_super_cpl, ctx);
}

int
spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len)
{
	if (!blob_is_esnap_clone(blob)) {
		return -EINVAL;
	}

	return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true);
}

struct spdk_io_channel *
blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob)
{
	struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(ch);
	struct spdk_bs_dev *bs_dev = blob->back_bs_dev;
	struct blob_esnap_channel find = {};
	struct blob_esnap_channel *esnap_channel, *existing;

	find.blob_id = blob->id;
	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
	if (spdk_likely(esnap_channel != NULL)) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n",
			      blob->id, spdk_thread_get_name(spdk_get_thread()));
		return esnap_channel->channel;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n",
		      blob->id, spdk_thread_get_name(spdk_get_thread()));

	esnap_channel = calloc(1, sizeof(*esnap_channel));
	if (esnap_channel == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n",
			       find.blob_id);
		return NULL;
	}
	esnap_channel->channel = bs_dev->create_channel(bs_dev);
	if (esnap_channel->channel == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id);
		free(esnap_channel);
		return NULL;
	}
	esnap_channel->blob_id = find.blob_id;
	existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
	if (spdk_unlikely(existing != NULL)) {
		/*
		 * This should be unreachable: all modifications to this tree happen on this thread.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 " lost race to allocate a channel\n", find.blob_id);
		assert(false);

		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
		free(esnap_channel);

		return existing->channel;
	}

	return esnap_channel->channel;
}
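/*
 * Illustrative sketch: reading back the external snapshot ID that was
 * supplied when the esnap clone was created. The returned pointer borrows
 * the blob's internal xattr storage, so it must not be freed by the caller.
 * "blob" is an assumed open esnap clone handle.
 *
 *	const void *id;
 *	size_t id_len;
 *
 *	if (spdk_blob_get_esnap_id(blob, &id, &id_len) == 0) {
 *		// id/id_len identify the external snapshot device
 *	}
 */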
static int
blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2)
{
	return (c1->blob_id < c2->blob_id ? -1 : c1->blob_id > c2->blob_id);
}

struct blob_esnap_destroy_ctx {
	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
	struct spdk_blob *blob;
	struct spdk_bs_dev *back_bs_dev;
	bool abort_io;
};

static void
blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status)
{
	struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n",
		      blob->id);

	if (ctx->cb_fn != NULL) {
		ctx->cb_fn(ctx->cb_arg, blob, status);
	}
	free(ctx);

	bs->esnap_channels_unloading--;
	if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) {
		spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg);
	}
}

static void
blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i)
{
	struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_blob *blob = ctx->blob;
	struct spdk_bs_dev *bs_dev = ctx->back_bs_dev;
	struct spdk_io_channel *channel = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(channel);
	struct blob_esnap_channel *esnap_channel;
	struct blob_esnap_channel find = {};

	assert(spdk_get_thread() == spdk_io_channel_get_thread(channel));

	find.blob_id = blob->id;
	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
	if (esnap_channel != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n",
			      blob->id, spdk_thread_get_name(spdk_get_thread()));
		RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);

		if (ctx->abort_io) {
			spdk_bs_user_op_t *op, *tmp;

			TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) {
				if (op->back_channel == esnap_channel->channel) {
					TAILQ_REMOVE(&bs_channel->queued_io, op, link);
					bs_user_op_abort(op, -EIO);
				}
			}
		}

		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
		free(esnap_channel);
	}

	spdk_for_each_channel_continue(i, 0);
}
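/*
 * Minimal sketch of the spdk_for_each_channel() fan-out pattern used here:
 * the per-channel callback runs once on every thread that owns an IO channel
 * for the io_device, and the completion callback runs once after each thread
 * has called spdk_for_each_channel_continue(). Names below are placeholders.
 *
 *	static void
 *	per_channel(struct spdk_io_channel_iter *i)
 *	{
 *		// per-thread work on spdk_io_channel_iter_get_channel(i)
 *		spdk_for_each_channel_continue(i, 0);
 *	}
 *
 *	static void
 *	fan_out_done(struct spdk_io_channel_iter *i, int status)
 *	{
 *		// runs once, after all threads have finished
 *	}
 *
 *	spdk_for_each_channel(io_device, per_channel, ctx, fan_out_done);
 */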
/*
 * Destroy the channels for a specific blob on each thread with a blobstore channel.
 * This should be used when closing an esnap clone blob and after decoupling from
 * the parent.
 */
static void
blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
				   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct blob_esnap_destroy_ctx *ctx;

	if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) {
		if (cb_fn != NULL) {
			cb_fn(cb_arg, blob, 0);
		}
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		if (cb_fn != NULL) {
			cb_fn(cb_arg, blob, -ENOMEM);
		}
		return;
	}
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->blob = blob;
	ctx->back_bs_dev = blob->back_bs_dev;
	ctx->abort_io = abort_io;

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n",
		      blob->id);

	blob->bs->esnap_channels_unloading++;
	spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx,
			      blob_esnap_destroy_channels_done);
}

/*
 * Destroy all bs_dev channels on a specific blobstore channel. This should be used
 * when a bs_channel is destroyed.
 */
static void
blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch)
{
	struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp;

	assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch)));

	SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n",
		      spdk_thread_get_name(spdk_get_thread()));
	RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels,
			esnap_channel_tmp) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64
			      ": destroying one channel in thread %s\n",
			      esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread()));
		RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel);
		spdk_put_io_channel(esnap_channel->channel);
		free(esnap_channel);
	}
	SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n",
		      spdk_thread_get_name(spdk_get_thread()));
}
static void
blob_set_back_bs_dev_done(void *_ctx, int bserrno)
{
	struct set_bs_dev_ctx *ctx = _ctx;

	if (bserrno != 0) {
		/* Even though the unfreeze failed, the update may have succeeded. */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id,
			    bserrno);
	}
	ctx->cb_fn(ctx->cb_arg, ctx->bserrno);
	free(ctx);
}

static void
blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno)
{
	struct set_bs_dev_ctx *ctx = _ctx;
	int rc;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n",
			    blob->id, bserrno);
		ctx->bserrno = bserrno;
		blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
		return;
	}

	if (blob->back_bs_dev != NULL) {
		blob_unref_back_bs_dev(blob);
	}

	if (ctx->parent_refs_cb_fn) {
		rc = ctx->parent_refs_cb_fn(blob, ctx->parent_refs_cb_arg);
		if (rc != 0) {
			ctx->bserrno = rc;
			blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
			return;
		}
	}

	SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id);
	blob->back_bs_dev = ctx->back_bs_dev;
	ctx->bserrno = 0;

	blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
}

static void
blob_set_back_bs_dev_frozen(void *_ctx, int bserrno)
{
	struct set_bs_dev_ctx *ctx = _ctx;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id,
			    bserrno);
		ctx->cb_fn(ctx->cb_arg, bserrno);
		free(ctx);
		return;
	}

	/*
	 * This does not prevent future reads from the esnap device because any future IO will
	 * lazily create a new esnap IO channel.
	 */
	blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx);
}

void
spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
			   spdk_blob_op_complete cb_fn, void *cb_arg)
{
	if (!blob_is_esnap_clone(blob)) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	blob_set_back_bs_dev(blob, back_bs_dev, NULL, NULL, cb_fn, cb_arg);
}

struct spdk_bs_dev *
spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob)
{
	if (!blob_is_esnap_clone(blob)) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
		return NULL;
	}

	return blob->back_bs_dev;
}

bool
spdk_blob_is_degraded(const struct spdk_blob *blob)
{
	if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) {
		return true;
	}
	if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) {
		return false;
	}

	return blob->back_bs_dev->is_degraded(blob->back_bs_dev);
}

SPDK_LOG_REGISTER_COMPONENT(blob)
SPDK_LOG_REGISTER_COMPONENT(blob_esnap)
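/*
 * Illustrative sketch: a typical esnap hotplug sequence. When the external
 * snapshot device reappears, a new spdk_bs_dev wrapping it can be swapped in
 * with spdk_blob_set_esnap_bs_dev(); spdk_blob_is_degraded() reports whether
 * IO would still hit a missing device. "blob" and "new_bs_dev" are assumed.
 *
 *	static void
 *	hotplug_done(void *cb_arg, int bserrno)
 *	{
 *		// bserrno == 0 means the new back_bs_dev is in place
 *	}
 *
 *	if (spdk_blob_is_degraded(blob)) {
 *		spdk_blob_set_esnap_bs_dev(blob, new_bs_dev, hotplug_done, NULL);
 *	}
 */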
static void
blob_trace(void)
{
	struct spdk_trace_tpoint_opts opts[] = {
		{
			"BLOB_REQ_SET_START", TRACE_BLOB_REQ_SET_START,
			OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 1,
			{
				{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 }
			}
		},
		{
			"BLOB_REQ_SET_COMPLETE", TRACE_BLOB_REQ_SET_COMPLETE,
			OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 0,
			{
				{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 }
			}
		},
	};

	spdk_trace_register_object(OBJECT_BLOB_CB_ARG, 'a');
	spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
	spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_START, OBJECT_BLOB_CB_ARG, 1);
	spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_DONE, OBJECT_BLOB_CB_ARG, 0);
}
SPDK_TRACE_REGISTER_FN(blob_trace, "blob", TRACE_GROUP_BLOB)