1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2017 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 9 #include "spdk/blob.h" 10 #include "spdk/crc32.h" 11 #include "spdk/env.h" 12 #include "spdk/queue.h" 13 #include "spdk/thread.h" 14 #include "spdk/bit_array.h" 15 #include "spdk/bit_pool.h" 16 #include "spdk/likely.h" 17 #include "spdk/util.h" 18 #include "spdk/string.h" 19 #include "spdk/trace.h" 20 21 #include "spdk_internal/assert.h" 22 #include "spdk_internal/trace_defs.h" 23 #include "spdk/log.h" 24 25 #include "blobstore.h" 26 27 #define BLOB_CRC32C_INITIAL 0xffffffffUL 28 29 static int bs_register_md_thread(struct spdk_blob_store *bs); 30 static int bs_unregister_md_thread(struct spdk_blob_store *bs); 31 static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno); 32 static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 33 uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page, 34 spdk_blob_op_complete cb_fn, void *cb_arg); 35 static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 36 uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg); 37 38 static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 39 uint16_t value_len, bool internal); 40 static int blob_get_xattr_value(struct spdk_blob *blob, const char *name, 41 const void **value, size_t *value_len, bool internal); 42 static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal); 43 44 static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 45 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg); 46 static void blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg); 47 48 static void bs_shallow_copy_cluster_find_next(void *cb_arg); 49 50 /* 51 * External snapshots require a channel per thread per esnap bdev. The tree 52 * is populated lazily as blob IOs are handled by the back_bs_dev. When this 53 * channel is destroyed, all the channels in the tree are destroyed. 54 */ 55 56 struct blob_esnap_channel { 57 RB_ENTRY(blob_esnap_channel) node; 58 spdk_blob_id blob_id; 59 struct spdk_io_channel *channel; 60 }; 61 62 static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2); 63 static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io, 64 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg); 65 static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch); 66 static void blob_set_back_bs_dev_frozen(void *_ctx, int bserrno); 67 RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare) 68 69 static inline bool 70 blob_is_esnap_clone(const struct spdk_blob *blob) 71 { 72 assert(blob != NULL); 73 return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT); 74 } 75 76 static int 77 blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2) 78 { 79 assert(blob1 != NULL && blob2 != NULL); 80 return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id); 81 } 82 83 RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp); 84 85 static void 86 blob_verify_md_op(struct spdk_blob *blob) 87 { 88 assert(blob != NULL); 89 assert(spdk_get_thread() == blob->bs->md_thread); 90 assert(blob->state != SPDK_BLOB_STATE_LOADING); 91 } 92 93 static struct spdk_blob_list * 94 bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid) 95 { 96 struct spdk_blob_list *snapshot_entry = NULL; 97 98 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 99 if (snapshot_entry->id == blobid) { 100 break; 101 } 102 } 103 104 return snapshot_entry; 105 } 106 107 static void 108 bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page) 109 { 110 assert(spdk_spin_held(&bs->used_lock)); 111 assert(page < spdk_bit_array_capacity(bs->used_md_pages)); 112 assert(spdk_bit_array_get(bs->used_md_pages, page) == false); 113 114 spdk_bit_array_set(bs->used_md_pages, page); 115 } 116 117 static void 118 bs_release_md_page(struct spdk_blob_store *bs, uint32_t page) 119 { 120 assert(spdk_spin_held(&bs->used_lock)); 121 assert(page < spdk_bit_array_capacity(bs->used_md_pages)); 122 assert(spdk_bit_array_get(bs->used_md_pages, page) == true); 123 124 spdk_bit_array_clear(bs->used_md_pages, page); 125 } 126 127 static uint32_t 128 bs_claim_cluster(struct spdk_blob_store *bs) 129 { 130 uint32_t cluster_num; 131 132 assert(spdk_spin_held(&bs->used_lock)); 133 134 cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters); 135 if (cluster_num == UINT32_MAX) { 136 return UINT32_MAX; 137 } 138 139 SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num); 140 bs->num_free_clusters--; 141 142 return cluster_num; 143 } 144 145 static void 146 bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num) 147 { 148 assert(spdk_spin_held(&bs->used_lock)); 149 assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters)); 150 assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true); 151 assert(bs->num_free_clusters < bs->total_clusters); 152 153 SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num); 154 155 spdk_bit_pool_free_bit(bs->used_clusters, cluster_num); 156 bs->num_free_clusters++; 157 } 158 159 static int 160 blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster) 161 { 162 uint64_t *cluster_lba = &blob->active.clusters[cluster_num]; 163 164 blob_verify_md_op(blob); 165 166 if (*cluster_lba != 0) { 167 return -EEXIST; 168 } 169 170 *cluster_lba = bs_cluster_to_lba(blob->bs, cluster); 171 blob->active.num_allocated_clusters++; 172 173 return 0; 174 } 175 176 static int 177 bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num, 178 uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map) 179 { 180 uint32_t *extent_page = 0; 181 182 assert(spdk_spin_held(&blob->bs->used_lock)); 183 184 *cluster = bs_claim_cluster(blob->bs); 185 if (*cluster == UINT32_MAX) { 186 /* No more free clusters. Cannot satisfy the request */ 187 return -ENOSPC; 188 } 189 190 if (blob->use_extent_table) { 191 extent_page = bs_cluster_to_extent_page(blob, cluster_num); 192 if (*extent_page == 0) { 193 /* Extent page shall never occupy md_page so start the search from 1 */ 194 if (*lowest_free_md_page == 0) { 195 *lowest_free_md_page = 1; 196 } 197 /* No extent_page is allocated for the cluster */ 198 *lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, 199 *lowest_free_md_page); 200 if (*lowest_free_md_page == UINT32_MAX) { 201 /* No more free md pages. Cannot satisfy the request */ 202 bs_release_cluster(blob->bs, *cluster); 203 return -ENOSPC; 204 } 205 bs_claim_md_page(blob->bs, *lowest_free_md_page); 206 } 207 } 208 209 SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster, 210 blob->id); 211 212 if (update_map) { 213 blob_insert_cluster(blob, cluster_num, *cluster); 214 if (blob->use_extent_table && *extent_page == 0) { 215 *extent_page = *lowest_free_md_page; 216 } 217 } 218 219 return 0; 220 } 221 222 static void 223 blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs) 224 { 225 xattrs->count = 0; 226 xattrs->names = NULL; 227 xattrs->ctx = NULL; 228 xattrs->get_value = NULL; 229 } 230 231 void 232 spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size) 233 { 234 if (!opts) { 235 SPDK_ERRLOG("opts should not be NULL\n"); 236 return; 237 } 238 239 if (!opts_size) { 240 SPDK_ERRLOG("opts_size should not be zero value\n"); 241 return; 242 } 243 244 memset(opts, 0, opts_size); 245 opts->opts_size = opts_size; 246 247 #define FIELD_OK(field) \ 248 offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size 249 250 #define SET_FIELD(field, value) \ 251 if (FIELD_OK(field)) { \ 252 opts->field = value; \ 253 } \ 254 255 SET_FIELD(num_clusters, 0); 256 SET_FIELD(thin_provision, false); 257 SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT); 258 259 if (FIELD_OK(xattrs)) { 260 blob_xattrs_init(&opts->xattrs); 261 } 262 263 SET_FIELD(use_extent_table, true); 264 265 #undef FIELD_OK 266 #undef SET_FIELD 267 } 268 269 void 270 spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size) 271 { 272 if (!opts) { 273 SPDK_ERRLOG("opts should not be NULL\n"); 274 return; 275 } 276 277 if (!opts_size) { 278 SPDK_ERRLOG("opts_size should not be zero value\n"); 279 return; 280 } 281 282 memset(opts, 0, opts_size); 283 opts->opts_size = opts_size; 284 285 #define FIELD_OK(field) \ 286 offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size 287 288 #define SET_FIELD(field, value) \ 289 if (FIELD_OK(field)) { \ 290 opts->field = value; \ 291 } \ 292 293 SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT); 294 295 #undef FIELD_OK 296 #undef SET_FILED 297 } 298 299 static struct spdk_blob * 300 blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id) 301 { 302 struct spdk_blob *blob; 303 304 blob = calloc(1, sizeof(*blob)); 305 if (!blob) { 306 return NULL; 307 } 308 309 blob->id = id; 310 blob->bs = bs; 311 312 blob->parent_id = SPDK_BLOBID_INVALID; 313 314 blob->state = SPDK_BLOB_STATE_DIRTY; 315 blob->extent_rle_found = false; 316 blob->extent_table_found = false; 317 blob->active.num_pages = 1; 318 blob->active.pages = calloc(1, sizeof(*blob->active.pages)); 319 if (!blob->active.pages) { 320 free(blob); 321 return NULL; 322 } 323 324 blob->active.pages[0] = bs_blobid_to_page(id); 325 326 TAILQ_INIT(&blob->xattrs); 327 TAILQ_INIT(&blob->xattrs_internal); 328 TAILQ_INIT(&blob->pending_persists); 329 TAILQ_INIT(&blob->persists_to_complete); 330 331 return blob; 332 } 333 334 static void 335 xattrs_free(struct spdk_xattr_tailq *xattrs) 336 { 337 struct spdk_xattr *xattr, *xattr_tmp; 338 339 TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) { 340 TAILQ_REMOVE(xattrs, xattr, link); 341 free(xattr->name); 342 free(xattr->value); 343 free(xattr); 344 } 345 } 346 347 static void 348 blob_unref_back_bs_dev(struct spdk_blob *blob) 349 { 350 blob->back_bs_dev->destroy(blob->back_bs_dev); 351 blob->back_bs_dev = NULL; 352 } 353 354 static void 355 blob_free(struct spdk_blob *blob) 356 { 357 assert(blob != NULL); 358 assert(TAILQ_EMPTY(&blob->pending_persists)); 359 assert(TAILQ_EMPTY(&blob->persists_to_complete)); 360 361 free(blob->active.extent_pages); 362 free(blob->clean.extent_pages); 363 free(blob->active.clusters); 364 free(blob->clean.clusters); 365 free(blob->active.pages); 366 free(blob->clean.pages); 367 368 xattrs_free(&blob->xattrs); 369 xattrs_free(&blob->xattrs_internal); 370 371 if (blob->back_bs_dev) { 372 blob_unref_back_bs_dev(blob); 373 } 374 375 free(blob); 376 } 377 378 static void 379 blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno) 380 { 381 struct spdk_bs_dev *bs_dev = ctx; 382 383 if (bserrno != 0) { 384 /* 385 * This is probably due to a memory allocation failure when creating the 386 * blob_esnap_destroy_ctx before iterating threads. 387 */ 388 SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n", 389 blob->id, bserrno); 390 assert(false); 391 } 392 393 if (bs_dev == NULL) { 394 /* 395 * This check exists to make scanbuild happy. 396 * 397 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while 398 * the blobstore is being loaded. It could also be NULL if there was an error 399 * opening the esnap device. In each of these cases, no channels could have been 400 * created because back_bs_dev->create_channel() would have led to a NULL pointer 401 * deref. 402 */ 403 assert(false); 404 return; 405 } 406 407 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id); 408 bs_dev->destroy(bs_dev); 409 } 410 411 static void 412 blob_back_bs_destroy(struct spdk_blob *blob) 413 { 414 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n", 415 blob->id); 416 417 blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done, 418 blob->back_bs_dev); 419 blob->back_bs_dev = NULL; 420 } 421 422 struct blob_parent { 423 union { 424 struct { 425 spdk_blob_id id; 426 struct spdk_blob *blob; 427 } snapshot; 428 429 struct { 430 void *id; 431 uint32_t id_len; 432 struct spdk_bs_dev *back_bs_dev; 433 } esnap; 434 } u; 435 }; 436 437 typedef int (*set_parent_refs_cb)(struct spdk_blob *blob, struct blob_parent *parent); 438 439 struct set_bs_dev_ctx { 440 struct spdk_blob *blob; 441 struct spdk_bs_dev *back_bs_dev; 442 443 /* 444 * This callback is used during a set parent operation to change the references 445 * to the parent of the blob. 446 */ 447 set_parent_refs_cb parent_refs_cb_fn; 448 struct blob_parent *parent_refs_cb_arg; 449 450 spdk_blob_op_complete cb_fn; 451 void *cb_arg; 452 int bserrno; 453 }; 454 455 static void 456 blob_set_back_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev, 457 set_parent_refs_cb parent_refs_cb_fn, struct blob_parent *parent_refs_cb_arg, 458 spdk_blob_op_complete cb_fn, void *cb_arg) 459 { 460 struct set_bs_dev_ctx *ctx; 461 462 ctx = calloc(1, sizeof(*ctx)); 463 if (ctx == NULL) { 464 SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n", 465 blob->id); 466 cb_fn(cb_arg, -ENOMEM); 467 return; 468 } 469 470 ctx->parent_refs_cb_fn = parent_refs_cb_fn; 471 ctx->parent_refs_cb_arg = parent_refs_cb_arg; 472 ctx->cb_fn = cb_fn; 473 ctx->cb_arg = cb_arg; 474 ctx->back_bs_dev = back_bs_dev; 475 ctx->blob = blob; 476 477 blob_freeze_io(blob, blob_set_back_bs_dev_frozen, ctx); 478 } 479 480 struct freeze_io_ctx { 481 struct spdk_bs_cpl cpl; 482 struct spdk_blob *blob; 483 }; 484 485 static void 486 blob_io_sync(struct spdk_io_channel_iter *i) 487 { 488 spdk_for_each_channel_continue(i, 0); 489 } 490 491 static void 492 blob_execute_queued_io(struct spdk_io_channel_iter *i) 493 { 494 struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i); 495 struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch); 496 struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 497 struct spdk_bs_request_set *set; 498 struct spdk_bs_user_op_args *args; 499 spdk_bs_user_op_t *op, *tmp; 500 501 TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) { 502 set = (struct spdk_bs_request_set *)op; 503 args = &set->u.user_op; 504 505 if (args->blob == ctx->blob) { 506 TAILQ_REMOVE(&ch->queued_io, op, link); 507 bs_user_op_execute(op); 508 } 509 } 510 511 spdk_for_each_channel_continue(i, 0); 512 } 513 514 static void 515 blob_io_cpl(struct spdk_io_channel_iter *i, int status) 516 { 517 struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 518 519 ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0); 520 521 free(ctx); 522 } 523 524 static void 525 blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 526 { 527 struct freeze_io_ctx *ctx; 528 529 blob_verify_md_op(blob); 530 531 ctx = calloc(1, sizeof(*ctx)); 532 if (!ctx) { 533 cb_fn(cb_arg, -ENOMEM); 534 return; 535 } 536 537 ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 538 ctx->cpl.u.blob_basic.cb_fn = cb_fn; 539 ctx->cpl.u.blob_basic.cb_arg = cb_arg; 540 ctx->blob = blob; 541 542 /* Freeze I/O on blob */ 543 blob->frozen_refcnt++; 544 545 spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl); 546 } 547 548 static void 549 blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 550 { 551 struct freeze_io_ctx *ctx; 552 553 blob_verify_md_op(blob); 554 555 ctx = calloc(1, sizeof(*ctx)); 556 if (!ctx) { 557 cb_fn(cb_arg, -ENOMEM); 558 return; 559 } 560 561 ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 562 ctx->cpl.u.blob_basic.cb_fn = cb_fn; 563 ctx->cpl.u.blob_basic.cb_arg = cb_arg; 564 ctx->blob = blob; 565 566 assert(blob->frozen_refcnt > 0); 567 568 blob->frozen_refcnt--; 569 570 spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl); 571 } 572 573 static int 574 blob_mark_clean(struct spdk_blob *blob) 575 { 576 uint32_t *extent_pages = NULL; 577 uint64_t *clusters = NULL; 578 uint32_t *pages = NULL; 579 580 assert(blob != NULL); 581 582 if (blob->active.num_extent_pages) { 583 assert(blob->active.extent_pages); 584 extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages)); 585 if (!extent_pages) { 586 return -ENOMEM; 587 } 588 memcpy(extent_pages, blob->active.extent_pages, 589 blob->active.num_extent_pages * sizeof(*extent_pages)); 590 } 591 592 if (blob->active.num_clusters) { 593 assert(blob->active.clusters); 594 clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters)); 595 if (!clusters) { 596 free(extent_pages); 597 return -ENOMEM; 598 } 599 memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters)); 600 } 601 602 if (blob->active.num_pages) { 603 assert(blob->active.pages); 604 pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages)); 605 if (!pages) { 606 free(extent_pages); 607 free(clusters); 608 return -ENOMEM; 609 } 610 memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 611 } 612 613 free(blob->clean.extent_pages); 614 free(blob->clean.clusters); 615 free(blob->clean.pages); 616 617 blob->clean.num_extent_pages = blob->active.num_extent_pages; 618 blob->clean.extent_pages = blob->active.extent_pages; 619 blob->clean.num_clusters = blob->active.num_clusters; 620 blob->clean.clusters = blob->active.clusters; 621 blob->clean.num_allocated_clusters = blob->active.num_allocated_clusters; 622 blob->clean.num_pages = blob->active.num_pages; 623 blob->clean.pages = blob->active.pages; 624 625 blob->active.extent_pages = extent_pages; 626 blob->active.clusters = clusters; 627 blob->active.pages = pages; 628 629 /* If the metadata was dirtied again while the metadata was being written to disk, 630 * we do not want to revert the DIRTY state back to CLEAN here. 631 */ 632 if (blob->state == SPDK_BLOB_STATE_LOADING) { 633 blob->state = SPDK_BLOB_STATE_CLEAN; 634 } 635 636 return 0; 637 } 638 639 static int 640 blob_deserialize_xattr(struct spdk_blob *blob, 641 struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal) 642 { 643 struct spdk_xattr *xattr; 644 645 if (desc_xattr->length != sizeof(desc_xattr->name_length) + 646 sizeof(desc_xattr->value_length) + 647 desc_xattr->name_length + desc_xattr->value_length) { 648 return -EINVAL; 649 } 650 651 xattr = calloc(1, sizeof(*xattr)); 652 if (xattr == NULL) { 653 return -ENOMEM; 654 } 655 656 xattr->name = malloc(desc_xattr->name_length + 1); 657 if (xattr->name == NULL) { 658 free(xattr); 659 return -ENOMEM; 660 } 661 662 xattr->value = malloc(desc_xattr->value_length); 663 if (xattr->value == NULL) { 664 free(xattr->name); 665 free(xattr); 666 return -ENOMEM; 667 } 668 669 memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length); 670 xattr->name[desc_xattr->name_length] = '\0'; 671 xattr->value_len = desc_xattr->value_length; 672 memcpy(xattr->value, 673 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 674 desc_xattr->value_length); 675 676 TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link); 677 678 return 0; 679 } 680 681 682 static int 683 blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob) 684 { 685 struct spdk_blob_md_descriptor *desc; 686 size_t cur_desc = 0; 687 void *tmp; 688 689 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 690 while (cur_desc < sizeof(page->descriptors)) { 691 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 692 if (desc->length == 0) { 693 /* If padding and length are 0, this terminates the page */ 694 break; 695 } 696 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 697 struct spdk_blob_md_descriptor_flags *desc_flags; 698 699 desc_flags = (struct spdk_blob_md_descriptor_flags *)desc; 700 701 if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) { 702 return -EINVAL; 703 } 704 705 if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) != 706 SPDK_BLOB_INVALID_FLAGS_MASK) { 707 return -EINVAL; 708 } 709 710 if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) != 711 SPDK_BLOB_DATA_RO_FLAGS_MASK) { 712 blob->data_ro = true; 713 blob->md_ro = true; 714 } 715 716 if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) != 717 SPDK_BLOB_MD_RO_FLAGS_MASK) { 718 blob->md_ro = true; 719 } 720 721 if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 722 blob->data_ro = true; 723 blob->md_ro = true; 724 } 725 726 blob->invalid_flags = desc_flags->invalid_flags; 727 blob->data_ro_flags = desc_flags->data_ro_flags; 728 blob->md_ro_flags = desc_flags->md_ro_flags; 729 730 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 731 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 732 unsigned int i, j; 733 unsigned int cluster_count = blob->active.num_clusters; 734 735 if (blob->extent_table_found) { 736 /* Extent Table already present in the md, 737 * both descriptors should never be at the same time. */ 738 return -EINVAL; 739 } 740 blob->extent_rle_found = true; 741 742 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 743 744 if (desc_extent_rle->length == 0 || 745 (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) { 746 return -EINVAL; 747 } 748 749 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 750 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 751 if (desc_extent_rle->extents[i].cluster_idx != 0) { 752 if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, 753 desc_extent_rle->extents[i].cluster_idx + j)) { 754 return -EINVAL; 755 } 756 } 757 cluster_count++; 758 } 759 } 760 761 if (cluster_count == 0) { 762 return -EINVAL; 763 } 764 tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters)); 765 if (tmp == NULL) { 766 return -ENOMEM; 767 } 768 blob->active.clusters = tmp; 769 blob->active.cluster_array_size = cluster_count; 770 771 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 772 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 773 if (desc_extent_rle->extents[i].cluster_idx != 0) { 774 blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs, 775 desc_extent_rle->extents[i].cluster_idx + j); 776 blob->active.num_allocated_clusters++; 777 } else if (spdk_blob_is_thin_provisioned(blob)) { 778 blob->active.clusters[blob->active.num_clusters++] = 0; 779 } else { 780 return -EINVAL; 781 } 782 } 783 } 784 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 785 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 786 uint32_t num_extent_pages = blob->active.num_extent_pages; 787 uint32_t i, j; 788 size_t extent_pages_length; 789 790 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 791 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 792 793 if (blob->extent_rle_found) { 794 /* This means that Extent RLE is present in MD, 795 * both should never be at the same time. */ 796 return -EINVAL; 797 } else if (blob->extent_table_found && 798 desc_extent_table->num_clusters != blob->remaining_clusters_in_et) { 799 /* Number of clusters in this ET does not match number 800 * from previously read EXTENT_TABLE. */ 801 return -EINVAL; 802 } 803 804 if (desc_extent_table->length == 0 || 805 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 806 return -EINVAL; 807 } 808 809 blob->extent_table_found = true; 810 811 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 812 num_extent_pages += desc_extent_table->extent_page[i].num_pages; 813 } 814 815 if (num_extent_pages > 0) { 816 tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t)); 817 if (tmp == NULL) { 818 return -ENOMEM; 819 } 820 blob->active.extent_pages = tmp; 821 } 822 blob->active.extent_pages_array_size = num_extent_pages; 823 824 blob->remaining_clusters_in_et = desc_extent_table->num_clusters; 825 826 /* Extent table entries contain md page numbers for extent pages. 827 * Zeroes represent unallocated extent pages, those are run-length-encoded. 828 */ 829 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 830 if (desc_extent_table->extent_page[i].page_idx != 0) { 831 assert(desc_extent_table->extent_page[i].num_pages == 1); 832 blob->active.extent_pages[blob->active.num_extent_pages++] = 833 desc_extent_table->extent_page[i].page_idx; 834 } else if (spdk_blob_is_thin_provisioned(blob)) { 835 for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) { 836 blob->active.extent_pages[blob->active.num_extent_pages++] = 0; 837 } 838 } else { 839 return -EINVAL; 840 } 841 } 842 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 843 struct spdk_blob_md_descriptor_extent_page *desc_extent; 844 unsigned int i; 845 unsigned int cluster_count = 0; 846 size_t cluster_idx_length; 847 848 if (blob->extent_rle_found) { 849 /* This means that Extent RLE is present in MD, 850 * both should never be at the same time. */ 851 return -EINVAL; 852 } 853 854 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 855 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 856 857 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 858 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 859 return -EINVAL; 860 } 861 862 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 863 if (desc_extent->cluster_idx[i] != 0) { 864 if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) { 865 return -EINVAL; 866 } 867 } 868 cluster_count++; 869 } 870 871 if (cluster_count == 0) { 872 return -EINVAL; 873 } 874 875 /* When reading extent pages sequentially starting cluster idx should match 876 * current size of a blob. 877 * If changed to batch reading, this check shall be removed. */ 878 if (desc_extent->start_cluster_idx != blob->active.num_clusters) { 879 return -EINVAL; 880 } 881 882 tmp = realloc(blob->active.clusters, 883 (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters)); 884 if (tmp == NULL) { 885 return -ENOMEM; 886 } 887 blob->active.clusters = tmp; 888 blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters); 889 890 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 891 if (desc_extent->cluster_idx[i] != 0) { 892 blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs, 893 desc_extent->cluster_idx[i]); 894 blob->active.num_allocated_clusters++; 895 } else if (spdk_blob_is_thin_provisioned(blob)) { 896 blob->active.clusters[blob->active.num_clusters++] = 0; 897 } else { 898 return -EINVAL; 899 } 900 } 901 assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters); 902 assert(blob->remaining_clusters_in_et >= cluster_count); 903 blob->remaining_clusters_in_et -= cluster_count; 904 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 905 int rc; 906 907 rc = blob_deserialize_xattr(blob, 908 (struct spdk_blob_md_descriptor_xattr *) desc, false); 909 if (rc != 0) { 910 return rc; 911 } 912 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 913 int rc; 914 915 rc = blob_deserialize_xattr(blob, 916 (struct spdk_blob_md_descriptor_xattr *) desc, true); 917 if (rc != 0) { 918 return rc; 919 } 920 } else { 921 /* Unrecognized descriptor type. Do not fail - just continue to the 922 * next descriptor. If this descriptor is associated with some feature 923 * defined in a newer version of blobstore, that version of blobstore 924 * should create and set an associated feature flag to specify if this 925 * blob can be loaded or not. 926 */ 927 } 928 929 /* Advance to the next descriptor */ 930 cur_desc += sizeof(*desc) + desc->length; 931 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 932 break; 933 } 934 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 935 } 936 937 return 0; 938 } 939 940 static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page); 941 942 static int 943 blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob) 944 { 945 assert(blob != NULL); 946 assert(blob->state == SPDK_BLOB_STATE_LOADING); 947 948 if (bs_load_cur_extent_page_valid(extent_page) == false) { 949 return -ENOENT; 950 } 951 952 return blob_parse_page(extent_page, blob); 953 } 954 955 static int 956 blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count, 957 struct spdk_blob *blob) 958 { 959 const struct spdk_blob_md_page *page; 960 uint32_t i; 961 int rc; 962 void *tmp; 963 964 assert(page_count > 0); 965 assert(pages[0].sequence_num == 0); 966 assert(blob != NULL); 967 assert(blob->state == SPDK_BLOB_STATE_LOADING); 968 assert(blob->active.clusters == NULL); 969 970 /* The blobid provided doesn't match what's in the MD, this can 971 * happen for example if a bogus blobid is passed in through open. 972 */ 973 if (blob->id != pages[0].id) { 974 SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata " 975 "(0x%" PRIx64 ")\n", blob->id, pages[0].id); 976 return -ENOENT; 977 } 978 979 tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages)); 980 if (!tmp) { 981 return -ENOMEM; 982 } 983 blob->active.pages = tmp; 984 985 blob->active.pages[0] = pages[0].id; 986 987 for (i = 1; i < page_count; i++) { 988 assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next)); 989 blob->active.pages[i] = pages[i - 1].next; 990 } 991 blob->active.num_pages = page_count; 992 993 for (i = 0; i < page_count; i++) { 994 page = &pages[i]; 995 996 assert(page->id == blob->id); 997 assert(page->sequence_num == i); 998 999 rc = blob_parse_page(page, blob); 1000 if (rc != 0) { 1001 return rc; 1002 } 1003 } 1004 1005 return 0; 1006 } 1007 1008 static int 1009 blob_serialize_add_page(const struct spdk_blob *blob, 1010 struct spdk_blob_md_page **pages, 1011 uint32_t *page_count, 1012 struct spdk_blob_md_page **last_page) 1013 { 1014 struct spdk_blob_md_page *page, *tmp_pages; 1015 1016 assert(pages != NULL); 1017 assert(page_count != NULL); 1018 1019 *last_page = NULL; 1020 if (*page_count == 0) { 1021 assert(*pages == NULL); 1022 *pages = spdk_malloc(blob->bs->md_page_size, 0, 1023 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 1024 if (*pages == NULL) { 1025 return -ENOMEM; 1026 } 1027 *page_count = 1; 1028 } else { 1029 assert(*pages != NULL); 1030 tmp_pages = spdk_realloc(*pages, blob->bs->md_page_size * (*page_count + 1), 0); 1031 if (tmp_pages == NULL) { 1032 return -ENOMEM; 1033 } 1034 (*page_count)++; 1035 *pages = tmp_pages; 1036 } 1037 1038 page = &(*pages)[*page_count - 1]; 1039 memset(page, 0, sizeof(*page)); 1040 page->id = blob->id; 1041 page->sequence_num = *page_count - 1; 1042 page->next = SPDK_INVALID_MD_PAGE; 1043 *last_page = page; 1044 1045 return 0; 1046 } 1047 1048 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor. 1049 * Update required_sz on both success and failure. 1050 * 1051 */ 1052 static int 1053 blob_serialize_xattr(const struct spdk_xattr *xattr, 1054 uint8_t *buf, size_t buf_sz, 1055 size_t *required_sz, bool internal) 1056 { 1057 struct spdk_blob_md_descriptor_xattr *desc; 1058 1059 *required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) + 1060 strlen(xattr->name) + 1061 xattr->value_len; 1062 1063 if (buf_sz < *required_sz) { 1064 return -1; 1065 } 1066 1067 desc = (struct spdk_blob_md_descriptor_xattr *)buf; 1068 1069 desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR; 1070 desc->length = sizeof(desc->name_length) + 1071 sizeof(desc->value_length) + 1072 strlen(xattr->name) + 1073 xattr->value_len; 1074 desc->name_length = strlen(xattr->name); 1075 desc->value_length = xattr->value_len; 1076 1077 memcpy(desc->name, xattr->name, desc->name_length); 1078 memcpy((void *)((uintptr_t)desc->name + desc->name_length), 1079 xattr->value, 1080 desc->value_length); 1081 1082 return 0; 1083 } 1084 1085 static void 1086 blob_serialize_extent_table_entry(const struct spdk_blob *blob, 1087 uint64_t start_ep, uint64_t *next_ep, 1088 uint8_t **buf, size_t *remaining_sz) 1089 { 1090 struct spdk_blob_md_descriptor_extent_table *desc; 1091 size_t cur_sz; 1092 uint64_t i, et_idx; 1093 uint32_t extent_page, ep_len; 1094 1095 /* The buffer must have room for at least num_clusters entry */ 1096 cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters); 1097 if (*remaining_sz < cur_sz) { 1098 *next_ep = start_ep; 1099 return; 1100 } 1101 1102 desc = (struct spdk_blob_md_descriptor_extent_table *)*buf; 1103 desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE; 1104 1105 desc->num_clusters = blob->active.num_clusters; 1106 1107 ep_len = 1; 1108 et_idx = 0; 1109 for (i = start_ep; i < blob->active.num_extent_pages; i++) { 1110 if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) { 1111 /* If we ran out of buffer space, return */ 1112 break; 1113 } 1114 1115 extent_page = blob->active.extent_pages[i]; 1116 /* Verify that next extent_page is unallocated */ 1117 if (extent_page == 0 && 1118 (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) { 1119 ep_len++; 1120 continue; 1121 } 1122 desc->extent_page[et_idx].page_idx = extent_page; 1123 desc->extent_page[et_idx].num_pages = ep_len; 1124 et_idx++; 1125 1126 ep_len = 1; 1127 cur_sz += sizeof(desc->extent_page[et_idx]); 1128 } 1129 *next_ep = i; 1130 1131 desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx; 1132 *remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length; 1133 *buf += sizeof(struct spdk_blob_md_descriptor) + desc->length; 1134 } 1135 1136 static int 1137 blob_serialize_extent_table(const struct spdk_blob *blob, 1138 struct spdk_blob_md_page **pages, 1139 struct spdk_blob_md_page *cur_page, 1140 uint32_t *page_count, uint8_t **buf, 1141 size_t *remaining_sz) 1142 { 1143 uint64_t last_extent_page; 1144 int rc; 1145 1146 last_extent_page = 0; 1147 /* At least single extent table entry has to be always persisted. 1148 * Such case occurs with num_extent_pages == 0. */ 1149 while (last_extent_page <= blob->active.num_extent_pages) { 1150 blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf, 1151 remaining_sz); 1152 1153 if (last_extent_page == blob->active.num_extent_pages) { 1154 break; 1155 } 1156 1157 rc = blob_serialize_add_page(blob, pages, page_count, &cur_page); 1158 if (rc < 0) { 1159 return rc; 1160 } 1161 1162 *buf = (uint8_t *)cur_page->descriptors; 1163 *remaining_sz = sizeof(cur_page->descriptors); 1164 } 1165 1166 return 0; 1167 } 1168 1169 static void 1170 blob_serialize_extent_rle(const struct spdk_blob *blob, 1171 uint64_t start_cluster, uint64_t *next_cluster, 1172 uint8_t **buf, size_t *buf_sz) 1173 { 1174 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 1175 size_t cur_sz; 1176 uint64_t i, extent_idx; 1177 uint64_t lba, lba_per_cluster, lba_count; 1178 1179 /* The buffer must have room for at least one extent */ 1180 cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]); 1181 if (*buf_sz < cur_sz) { 1182 *next_cluster = start_cluster; 1183 return; 1184 } 1185 1186 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf; 1187 desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE; 1188 1189 lba_per_cluster = bs_cluster_to_lba(blob->bs, 1); 1190 /* Assert for scan-build false positive */ 1191 assert(lba_per_cluster > 0); 1192 1193 lba = blob->active.clusters[start_cluster]; 1194 lba_count = lba_per_cluster; 1195 extent_idx = 0; 1196 for (i = start_cluster + 1; i < blob->active.num_clusters; i++) { 1197 if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) { 1198 /* Run-length encode sequential non-zero LBA */ 1199 lba_count += lba_per_cluster; 1200 continue; 1201 } else if (lba == 0 && blob->active.clusters[i] == 0) { 1202 /* Run-length encode unallocated clusters */ 1203 lba_count += lba_per_cluster; 1204 continue; 1205 } 1206 desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 1207 desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster; 1208 extent_idx++; 1209 1210 cur_sz += sizeof(desc_extent_rle->extents[extent_idx]); 1211 1212 if (*buf_sz < cur_sz) { 1213 /* If we ran out of buffer space, return */ 1214 *next_cluster = i; 1215 break; 1216 } 1217 1218 lba = blob->active.clusters[i]; 1219 lba_count = lba_per_cluster; 1220 } 1221 1222 if (*buf_sz >= cur_sz) { 1223 desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 1224 desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster; 1225 extent_idx++; 1226 1227 *next_cluster = blob->active.num_clusters; 1228 } 1229 1230 desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx; 1231 *buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length; 1232 *buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length; 1233 } 1234 1235 static int 1236 blob_serialize_extents_rle(const struct spdk_blob *blob, 1237 struct spdk_blob_md_page **pages, 1238 struct spdk_blob_md_page *cur_page, 1239 uint32_t *page_count, uint8_t **buf, 1240 size_t *remaining_sz) 1241 { 1242 uint64_t last_cluster; 1243 int rc; 1244 1245 last_cluster = 0; 1246 while (last_cluster < blob->active.num_clusters) { 1247 blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz); 1248 1249 if (last_cluster == blob->active.num_clusters) { 1250 break; 1251 } 1252 1253 rc = blob_serialize_add_page(blob, pages, page_count, &cur_page); 1254 if (rc < 0) { 1255 return rc; 1256 } 1257 1258 *buf = (uint8_t *)cur_page->descriptors; 1259 *remaining_sz = sizeof(cur_page->descriptors); 1260 } 1261 1262 return 0; 1263 } 1264 1265 static void 1266 blob_serialize_extent_page(const struct spdk_blob *blob, 1267 uint64_t cluster, struct spdk_blob_md_page *page) 1268 { 1269 struct spdk_blob_md_descriptor_extent_page *desc_extent; 1270 uint64_t i, extent_idx; 1271 uint64_t lba, lba_per_cluster; 1272 uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP; 1273 1274 desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors; 1275 desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE; 1276 1277 lba_per_cluster = bs_cluster_to_lba(blob->bs, 1); 1278 1279 desc_extent->start_cluster_idx = start_cluster_idx; 1280 extent_idx = 0; 1281 for (i = start_cluster_idx; i < blob->active.num_clusters; i++) { 1282 lba = blob->active.clusters[i]; 1283 desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster; 1284 if (extent_idx >= SPDK_EXTENTS_PER_EP) { 1285 break; 1286 } 1287 } 1288 desc_extent->length = sizeof(desc_extent->start_cluster_idx) + 1289 sizeof(desc_extent->cluster_idx[0]) * extent_idx; 1290 } 1291 1292 static void 1293 blob_serialize_flags(const struct spdk_blob *blob, 1294 uint8_t *buf, size_t *buf_sz) 1295 { 1296 struct spdk_blob_md_descriptor_flags *desc; 1297 1298 /* 1299 * Flags get serialized first, so we should always have room for the flags 1300 * descriptor. 1301 */ 1302 assert(*buf_sz >= sizeof(*desc)); 1303 1304 desc = (struct spdk_blob_md_descriptor_flags *)buf; 1305 desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS; 1306 desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor); 1307 desc->invalid_flags = blob->invalid_flags; 1308 desc->data_ro_flags = blob->data_ro_flags; 1309 desc->md_ro_flags = blob->md_ro_flags; 1310 1311 *buf_sz -= sizeof(*desc); 1312 } 1313 1314 static int 1315 blob_serialize_xattrs(const struct spdk_blob *blob, 1316 const struct spdk_xattr_tailq *xattrs, bool internal, 1317 struct spdk_blob_md_page **pages, 1318 struct spdk_blob_md_page *cur_page, 1319 uint32_t *page_count, uint8_t **buf, 1320 size_t *remaining_sz) 1321 { 1322 const struct spdk_xattr *xattr; 1323 int rc; 1324 1325 TAILQ_FOREACH(xattr, xattrs, link) { 1326 size_t required_sz = 0; 1327 1328 rc = blob_serialize_xattr(xattr, 1329 *buf, *remaining_sz, 1330 &required_sz, internal); 1331 if (rc < 0) { 1332 /* Need to add a new page to the chain */ 1333 rc = blob_serialize_add_page(blob, pages, page_count, 1334 &cur_page); 1335 if (rc < 0) { 1336 spdk_free(*pages); 1337 *pages = NULL; 1338 *page_count = 0; 1339 return rc; 1340 } 1341 1342 *buf = (uint8_t *)cur_page->descriptors; 1343 *remaining_sz = sizeof(cur_page->descriptors); 1344 1345 /* Try again */ 1346 required_sz = 0; 1347 rc = blob_serialize_xattr(xattr, 1348 *buf, *remaining_sz, 1349 &required_sz, internal); 1350 1351 if (rc < 0) { 1352 spdk_free(*pages); 1353 *pages = NULL; 1354 *page_count = 0; 1355 return rc; 1356 } 1357 } 1358 1359 *remaining_sz -= required_sz; 1360 *buf += required_sz; 1361 } 1362 1363 return 0; 1364 } 1365 1366 static int 1367 blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages, 1368 uint32_t *page_count) 1369 { 1370 struct spdk_blob_md_page *cur_page; 1371 int rc; 1372 uint8_t *buf; 1373 size_t remaining_sz; 1374 1375 assert(pages != NULL); 1376 assert(page_count != NULL); 1377 assert(blob != NULL); 1378 assert(blob->state == SPDK_BLOB_STATE_DIRTY); 1379 1380 *pages = NULL; 1381 *page_count = 0; 1382 1383 /* A blob always has at least 1 page, even if it has no descriptors */ 1384 rc = blob_serialize_add_page(blob, pages, page_count, &cur_page); 1385 if (rc < 0) { 1386 return rc; 1387 } 1388 1389 buf = (uint8_t *)cur_page->descriptors; 1390 remaining_sz = sizeof(cur_page->descriptors); 1391 1392 /* Serialize flags */ 1393 blob_serialize_flags(blob, buf, &remaining_sz); 1394 buf += sizeof(struct spdk_blob_md_descriptor_flags); 1395 1396 /* Serialize xattrs */ 1397 rc = blob_serialize_xattrs(blob, &blob->xattrs, false, 1398 pages, cur_page, page_count, &buf, &remaining_sz); 1399 if (rc < 0) { 1400 return rc; 1401 } 1402 1403 /* Serialize internal xattrs */ 1404 rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true, 1405 pages, cur_page, page_count, &buf, &remaining_sz); 1406 if (rc < 0) { 1407 return rc; 1408 } 1409 1410 if (blob->use_extent_table) { 1411 /* Serialize extent table */ 1412 rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz); 1413 } else { 1414 /* Serialize extents */ 1415 rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz); 1416 } 1417 1418 return rc; 1419 } 1420 1421 struct spdk_blob_load_ctx { 1422 struct spdk_blob *blob; 1423 1424 struct spdk_blob_md_page *pages; 1425 uint32_t num_pages; 1426 uint32_t next_extent_page; 1427 spdk_bs_sequence_t *seq; 1428 1429 spdk_bs_sequence_cpl cb_fn; 1430 void *cb_arg; 1431 }; 1432 1433 static uint32_t 1434 blob_md_page_calc_crc(void *page) 1435 { 1436 uint32_t crc; 1437 1438 crc = BLOB_CRC32C_INITIAL; 1439 crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc); 1440 crc ^= BLOB_CRC32C_INITIAL; 1441 1442 return crc; 1443 1444 } 1445 1446 static void 1447 blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno) 1448 { 1449 struct spdk_blob *blob = ctx->blob; 1450 1451 if (bserrno == 0) { 1452 blob_mark_clean(blob); 1453 } 1454 1455 ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno); 1456 1457 /* Free the memory */ 1458 spdk_free(ctx->pages); 1459 free(ctx); 1460 } 1461 1462 static void 1463 blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno) 1464 { 1465 struct spdk_blob_load_ctx *ctx = cb_arg; 1466 struct spdk_blob *blob = ctx->blob; 1467 1468 if (bserrno == 0) { 1469 blob->back_bs_dev = bs_create_blob_bs_dev(snapshot); 1470 if (blob->back_bs_dev == NULL) { 1471 bserrno = -ENOMEM; 1472 } 1473 } 1474 if (bserrno != 0) { 1475 SPDK_ERRLOG("Snapshot fail\n"); 1476 } 1477 1478 blob_load_final(ctx, bserrno); 1479 } 1480 1481 static void blob_update_clear_method(struct spdk_blob *blob); 1482 1483 static int 1484 blob_load_esnap(struct spdk_blob *blob, void *blob_ctx) 1485 { 1486 struct spdk_blob_store *bs = blob->bs; 1487 struct spdk_bs_dev *bs_dev = NULL; 1488 const void *esnap_id = NULL; 1489 size_t id_len = 0; 1490 int rc; 1491 1492 if (bs->esnap_bs_dev_create == NULL) { 1493 SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened " 1494 "without support for esnap clones\n", blob->id); 1495 return -ENOTSUP; 1496 } 1497 assert(blob->back_bs_dev == NULL); 1498 1499 rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true); 1500 if (rc != 0) { 1501 SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id); 1502 return -EINVAL; 1503 } 1504 assert(id_len > 0 && id_len < UINT32_MAX); 1505 1506 SPDK_INFOLOG(blob, "Creating external snapshot device\n"); 1507 1508 rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len, 1509 &bs_dev); 1510 if (rc != 0) { 1511 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev " 1512 "with error %d\n", blob->id, rc); 1513 return rc; 1514 } 1515 1516 /* 1517 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot. 1518 * This especially might happen during spdk_bs_load() iteration. 1519 */ 1520 if (bs_dev != NULL) { 1521 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id); 1522 if ((bs->io_unit_size % bs_dev->blocklen) != 0) { 1523 SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u " 1524 "is not compatible with blobstore block size %u\n", 1525 blob->id, bs_dev->blocklen, bs->io_unit_size); 1526 bs_dev->destroy(bs_dev); 1527 return -EINVAL; 1528 } 1529 } 1530 1531 blob->back_bs_dev = bs_dev; 1532 blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 1533 1534 return 0; 1535 } 1536 1537 static void 1538 blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg) 1539 { 1540 struct spdk_blob_load_ctx *ctx = cb_arg; 1541 struct spdk_blob *blob = ctx->blob; 1542 const void *value; 1543 size_t len; 1544 int rc; 1545 1546 if (blob_is_esnap_clone(blob)) { 1547 rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx); 1548 blob_load_final(ctx, rc); 1549 return; 1550 } 1551 1552 if (spdk_blob_is_thin_provisioned(blob)) { 1553 rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true); 1554 if (rc == 0) { 1555 if (len != sizeof(spdk_blob_id)) { 1556 blob_load_final(ctx, -EINVAL); 1557 return; 1558 } 1559 /* open snapshot blob and continue in the callback function */ 1560 blob->parent_id = *(spdk_blob_id *)value; 1561 spdk_bs_open_blob(blob->bs, blob->parent_id, 1562 blob_load_snapshot_cpl, ctx); 1563 return; 1564 } else { 1565 /* add zeroes_dev for thin provisioned blob */ 1566 blob->back_bs_dev = bs_create_zeroes_dev(); 1567 } 1568 } else { 1569 /* standard blob */ 1570 blob->back_bs_dev = NULL; 1571 } 1572 blob_load_final(ctx, 0); 1573 } 1574 1575 static void 1576 blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1577 { 1578 struct spdk_blob_load_ctx *ctx = cb_arg; 1579 struct spdk_blob *blob = ctx->blob; 1580 struct spdk_blob_md_page *page; 1581 uint64_t i; 1582 uint32_t crc; 1583 uint64_t lba; 1584 void *tmp; 1585 uint64_t sz; 1586 1587 if (bserrno) { 1588 SPDK_ERRLOG("Extent page read failed: %d\n", bserrno); 1589 blob_load_final(ctx, bserrno); 1590 return; 1591 } 1592 1593 if (ctx->pages == NULL) { 1594 /* First iteration of this function, allocate buffer for single EXTENT_PAGE */ 1595 ctx->pages = spdk_zmalloc(blob->bs->md_page_size, 0, 1596 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 1597 if (!ctx->pages) { 1598 blob_load_final(ctx, -ENOMEM); 1599 return; 1600 } 1601 ctx->num_pages = 1; 1602 ctx->next_extent_page = 0; 1603 } else { 1604 page = &ctx->pages[0]; 1605 crc = blob_md_page_calc_crc(page); 1606 if (crc != page->crc) { 1607 blob_load_final(ctx, -EINVAL); 1608 return; 1609 } 1610 1611 if (page->next != SPDK_INVALID_MD_PAGE) { 1612 blob_load_final(ctx, -EINVAL); 1613 return; 1614 } 1615 1616 bserrno = blob_parse_extent_page(page, blob); 1617 if (bserrno) { 1618 blob_load_final(ctx, bserrno); 1619 return; 1620 } 1621 } 1622 1623 for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) { 1624 if (blob->active.extent_pages[i] != 0) { 1625 /* Extent page was allocated, read and parse it. */ 1626 lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]); 1627 ctx->next_extent_page = i + 1; 1628 1629 bs_sequence_read_dev(seq, &ctx->pages[0], lba, 1630 bs_byte_to_lba(blob->bs, blob->bs->md_page_size), 1631 blob_load_cpl_extents_cpl, ctx); 1632 return; 1633 } else { 1634 /* Thin provisioned blobs can point to unallocated extent pages. 1635 * In this case blob size should be increased by up to the amount left in remaining_clusters_in_et. */ 1636 1637 sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP); 1638 blob->active.num_clusters += sz; 1639 blob->remaining_clusters_in_et -= sz; 1640 1641 assert(spdk_blob_is_thin_provisioned(blob)); 1642 assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0); 1643 1644 tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters)); 1645 if (tmp == NULL) { 1646 blob_load_final(ctx, -ENOMEM); 1647 return; 1648 } 1649 memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0, 1650 sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size)); 1651 blob->active.clusters = tmp; 1652 blob->active.cluster_array_size = blob->active.num_clusters; 1653 } 1654 } 1655 1656 blob_load_backing_dev(seq, ctx); 1657 } 1658 1659 static void 1660 blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1661 { 1662 struct spdk_blob_load_ctx *ctx = cb_arg; 1663 struct spdk_blob *blob = ctx->blob; 1664 struct spdk_blob_md_page *page; 1665 int rc; 1666 uint32_t crc; 1667 uint32_t current_page; 1668 1669 if (ctx->num_pages == 1) { 1670 current_page = bs_blobid_to_page(blob->id); 1671 } else { 1672 assert(ctx->num_pages != 0); 1673 page = &ctx->pages[ctx->num_pages - 2]; 1674 current_page = page->next; 1675 } 1676 1677 if (bserrno) { 1678 SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n", 1679 current_page, blob->id, bserrno); 1680 blob_load_final(ctx, bserrno); 1681 return; 1682 } 1683 1684 page = &ctx->pages[ctx->num_pages - 1]; 1685 crc = blob_md_page_calc_crc(page); 1686 if (crc != page->crc) { 1687 SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n", 1688 current_page, blob->id); 1689 blob_load_final(ctx, -EINVAL); 1690 return; 1691 } 1692 1693 if (page->next != SPDK_INVALID_MD_PAGE) { 1694 struct spdk_blob_md_page *tmp_pages; 1695 uint32_t next_page = page->next; 1696 uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page); 1697 1698 /* Read the next page */ 1699 tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0); 1700 if (tmp_pages == NULL) { 1701 blob_load_final(ctx, -ENOMEM); 1702 return; 1703 } 1704 ctx->num_pages++; 1705 ctx->pages = tmp_pages; 1706 1707 bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1], 1708 next_lba, 1709 bs_byte_to_lba(blob->bs, sizeof(*page)), 1710 blob_load_cpl, ctx); 1711 return; 1712 } 1713 1714 /* Parse the pages */ 1715 rc = blob_parse(ctx->pages, ctx->num_pages, blob); 1716 if (rc) { 1717 blob_load_final(ctx, rc); 1718 return; 1719 } 1720 1721 if (blob->extent_table_found == true) { 1722 /* If EXTENT_TABLE was found, that means support for it should be enabled. */ 1723 assert(blob->extent_rle_found == false); 1724 blob->use_extent_table = true; 1725 } else { 1726 /* If EXTENT_RLE or no extent_* descriptor was found disable support 1727 * for extent table. No extent_* descriptors means that blob has length of 0 1728 * and no extent_rle descriptors were persisted for it. 1729 * EXTENT_TABLE if used, is always present in metadata regardless of length. */ 1730 blob->use_extent_table = false; 1731 } 1732 1733 /* Check the clear_method stored in metadata vs what may have been passed 1734 * via spdk_bs_open_blob_ext() and update accordingly. 1735 */ 1736 blob_update_clear_method(blob); 1737 1738 spdk_free(ctx->pages); 1739 ctx->pages = NULL; 1740 1741 if (blob->extent_table_found) { 1742 blob_load_cpl_extents_cpl(seq, ctx, 0); 1743 } else { 1744 blob_load_backing_dev(seq, ctx); 1745 } 1746 } 1747 1748 /* Load a blob from disk given a blobid */ 1749 static void 1750 blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 1751 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 1752 { 1753 struct spdk_blob_load_ctx *ctx; 1754 struct spdk_blob_store *bs; 1755 uint32_t page_num; 1756 uint64_t lba; 1757 1758 blob_verify_md_op(blob); 1759 1760 bs = blob->bs; 1761 1762 ctx = calloc(1, sizeof(*ctx)); 1763 if (!ctx) { 1764 cb_fn(seq, cb_arg, -ENOMEM); 1765 return; 1766 } 1767 1768 ctx->blob = blob; 1769 ctx->pages = spdk_realloc(ctx->pages, bs->md_page_size, 0); 1770 if (!ctx->pages) { 1771 free(ctx); 1772 cb_fn(seq, cb_arg, -ENOMEM); 1773 return; 1774 } 1775 ctx->num_pages = 1; 1776 ctx->cb_fn = cb_fn; 1777 ctx->cb_arg = cb_arg; 1778 ctx->seq = seq; 1779 1780 page_num = bs_blobid_to_page(blob->id); 1781 lba = bs_md_page_to_lba(blob->bs, page_num); 1782 1783 blob->state = SPDK_BLOB_STATE_LOADING; 1784 1785 bs_sequence_read_dev(seq, &ctx->pages[0], lba, 1786 bs_byte_to_lba(bs, bs->md_page_size), 1787 blob_load_cpl, ctx); 1788 } 1789 1790 struct spdk_blob_persist_ctx { 1791 struct spdk_blob *blob; 1792 1793 struct spdk_blob_md_page *pages; 1794 uint32_t next_extent_page; 1795 struct spdk_blob_md_page *extent_page; 1796 1797 spdk_bs_sequence_t *seq; 1798 spdk_bs_sequence_cpl cb_fn; 1799 void *cb_arg; 1800 TAILQ_ENTRY(spdk_blob_persist_ctx) link; 1801 }; 1802 1803 static void 1804 bs_batch_clear_dev(struct spdk_blob *blob, spdk_bs_batch_t *batch, uint64_t lba, 1805 uint64_t lba_count) 1806 { 1807 switch (blob->clear_method) { 1808 case BLOB_CLEAR_WITH_DEFAULT: 1809 case BLOB_CLEAR_WITH_UNMAP: 1810 bs_batch_unmap_dev(batch, lba, lba_count); 1811 break; 1812 case BLOB_CLEAR_WITH_WRITE_ZEROES: 1813 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1814 break; 1815 case BLOB_CLEAR_WITH_NONE: 1816 default: 1817 break; 1818 } 1819 } 1820 1821 static int 1822 bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs) 1823 { 1824 uint32_t crc; 1825 static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH]; 1826 1827 if (super->version > SPDK_BS_VERSION || 1828 super->version < SPDK_BS_INITIAL_VERSION) { 1829 return -EILSEQ; 1830 } 1831 1832 if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG, 1833 sizeof(super->signature)) != 0) { 1834 return -EILSEQ; 1835 } 1836 1837 crc = blob_md_page_calc_crc(super); 1838 if (crc != super->crc) { 1839 return -EILSEQ; 1840 } 1841 1842 if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 1843 SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n"); 1844 } else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 1845 SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless bstype\n"); 1846 } else { 1847 SPDK_DEBUGLOG(blob, "Unexpected bstype\n"); 1848 SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 1849 SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 1850 return -ENXIO; 1851 } 1852 1853 if (super->size > bs->dev->blockcnt * bs->dev->blocklen) { 1854 SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n", 1855 bs->dev->blockcnt * bs->dev->blocklen, super->size); 1856 return -EILSEQ; 1857 } 1858 1859 return 0; 1860 } 1861 1862 static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 1863 spdk_bs_sequence_cpl cb_fn, void *cb_arg); 1864 1865 static void 1866 blob_persist_complete_cb(void *arg) 1867 { 1868 struct spdk_blob_persist_ctx *ctx = arg; 1869 1870 /* Call user callback */ 1871 ctx->cb_fn(ctx->seq, ctx->cb_arg, 0); 1872 1873 /* Free the memory */ 1874 spdk_free(ctx->pages); 1875 free(ctx); 1876 } 1877 1878 static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno); 1879 1880 static void 1881 blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno) 1882 { 1883 struct spdk_blob_persist_ctx *next_persist, *tmp; 1884 struct spdk_blob *blob = ctx->blob; 1885 1886 if (bserrno == 0) { 1887 blob_mark_clean(blob); 1888 } 1889 1890 assert(ctx == TAILQ_FIRST(&blob->persists_to_complete)); 1891 1892 /* Complete all persists that were pending when the current persist started */ 1893 TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) { 1894 TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link); 1895 spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist); 1896 } 1897 1898 if (TAILQ_EMPTY(&blob->pending_persists)) { 1899 return; 1900 } 1901 1902 /* Queue up all pending persists for completion and start blob persist with first one */ 1903 TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link); 1904 next_persist = TAILQ_FIRST(&blob->persists_to_complete); 1905 1906 blob->state = SPDK_BLOB_STATE_DIRTY; 1907 bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist); 1908 } 1909 1910 static void 1911 blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1912 { 1913 struct spdk_blob_persist_ctx *ctx = cb_arg; 1914 struct spdk_blob *blob = ctx->blob; 1915 struct spdk_blob_store *bs = blob->bs; 1916 size_t i; 1917 1918 if (bserrno != 0) { 1919 blob_persist_complete(seq, ctx, bserrno); 1920 return; 1921 } 1922 1923 spdk_spin_lock(&bs->used_lock); 1924 1925 /* Release all extent_pages that were truncated */ 1926 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1927 /* Nothing to release if it was not allocated */ 1928 if (blob->active.extent_pages[i] != 0) { 1929 bs_release_md_page(bs, blob->active.extent_pages[i]); 1930 } 1931 } 1932 1933 spdk_spin_unlock(&bs->used_lock); 1934 1935 if (blob->active.num_extent_pages == 0) { 1936 free(blob->active.extent_pages); 1937 blob->active.extent_pages = NULL; 1938 blob->active.extent_pages_array_size = 0; 1939 } else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) { 1940 #ifndef __clang_analyzer__ 1941 void *tmp; 1942 1943 /* scan-build really can't figure reallocs, workaround it */ 1944 tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages); 1945 assert(tmp != NULL); 1946 blob->active.extent_pages = tmp; 1947 #endif 1948 blob->active.extent_pages_array_size = blob->active.num_extent_pages; 1949 } 1950 1951 blob_persist_complete(seq, ctx, bserrno); 1952 } 1953 1954 static void 1955 blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 1956 { 1957 struct spdk_blob *blob = ctx->blob; 1958 struct spdk_blob_store *bs = blob->bs; 1959 size_t i; 1960 uint64_t lba; 1961 uint64_t lba_count; 1962 spdk_bs_batch_t *batch; 1963 1964 batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx); 1965 lba_count = bs_byte_to_lba(bs, bs->md_page_size); 1966 1967 /* Clear all extent_pages that were truncated */ 1968 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1969 /* Nothing to clear if it was not allocated */ 1970 if (blob->active.extent_pages[i] != 0) { 1971 lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]); 1972 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1973 } 1974 } 1975 1976 bs_batch_close(batch); 1977 } 1978 1979 static void 1980 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1981 { 1982 struct spdk_blob_persist_ctx *ctx = cb_arg; 1983 struct spdk_blob *blob = ctx->blob; 1984 struct spdk_blob_store *bs = blob->bs; 1985 size_t i; 1986 1987 if (bserrno != 0) { 1988 blob_persist_complete(seq, ctx, bserrno); 1989 return; 1990 } 1991 1992 spdk_spin_lock(&bs->used_lock); 1993 /* Release all clusters that were truncated */ 1994 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1995 uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]); 1996 1997 /* Nothing to release if it was not allocated */ 1998 if (blob->active.clusters[i] != 0) { 1999 bs_release_cluster(bs, cluster_num); 2000 } 2001 } 2002 spdk_spin_unlock(&bs->used_lock); 2003 2004 if (blob->active.num_clusters == 0) { 2005 free(blob->active.clusters); 2006 blob->active.clusters = NULL; 2007 blob->active.cluster_array_size = 0; 2008 } else if (blob->active.num_clusters != blob->active.cluster_array_size) { 2009 #ifndef __clang_analyzer__ 2010 void *tmp; 2011 2012 /* scan-build really can't figure reallocs, workaround it */ 2013 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters); 2014 assert(tmp != NULL); 2015 blob->active.clusters = tmp; 2016 2017 #endif 2018 blob->active.cluster_array_size = blob->active.num_clusters; 2019 } 2020 2021 /* Move on to clearing extent pages */ 2022 blob_persist_clear_extents(seq, ctx); 2023 } 2024 2025 static void 2026 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2027 { 2028 struct spdk_blob *blob = ctx->blob; 2029 struct spdk_blob_store *bs = blob->bs; 2030 spdk_bs_batch_t *batch; 2031 size_t i; 2032 uint64_t lba; 2033 uint64_t lba_count; 2034 2035 /* Clusters don't move around in blobs. The list shrinks or grows 2036 * at the end, but no changes ever occur in the middle of the list. 2037 */ 2038 2039 batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx); 2040 2041 /* Clear all clusters that were truncated */ 2042 lba = 0; 2043 lba_count = 0; 2044 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 2045 uint64_t next_lba = blob->active.clusters[i]; 2046 uint64_t next_lba_count = bs_cluster_to_lba(bs, 1); 2047 2048 if (next_lba > 0 && (lba + lba_count) == next_lba) { 2049 /* This cluster is contiguous with the previous one. */ 2050 lba_count += next_lba_count; 2051 continue; 2052 } else if (next_lba == 0) { 2053 continue; 2054 } 2055 2056 /* This cluster is not contiguous with the previous one. */ 2057 2058 /* If a run of LBAs previously existing, clear them now */ 2059 if (lba_count > 0) { 2060 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2061 } 2062 2063 /* Start building the next batch */ 2064 lba = next_lba; 2065 if (next_lba > 0) { 2066 lba_count = next_lba_count; 2067 } else { 2068 lba_count = 0; 2069 } 2070 } 2071 2072 /* If we ended with a contiguous set of LBAs, clear them now */ 2073 if (lba_count > 0) { 2074 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2075 } 2076 2077 bs_batch_close(batch); 2078 } 2079 2080 static void 2081 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2082 { 2083 struct spdk_blob_persist_ctx *ctx = cb_arg; 2084 struct spdk_blob *blob = ctx->blob; 2085 struct spdk_blob_store *bs = blob->bs; 2086 size_t i; 2087 2088 if (bserrno != 0) { 2089 blob_persist_complete(seq, ctx, bserrno); 2090 return; 2091 } 2092 2093 spdk_spin_lock(&bs->used_lock); 2094 2095 /* This loop starts at 1 because the first page is special and handled 2096 * below. The pages (except the first) are never written in place, 2097 * so any pages in the clean list must be zeroed. 2098 */ 2099 for (i = 1; i < blob->clean.num_pages; i++) { 2100 bs_release_md_page(bs, blob->clean.pages[i]); 2101 } 2102 2103 if (blob->active.num_pages == 0) { 2104 uint32_t page_num; 2105 2106 page_num = bs_blobid_to_page(blob->id); 2107 bs_release_md_page(bs, page_num); 2108 } 2109 2110 spdk_spin_unlock(&bs->used_lock); 2111 2112 /* Move on to clearing clusters */ 2113 blob_persist_clear_clusters(seq, ctx); 2114 } 2115 2116 static void 2117 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2118 { 2119 struct spdk_blob_persist_ctx *ctx = cb_arg; 2120 struct spdk_blob *blob = ctx->blob; 2121 struct spdk_blob_store *bs = blob->bs; 2122 uint64_t lba; 2123 uint64_t lba_count; 2124 spdk_bs_batch_t *batch; 2125 size_t i; 2126 2127 if (bserrno != 0) { 2128 blob_persist_complete(seq, ctx, bserrno); 2129 return; 2130 } 2131 2132 batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx); 2133 2134 lba_count = bs_byte_to_lba(bs, bs->md_page_size); 2135 2136 /* This loop starts at 1 because the first page is special and handled 2137 * below. The pages (except the first) are never written in place, 2138 * so any pages in the clean list must be zeroed. 2139 */ 2140 for (i = 1; i < blob->clean.num_pages; i++) { 2141 lba = bs_md_page_to_lba(bs, blob->clean.pages[i]); 2142 2143 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2144 } 2145 2146 /* The first page will only be zeroed if this is a delete. */ 2147 if (blob->active.num_pages == 0) { 2148 uint32_t page_num; 2149 2150 /* The first page in the metadata goes where the blobid indicates */ 2151 page_num = bs_blobid_to_page(blob->id); 2152 lba = bs_md_page_to_lba(bs, page_num); 2153 2154 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2155 } 2156 2157 bs_batch_close(batch); 2158 } 2159 2160 static void 2161 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2162 { 2163 struct spdk_blob_persist_ctx *ctx = cb_arg; 2164 struct spdk_blob *blob = ctx->blob; 2165 struct spdk_blob_store *bs = blob->bs; 2166 uint64_t lba; 2167 uint32_t lba_count; 2168 struct spdk_blob_md_page *page; 2169 2170 if (bserrno != 0) { 2171 blob_persist_complete(seq, ctx, bserrno); 2172 return; 2173 } 2174 2175 if (blob->active.num_pages == 0) { 2176 /* Move on to the next step */ 2177 blob_persist_zero_pages(seq, ctx, 0); 2178 return; 2179 } 2180 2181 lba_count = bs_byte_to_lba(bs, bs->md_page_size); 2182 2183 page = &ctx->pages[0]; 2184 /* The first page in the metadata goes where the blobid indicates */ 2185 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 2186 2187 bs_sequence_write_dev(seq, page, lba, lba_count, 2188 blob_persist_zero_pages, ctx); 2189 } 2190 2191 static void 2192 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2193 { 2194 struct spdk_blob *blob = ctx->blob; 2195 struct spdk_blob_store *bs = blob->bs; 2196 uint64_t lba; 2197 uint32_t lba_count; 2198 struct spdk_blob_md_page *page; 2199 spdk_bs_batch_t *batch; 2200 size_t i; 2201 2202 /* Clusters don't move around in blobs. The list shrinks or grows 2203 * at the end, but no changes ever occur in the middle of the list. 2204 */ 2205 2206 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2207 2208 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 2209 2210 /* This starts at 1. The root page is not written until 2211 * all of the others are finished 2212 */ 2213 for (i = 1; i < blob->active.num_pages; i++) { 2214 page = &ctx->pages[i]; 2215 assert(page->sequence_num == i); 2216 2217 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 2218 2219 bs_batch_write_dev(batch, page, lba, lba_count); 2220 } 2221 2222 bs_batch_close(batch); 2223 } 2224 2225 static int 2226 blob_resize(struct spdk_blob *blob, uint64_t sz) 2227 { 2228 uint64_t i; 2229 uint64_t *tmp; 2230 uint64_t cluster; 2231 uint32_t lfmd; /* lowest free md page */ 2232 uint64_t num_clusters; 2233 uint32_t *ep_tmp; 2234 uint64_t new_num_ep = 0, current_num_ep = 0; 2235 struct spdk_blob_store *bs; 2236 int rc; 2237 2238 bs = blob->bs; 2239 2240 blob_verify_md_op(blob); 2241 2242 if (blob->active.num_clusters == sz) { 2243 return 0; 2244 } 2245 2246 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2247 /* If this blob was resized to be larger, then smaller, then 2248 * larger without syncing, then the cluster array already 2249 * contains spare assigned clusters we can use. 2250 */ 2251 num_clusters = spdk_min(blob->active.cluster_array_size, 2252 sz); 2253 } else { 2254 num_clusters = blob->active.num_clusters; 2255 } 2256 2257 if (blob->use_extent_table) { 2258 /* Round up since every cluster beyond current Extent Table size, 2259 * requires new extent page. */ 2260 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2261 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2262 } 2263 2264 assert(!spdk_spin_held(&bs->used_lock)); 2265 2266 /* Check first that we have enough clusters and md pages before we start claiming them. 2267 * bs->used_lock is held to ensure that clusters we think are free are still free when we go 2268 * to claim them later in this function. 2269 */ 2270 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2271 spdk_spin_lock(&bs->used_lock); 2272 if ((sz - num_clusters) > bs->num_free_clusters) { 2273 rc = -ENOSPC; 2274 goto out; 2275 } 2276 lfmd = 0; 2277 for (i = current_num_ep; i < new_num_ep ; i++) { 2278 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2279 if (lfmd == UINT32_MAX) { 2280 /* No more free md pages. Cannot satisfy the request */ 2281 rc = -ENOSPC; 2282 goto out; 2283 } 2284 } 2285 } 2286 2287 if (sz > num_clusters) { 2288 /* Expand the cluster array if necessary. 2289 * We only shrink the array when persisting. 2290 */ 2291 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2292 if (sz > 0 && tmp == NULL) { 2293 rc = -ENOMEM; 2294 goto out; 2295 } 2296 memset(tmp + blob->active.cluster_array_size, 0, 2297 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2298 blob->active.clusters = tmp; 2299 blob->active.cluster_array_size = sz; 2300 2301 /* Expand the extents table, only if enough clusters were added */ 2302 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2303 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2304 if (new_num_ep > 0 && ep_tmp == NULL) { 2305 rc = -ENOMEM; 2306 goto out; 2307 } 2308 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2309 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2310 blob->active.extent_pages = ep_tmp; 2311 blob->active.extent_pages_array_size = new_num_ep; 2312 } 2313 } 2314 2315 blob->state = SPDK_BLOB_STATE_DIRTY; 2316 2317 if (spdk_blob_is_thin_provisioned(blob) == false) { 2318 cluster = 0; 2319 lfmd = 0; 2320 for (i = num_clusters; i < sz; i++) { 2321 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2322 /* Do not increment lfmd here. lfmd will get updated 2323 * to the md_page allocated (if any) when a new extent 2324 * page is needed. Just pass that value again, 2325 * bs_allocate_cluster will just start at that index 2326 * to find the next free md_page when needed. 2327 */ 2328 } 2329 } 2330 2331 /* If we are shrinking the blob, we must adjust num_allocated_clusters */ 2332 for (i = sz; i < num_clusters; i++) { 2333 if (blob->active.clusters[i] != 0) { 2334 blob->active.num_allocated_clusters--; 2335 } 2336 } 2337 2338 blob->active.num_clusters = sz; 2339 blob->active.num_extent_pages = new_num_ep; 2340 2341 rc = 0; 2342 out: 2343 if (spdk_spin_held(&bs->used_lock)) { 2344 spdk_spin_unlock(&bs->used_lock); 2345 } 2346 2347 return rc; 2348 } 2349 2350 static void 2351 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2352 { 2353 spdk_bs_sequence_t *seq = ctx->seq; 2354 struct spdk_blob *blob = ctx->blob; 2355 struct spdk_blob_store *bs = blob->bs; 2356 uint64_t i; 2357 uint32_t page_num; 2358 void *tmp; 2359 int rc; 2360 2361 /* Generate the new metadata */ 2362 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2363 if (rc < 0) { 2364 blob_persist_complete(seq, ctx, rc); 2365 return; 2366 } 2367 2368 assert(blob->active.num_pages >= 1); 2369 2370 /* Resize the cache of page indices */ 2371 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2372 if (!tmp) { 2373 blob_persist_complete(seq, ctx, -ENOMEM); 2374 return; 2375 } 2376 blob->active.pages = tmp; 2377 2378 /* Assign this metadata to pages. This requires two passes - one to verify that there are 2379 * enough pages and a second to actually claim them. The used_lock is held across 2380 * both passes to ensure things don't change in the middle. 2381 */ 2382 spdk_spin_lock(&bs->used_lock); 2383 page_num = 0; 2384 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 2385 for (i = 1; i < blob->active.num_pages; i++) { 2386 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2387 if (page_num == UINT32_MAX) { 2388 spdk_spin_unlock(&bs->used_lock); 2389 blob_persist_complete(seq, ctx, -ENOMEM); 2390 return; 2391 } 2392 page_num++; 2393 } 2394 2395 page_num = 0; 2396 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2397 for (i = 1; i < blob->active.num_pages; i++) { 2398 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2399 ctx->pages[i - 1].next = page_num; 2400 /* Now that previous metadata page is complete, calculate the crc for it. */ 2401 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2402 blob->active.pages[i] = page_num; 2403 bs_claim_md_page(bs, page_num); 2404 SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num, 2405 blob->id); 2406 page_num++; 2407 } 2408 spdk_spin_unlock(&bs->used_lock); 2409 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2410 /* Start writing the metadata from last page to first */ 2411 blob->state = SPDK_BLOB_STATE_CLEAN; 2412 blob_persist_write_page_chain(seq, ctx); 2413 } 2414 2415 static void 2416 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2417 { 2418 struct spdk_blob_persist_ctx *ctx = cb_arg; 2419 struct spdk_blob *blob = ctx->blob; 2420 size_t i; 2421 uint32_t extent_page_id; 2422 uint32_t page_count = 0; 2423 int rc; 2424 2425 if (ctx->extent_page != NULL) { 2426 spdk_free(ctx->extent_page); 2427 ctx->extent_page = NULL; 2428 } 2429 2430 if (bserrno != 0) { 2431 blob_persist_complete(seq, ctx, bserrno); 2432 return; 2433 } 2434 2435 /* Only write out Extent Pages when blob was resized. */ 2436 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2437 extent_page_id = blob->active.extent_pages[i]; 2438 if (extent_page_id == 0) { 2439 /* No Extent Page to persist */ 2440 assert(spdk_blob_is_thin_provisioned(blob)); 2441 continue; 2442 } 2443 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2444 ctx->next_extent_page = i + 1; 2445 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2446 if (rc < 0) { 2447 blob_persist_complete(seq, ctx, rc); 2448 return; 2449 } 2450 2451 blob->state = SPDK_BLOB_STATE_DIRTY; 2452 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2453 2454 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2455 2456 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2457 bs_byte_to_lba(blob->bs, blob->bs->md_page_size), 2458 blob_persist_write_extent_pages, ctx); 2459 return; 2460 } 2461 2462 blob_persist_generate_new_md(ctx); 2463 } 2464 2465 static void 2466 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2467 { 2468 struct spdk_blob_persist_ctx *ctx = cb_arg; 2469 struct spdk_blob *blob = ctx->blob; 2470 2471 if (bserrno != 0) { 2472 blob_persist_complete(seq, ctx, bserrno); 2473 return; 2474 } 2475 2476 if (blob->active.num_pages == 0) { 2477 /* This is the signal that the blob should be deleted. 2478 * Immediately jump to the clean up routine. */ 2479 assert(blob->clean.num_pages > 0); 2480 blob->state = SPDK_BLOB_STATE_CLEAN; 2481 blob_persist_zero_pages(seq, ctx, 0); 2482 return; 2483 2484 } 2485 2486 if (blob->clean.num_clusters < blob->active.num_clusters) { 2487 /* Blob was resized up */ 2488 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2489 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2490 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2491 /* Blob was resized down */ 2492 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2493 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2494 } else { 2495 /* No change in size occurred */ 2496 blob_persist_generate_new_md(ctx); 2497 return; 2498 } 2499 2500 blob_persist_write_extent_pages(seq, ctx, 0); 2501 } 2502 2503 struct spdk_bs_mark_dirty { 2504 struct spdk_blob_store *bs; 2505 struct spdk_bs_super_block *super; 2506 spdk_bs_sequence_cpl cb_fn; 2507 void *cb_arg; 2508 }; 2509 2510 static void 2511 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2512 { 2513 struct spdk_bs_mark_dirty *ctx = cb_arg; 2514 2515 if (bserrno == 0) { 2516 ctx->bs->clean = 0; 2517 } 2518 2519 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 2520 2521 spdk_free(ctx->super); 2522 free(ctx); 2523 } 2524 2525 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2526 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2527 2528 2529 static void 2530 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2531 { 2532 struct spdk_bs_mark_dirty *ctx = cb_arg; 2533 int rc; 2534 2535 if (bserrno != 0) { 2536 bs_mark_dirty_write_cpl(seq, ctx, bserrno); 2537 return; 2538 } 2539 2540 rc = bs_super_validate(ctx->super, ctx->bs); 2541 if (rc != 0) { 2542 bs_mark_dirty_write_cpl(seq, ctx, rc); 2543 return; 2544 } 2545 2546 ctx->super->clean = 0; 2547 if (ctx->super->size == 0) { 2548 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 2549 } 2550 2551 bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx); 2552 } 2553 2554 static void 2555 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2556 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2557 { 2558 struct spdk_bs_mark_dirty *ctx; 2559 2560 /* Blobstore is already marked dirty */ 2561 if (bs->clean == 0) { 2562 cb_fn(seq, cb_arg, 0); 2563 return; 2564 } 2565 2566 ctx = calloc(1, sizeof(*ctx)); 2567 if (!ctx) { 2568 cb_fn(seq, cb_arg, -ENOMEM); 2569 return; 2570 } 2571 ctx->bs = bs; 2572 ctx->cb_fn = cb_fn; 2573 ctx->cb_arg = cb_arg; 2574 2575 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2576 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2577 if (!ctx->super) { 2578 free(ctx); 2579 cb_fn(seq, cb_arg, -ENOMEM); 2580 return; 2581 } 2582 2583 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 2584 bs_byte_to_lba(bs, sizeof(*ctx->super)), 2585 bs_mark_dirty_write, ctx); 2586 } 2587 2588 /* Write a blob to disk */ 2589 static void 2590 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2591 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2592 { 2593 struct spdk_blob_persist_ctx *ctx; 2594 2595 blob_verify_md_op(blob); 2596 2597 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2598 cb_fn(seq, cb_arg, 0); 2599 return; 2600 } 2601 2602 ctx = calloc(1, sizeof(*ctx)); 2603 if (!ctx) { 2604 cb_fn(seq, cb_arg, -ENOMEM); 2605 return; 2606 } 2607 ctx->blob = blob; 2608 ctx->seq = seq; 2609 ctx->cb_fn = cb_fn; 2610 ctx->cb_arg = cb_arg; 2611 2612 /* Multiple blob persists can affect one another, via blob->state or 2613 * blob mutable data changes. To prevent it, queue up the persists. */ 2614 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2615 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2616 return; 2617 } 2618 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2619 2620 bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx); 2621 } 2622 2623 struct spdk_blob_copy_cluster_ctx { 2624 struct spdk_blob *blob; 2625 uint8_t *buf; 2626 uint64_t io_unit; 2627 uint64_t new_cluster; 2628 uint32_t new_extent_page; 2629 spdk_bs_sequence_t *seq; 2630 struct spdk_blob_md_page *new_cluster_page; 2631 }; 2632 2633 struct spdk_blob_free_cluster_ctx { 2634 struct spdk_blob *blob; 2635 uint64_t page; 2636 struct spdk_blob_md_page *md_page; 2637 uint64_t cluster_num; 2638 uint32_t extent_page; 2639 spdk_bs_sequence_t *seq; 2640 }; 2641 2642 static void 2643 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2644 { 2645 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2646 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2647 TAILQ_HEAD(, spdk_bs_request_set) requests; 2648 spdk_bs_user_op_t *op; 2649 2650 TAILQ_INIT(&requests); 2651 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2652 2653 while (!TAILQ_EMPTY(&requests)) { 2654 op = TAILQ_FIRST(&requests); 2655 TAILQ_REMOVE(&requests, op, link); 2656 if (bserrno == 0) { 2657 bs_user_op_execute(op); 2658 } else { 2659 bs_user_op_abort(op, bserrno); 2660 } 2661 } 2662 2663 spdk_free(ctx->buf); 2664 free(ctx); 2665 } 2666 2667 static void 2668 blob_free_cluster_cpl(void *cb_arg, int bserrno) 2669 { 2670 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 2671 spdk_bs_sequence_t *seq = ctx->seq; 2672 2673 bs_sequence_finish(seq, bserrno); 2674 2675 free(ctx); 2676 } 2677 2678 static void 2679 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx) 2680 { 2681 spdk_spin_lock(&ctx->blob->bs->used_lock); 2682 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2683 if (ctx->new_extent_page != 0) { 2684 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2685 } 2686 spdk_spin_unlock(&ctx->blob->bs->used_lock); 2687 } 2688 2689 static void 2690 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno) 2691 { 2692 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2693 2694 if (bserrno) { 2695 SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno); 2696 } 2697 2698 blob_insert_cluster_revert(ctx); 2699 bs_sequence_finish(ctx->seq, bserrno); 2700 } 2701 2702 static void 2703 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx) 2704 { 2705 struct spdk_bs_cpl cpl; 2706 spdk_bs_batch_t *batch; 2707 struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel); 2708 2709 /* 2710 * We allocated a cluster and we copied data to it. But now, we realized that we don't need 2711 * this cluster and we want to release it. We must ensure that we clear the data on this 2712 * cluster. 2713 * The cluster may later be re-allocated by a thick-provisioned blob for example. When 2714 * reading from this thick-provisioned blob before writing data, we should read zeroes. 2715 */ 2716 2717 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2718 cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl; 2719 cpl.u.blob_basic.cb_arg = ctx; 2720 2721 batch = bs_batch_open(ch, &cpl, ctx->blob); 2722 if (!batch) { 2723 blob_insert_cluster_clear_cpl(ctx, -ENOMEM); 2724 return; 2725 } 2726 2727 bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2728 bs_cluster_to_lba(ctx->blob->bs, 1)); 2729 bs_batch_close(batch); 2730 } 2731 2732 static void 2733 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2734 { 2735 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2736 2737 if (bserrno) { 2738 if (bserrno == -EEXIST) { 2739 /* The metadata insert failed because another thread 2740 * allocated the cluster first. Clear and free our cluster 2741 * but continue without error. */ 2742 blob_insert_cluster_clear(ctx); 2743 return; 2744 } 2745 2746 blob_insert_cluster_revert(ctx); 2747 } 2748 2749 bs_sequence_finish(ctx->seq, bserrno); 2750 } 2751 2752 static void 2753 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2754 { 2755 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2756 uint32_t cluster_number; 2757 2758 if (bserrno) { 2759 /* The write failed, so jump to the final completion handler */ 2760 bs_sequence_finish(seq, bserrno); 2761 return; 2762 } 2763 2764 cluster_number = bs_io_unit_to_cluster(ctx->blob->bs, ctx->io_unit); 2765 2766 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2767 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2768 } 2769 2770 static void 2771 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2772 { 2773 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2774 2775 if (bserrno != 0) { 2776 /* The read failed, so jump to the final completion handler */ 2777 bs_sequence_finish(seq, bserrno); 2778 return; 2779 } 2780 2781 /* Write whole cluster */ 2782 bs_sequence_write_dev(seq, ctx->buf, 2783 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2784 bs_cluster_to_lba(ctx->blob->bs, 1), 2785 blob_write_copy_cpl, ctx); 2786 } 2787 2788 static bool 2789 blob_can_copy(struct spdk_blob *blob, uint64_t cluster_start_io_unit, uint64_t *base_lba) 2790 { 2791 uint64_t lba = bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit); 2792 2793 return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) && 2794 blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba); 2795 } 2796 2797 static void 2798 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba) 2799 { 2800 struct spdk_blob *blob = ctx->blob; 2801 uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz); 2802 2803 bs_sequence_copy_dev(ctx->seq, 2804 bs_cluster_to_lba(blob->bs, ctx->new_cluster), 2805 src_lba, 2806 lba_count, 2807 blob_write_copy_cpl, ctx); 2808 } 2809 2810 static void 2811 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2812 struct spdk_io_channel *_ch, 2813 uint64_t io_unit, spdk_bs_user_op_t *op) 2814 { 2815 struct spdk_bs_cpl cpl; 2816 struct spdk_bs_channel *ch; 2817 struct spdk_blob_copy_cluster_ctx *ctx; 2818 uint64_t cluster_start_io_unit; 2819 uint32_t cluster_number; 2820 bool is_zeroes; 2821 bool can_copy; 2822 bool is_valid_range; 2823 uint64_t copy_src_lba; 2824 int rc; 2825 2826 ch = spdk_io_channel_get_ctx(_ch); 2827 2828 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2829 /* There are already operations pending. Queue this user op 2830 * and return because it will be re-executed when the outstanding 2831 * cluster allocation completes. */ 2832 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2833 return; 2834 } 2835 2836 /* Round the io_unit offset down to the first io_unit in the cluster */ 2837 cluster_start_io_unit = bs_io_unit_to_cluster_start(blob, io_unit); 2838 2839 /* Calculate which index in the metadata cluster array the corresponding 2840 * cluster is supposed to be at. */ 2841 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2842 2843 ctx = calloc(1, sizeof(*ctx)); 2844 if (!ctx) { 2845 bs_user_op_abort(op, -ENOMEM); 2846 return; 2847 } 2848 2849 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2850 2851 ctx->blob = blob; 2852 ctx->io_unit = cluster_start_io_unit; 2853 ctx->new_cluster_page = ch->new_cluster_page; 2854 memset(ctx->new_cluster_page, 0, blob->bs->md_page_size); 2855 2856 /* Check if the cluster that we intend to do CoW for is valid for 2857 * the backing dev. For zeroes backing dev, it'll be always valid. 2858 * For other backing dev e.g. a snapshot, it could be invalid if 2859 * the blob has been resized after snapshot was taken. */ 2860 is_valid_range = blob->back_bs_dev->is_range_valid(blob->back_bs_dev, 2861 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2862 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2863 2864 can_copy = is_valid_range && blob_can_copy(blob, cluster_start_io_unit, ©_src_lba); 2865 2866 is_zeroes = is_valid_range && blob->back_bs_dev->is_zeroes(blob->back_bs_dev, 2867 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2868 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2869 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) { 2870 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2871 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2872 if (!ctx->buf) { 2873 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2874 blob->bs->cluster_sz); 2875 free(ctx); 2876 bs_user_op_abort(op, -ENOMEM); 2877 return; 2878 } 2879 } 2880 2881 spdk_spin_lock(&blob->bs->used_lock); 2882 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2883 false); 2884 spdk_spin_unlock(&blob->bs->used_lock); 2885 if (rc != 0) { 2886 spdk_free(ctx->buf); 2887 free(ctx); 2888 bs_user_op_abort(op, rc); 2889 return; 2890 } 2891 2892 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2893 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2894 cpl.u.blob_basic.cb_arg = ctx; 2895 2896 ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob); 2897 if (!ctx->seq) { 2898 spdk_spin_lock(&blob->bs->used_lock); 2899 bs_release_cluster(blob->bs, ctx->new_cluster); 2900 spdk_spin_unlock(&blob->bs->used_lock); 2901 spdk_free(ctx->buf); 2902 free(ctx); 2903 bs_user_op_abort(op, -ENOMEM); 2904 return; 2905 } 2906 2907 /* Queue the user op to block other incoming operations */ 2908 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2909 2910 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) { 2911 if (can_copy) { 2912 blob_copy(ctx, op, copy_src_lba); 2913 } else { 2914 /* Read cluster from backing device */ 2915 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2916 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2917 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2918 blob_write_copy, ctx); 2919 } 2920 2921 } else { 2922 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2923 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2924 } 2925 } 2926 2927 static inline bool 2928 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2929 uint64_t *lba, uint64_t *lba_count) 2930 { 2931 *lba_count = length; 2932 2933 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2934 assert(blob->back_bs_dev != NULL); 2935 *lba = bs_io_unit_to_back_dev_lba(blob, io_unit); 2936 *lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count); 2937 return false; 2938 } else { 2939 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2940 return true; 2941 } 2942 } 2943 2944 struct op_split_ctx { 2945 struct spdk_blob *blob; 2946 struct spdk_io_channel *channel; 2947 uint64_t io_unit_offset; 2948 uint64_t io_units_remaining; 2949 void *curr_payload; 2950 enum spdk_blob_op_type op_type; 2951 spdk_bs_sequence_t *seq; 2952 bool in_submit_ctx; 2953 bool completed_in_submit_ctx; 2954 bool done; 2955 }; 2956 2957 static void 2958 blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2959 { 2960 struct op_split_ctx *ctx = cb_arg; 2961 struct spdk_blob *blob = ctx->blob; 2962 struct spdk_io_channel *ch = ctx->channel; 2963 enum spdk_blob_op_type op_type = ctx->op_type; 2964 uint8_t *buf; 2965 uint64_t offset; 2966 uint64_t length; 2967 uint64_t op_length; 2968 2969 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2970 bs_sequence_finish(ctx->seq, bserrno); 2971 if (ctx->in_submit_ctx) { 2972 /* Defer freeing of the ctx object, since it will be 2973 * accessed when this unwinds back to the submission 2974 * context. 2975 */ 2976 ctx->done = true; 2977 } else { 2978 free(ctx); 2979 } 2980 return; 2981 } 2982 2983 if (ctx->in_submit_ctx) { 2984 /* If this split operation completed in the context 2985 * of its submission, mark the flag and return immediately 2986 * to avoid recursion. 2987 */ 2988 ctx->completed_in_submit_ctx = true; 2989 return; 2990 } 2991 2992 while (true) { 2993 ctx->completed_in_submit_ctx = false; 2994 2995 offset = ctx->io_unit_offset; 2996 length = ctx->io_units_remaining; 2997 buf = ctx->curr_payload; 2998 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob, 2999 offset)); 3000 3001 /* Update length and payload for next operation */ 3002 ctx->io_units_remaining -= op_length; 3003 ctx->io_unit_offset += op_length; 3004 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 3005 ctx->curr_payload += op_length * blob->bs->io_unit_size; 3006 } 3007 3008 assert(!ctx->in_submit_ctx); 3009 ctx->in_submit_ctx = true; 3010 3011 switch (op_type) { 3012 case SPDK_BLOB_READ: 3013 spdk_blob_io_read(blob, ch, buf, offset, op_length, 3014 blob_request_submit_op_split_next, ctx); 3015 break; 3016 case SPDK_BLOB_WRITE: 3017 spdk_blob_io_write(blob, ch, buf, offset, op_length, 3018 blob_request_submit_op_split_next, ctx); 3019 break; 3020 case SPDK_BLOB_UNMAP: 3021 spdk_blob_io_unmap(blob, ch, offset, op_length, 3022 blob_request_submit_op_split_next, ctx); 3023 break; 3024 case SPDK_BLOB_WRITE_ZEROES: 3025 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 3026 blob_request_submit_op_split_next, ctx); 3027 break; 3028 case SPDK_BLOB_READV: 3029 case SPDK_BLOB_WRITEV: 3030 SPDK_ERRLOG("readv/write not valid\n"); 3031 bs_sequence_finish(ctx->seq, -EINVAL); 3032 free(ctx); 3033 return; 3034 } 3035 3036 #ifndef __clang_analyzer__ 3037 /* scan-build reports a false positive around accessing the ctx here. It 3038 * forms a path that recursively calls this function, but then says 3039 * "assuming ctx->in_submit_ctx is false", when that isn't possible. 3040 * This path does free(ctx), returns to here, and reports a use-after-free 3041 * bug. Wrapping this bit of code so that scan-build doesn't see it 3042 * works around the scan-build bug. 3043 */ 3044 assert(ctx->in_submit_ctx); 3045 ctx->in_submit_ctx = false; 3046 3047 /* If the operation completed immediately, loop back and submit the 3048 * next operation. Otherwise we can return and the next split 3049 * operation will get submitted when this current operation is 3050 * later completed asynchronously. 3051 */ 3052 if (ctx->completed_in_submit_ctx) { 3053 continue; 3054 } else if (ctx->done) { 3055 free(ctx); 3056 } 3057 #endif 3058 break; 3059 } 3060 } 3061 3062 static void 3063 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 3064 void *payload, uint64_t offset, uint64_t length, 3065 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3066 { 3067 struct op_split_ctx *ctx; 3068 spdk_bs_sequence_t *seq; 3069 struct spdk_bs_cpl cpl; 3070 3071 assert(blob != NULL); 3072 3073 ctx = calloc(1, sizeof(struct op_split_ctx)); 3074 if (ctx == NULL) { 3075 cb_fn(cb_arg, -ENOMEM); 3076 return; 3077 } 3078 3079 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3080 cpl.u.blob_basic.cb_fn = cb_fn; 3081 cpl.u.blob_basic.cb_arg = cb_arg; 3082 3083 seq = bs_sequence_start_blob(ch, &cpl, blob); 3084 if (!seq) { 3085 free(ctx); 3086 cb_fn(cb_arg, -ENOMEM); 3087 return; 3088 } 3089 3090 ctx->blob = blob; 3091 ctx->channel = ch; 3092 ctx->curr_payload = payload; 3093 ctx->io_unit_offset = offset; 3094 ctx->io_units_remaining = length; 3095 ctx->op_type = op_type; 3096 ctx->seq = seq; 3097 3098 blob_request_submit_op_split_next(ctx, 0); 3099 } 3100 3101 static void 3102 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno) 3103 { 3104 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 3105 3106 if (bserrno) { 3107 bs_sequence_finish(ctx->seq, bserrno); 3108 free(ctx); 3109 return; 3110 } 3111 3112 blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num, 3113 ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx); 3114 } 3115 3116 static void 3117 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 3118 void *payload, uint64_t offset, uint64_t length, 3119 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3120 { 3121 struct spdk_bs_cpl cpl; 3122 uint64_t lba; 3123 uint64_t lba_count; 3124 bool is_allocated; 3125 3126 assert(blob != NULL); 3127 3128 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3129 cpl.u.blob_basic.cb_fn = cb_fn; 3130 cpl.u.blob_basic.cb_arg = cb_arg; 3131 3132 if (blob->frozen_refcnt) { 3133 /* This blob I/O is frozen */ 3134 spdk_bs_user_op_t *op; 3135 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3136 3137 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3138 if (!op) { 3139 cb_fn(cb_arg, -ENOMEM); 3140 return; 3141 } 3142 3143 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3144 3145 return; 3146 } 3147 3148 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3149 3150 switch (op_type) { 3151 case SPDK_BLOB_READ: { 3152 spdk_bs_batch_t *batch; 3153 3154 batch = bs_batch_open(_ch, &cpl, blob); 3155 if (!batch) { 3156 cb_fn(cb_arg, -ENOMEM); 3157 return; 3158 } 3159 3160 if (is_allocated) { 3161 /* Read from the blob */ 3162 bs_batch_read_dev(batch, payload, lba, lba_count); 3163 } else { 3164 /* Read from the backing block device */ 3165 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 3166 } 3167 3168 bs_batch_close(batch); 3169 break; 3170 } 3171 case SPDK_BLOB_WRITE: 3172 case SPDK_BLOB_WRITE_ZEROES: { 3173 if (is_allocated) { 3174 /* Write to the blob */ 3175 spdk_bs_batch_t *batch; 3176 3177 if (lba_count == 0) { 3178 cb_fn(cb_arg, 0); 3179 return; 3180 } 3181 3182 batch = bs_batch_open(_ch, &cpl, blob); 3183 if (!batch) { 3184 cb_fn(cb_arg, -ENOMEM); 3185 return; 3186 } 3187 3188 if (op_type == SPDK_BLOB_WRITE) { 3189 bs_batch_write_dev(batch, payload, lba, lba_count); 3190 } else { 3191 bs_batch_write_zeroes_dev(batch, lba, lba_count); 3192 } 3193 3194 bs_batch_close(batch); 3195 } else { 3196 /* Queue this operation and allocate the cluster */ 3197 spdk_bs_user_op_t *op; 3198 3199 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3200 if (!op) { 3201 cb_fn(cb_arg, -ENOMEM); 3202 return; 3203 } 3204 3205 bs_allocate_and_copy_cluster(blob, _ch, offset, op); 3206 } 3207 break; 3208 } 3209 case SPDK_BLOB_UNMAP: { 3210 struct spdk_blob_free_cluster_ctx *ctx = NULL; 3211 spdk_bs_batch_t *batch; 3212 3213 /* if aligned with cluster release cluster */ 3214 if (spdk_blob_is_thin_provisioned(blob) && is_allocated && 3215 blob_backed_with_zeroes_dev(blob) && 3216 bs_io_units_per_cluster(blob) == length) { 3217 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3218 uint64_t cluster_start_page; 3219 uint32_t cluster_number; 3220 3221 assert(offset % bs_io_units_per_cluster(blob) == 0); 3222 3223 /* Round the io_unit offset down to the first page in the cluster */ 3224 cluster_start_page = bs_io_unit_to_cluster_start(blob, offset); 3225 3226 /* Calculate which index in the metadata cluster array the corresponding 3227 * cluster is supposed to be at. */ 3228 cluster_number = bs_io_unit_to_cluster_number(blob, offset); 3229 3230 ctx = calloc(1, sizeof(*ctx)); 3231 if (!ctx) { 3232 cb_fn(cb_arg, -ENOMEM); 3233 return; 3234 } 3235 /* When freeing a cluster the flow should be (in order): 3236 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak 3237 * old data) 3238 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the 3239 * cluster), update and sync metadata freeing the cluster 3240 * 3. Once metadata update is done, complete the user unmap request 3241 */ 3242 ctx->blob = blob; 3243 ctx->page = cluster_start_page; 3244 ctx->cluster_num = cluster_number; 3245 ctx->md_page = bs_channel->new_cluster_page; 3246 ctx->seq = bs_sequence_start_bs(_ch, &cpl); 3247 if (!ctx->seq) { 3248 free(ctx); 3249 cb_fn(cb_arg, -ENOMEM); 3250 return; 3251 } 3252 3253 if (blob->use_extent_table) { 3254 ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number); 3255 } 3256 3257 cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete; 3258 cpl.u.blob_basic.cb_arg = ctx; 3259 } 3260 3261 batch = bs_batch_open(_ch, &cpl, blob); 3262 if (!batch) { 3263 free(ctx); 3264 cb_fn(cb_arg, -ENOMEM); 3265 return; 3266 } 3267 3268 if (is_allocated) { 3269 bs_batch_unmap_dev(batch, lba, lba_count); 3270 } 3271 3272 bs_batch_close(batch); 3273 break; 3274 } 3275 case SPDK_BLOB_READV: 3276 case SPDK_BLOB_WRITEV: 3277 SPDK_ERRLOG("readv/write not valid\n"); 3278 cb_fn(cb_arg, -EINVAL); 3279 break; 3280 } 3281 } 3282 3283 static void 3284 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3285 void *payload, uint64_t offset, uint64_t length, 3286 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3287 { 3288 assert(blob != NULL); 3289 3290 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 3291 cb_fn(cb_arg, -EPERM); 3292 return; 3293 } 3294 3295 if (length == 0) { 3296 cb_fn(cb_arg, 0); 3297 return; 3298 } 3299 3300 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3301 cb_fn(cb_arg, -EINVAL); 3302 return; 3303 } 3304 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) { 3305 blob_request_submit_op_single(_channel, blob, payload, offset, length, 3306 cb_fn, cb_arg, op_type); 3307 } else { 3308 blob_request_submit_op_split(_channel, blob, payload, offset, length, 3309 cb_fn, cb_arg, op_type); 3310 } 3311 } 3312 3313 struct rw_iov_ctx { 3314 struct spdk_blob *blob; 3315 struct spdk_io_channel *channel; 3316 spdk_blob_op_complete cb_fn; 3317 void *cb_arg; 3318 bool read; 3319 int iovcnt; 3320 struct iovec *orig_iov; 3321 uint64_t io_unit_offset; 3322 uint64_t io_units_remaining; 3323 uint64_t io_units_done; 3324 struct spdk_blob_ext_io_opts *ext_io_opts; 3325 struct iovec iov[0]; 3326 }; 3327 3328 static void 3329 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3330 { 3331 assert(cb_arg == NULL); 3332 bs_sequence_finish(seq, bserrno); 3333 } 3334 3335 static void 3336 rw_iov_split_next(void *cb_arg, int bserrno) 3337 { 3338 struct rw_iov_ctx *ctx = cb_arg; 3339 struct spdk_blob *blob = ctx->blob; 3340 struct iovec *iov, *orig_iov; 3341 int iovcnt; 3342 size_t orig_iovoff; 3343 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 3344 uint64_t byte_count; 3345 3346 if (bserrno != 0 || ctx->io_units_remaining == 0) { 3347 ctx->cb_fn(ctx->cb_arg, bserrno); 3348 free(ctx); 3349 return; 3350 } 3351 3352 io_unit_offset = ctx->io_unit_offset; 3353 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 3354 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 3355 /* 3356 * Get index and offset into the original iov array for our current position in the I/O sequence. 3357 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 3358 * point to the current position in the I/O sequence. 3359 */ 3360 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 3361 orig_iov = &ctx->orig_iov[0]; 3362 orig_iovoff = 0; 3363 while (byte_count > 0) { 3364 if (byte_count >= orig_iov->iov_len) { 3365 byte_count -= orig_iov->iov_len; 3366 orig_iov++; 3367 } else { 3368 orig_iovoff = byte_count; 3369 byte_count = 0; 3370 } 3371 } 3372 3373 /* 3374 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 3375 * bytes of this next I/O remain to be accounted for in the new iov array. 3376 */ 3377 byte_count = io_units_count * blob->bs->io_unit_size; 3378 iov = &ctx->iov[0]; 3379 iovcnt = 0; 3380 while (byte_count > 0) { 3381 assert(iovcnt < ctx->iovcnt); 3382 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 3383 iov->iov_base = orig_iov->iov_base + orig_iovoff; 3384 byte_count -= iov->iov_len; 3385 orig_iovoff = 0; 3386 orig_iov++; 3387 iov++; 3388 iovcnt++; 3389 } 3390 3391 ctx->io_unit_offset += io_units_count; 3392 ctx->io_units_remaining -= io_units_count; 3393 ctx->io_units_done += io_units_count; 3394 iov = &ctx->iov[0]; 3395 3396 if (ctx->read) { 3397 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3398 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3399 } else { 3400 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3401 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3402 } 3403 } 3404 3405 static void 3406 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3407 struct iovec *iov, int iovcnt, 3408 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 3409 struct spdk_blob_ext_io_opts *ext_io_opts) 3410 { 3411 struct spdk_bs_cpl cpl; 3412 3413 assert(blob != NULL); 3414 3415 if (!read && blob->data_ro) { 3416 cb_fn(cb_arg, -EPERM); 3417 return; 3418 } 3419 3420 if (length == 0) { 3421 cb_fn(cb_arg, 0); 3422 return; 3423 } 3424 3425 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3426 cb_fn(cb_arg, -EINVAL); 3427 return; 3428 } 3429 3430 /* 3431 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 3432 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 3433 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 3434 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 3435 * to allocate a separate iov array and split the I/O such that none of the resulting 3436 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 3437 * but since this case happens very infrequently, any performance impact will be negligible. 3438 * 3439 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 3440 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 3441 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 3442 * when the batch was completed, to allow for freeing the memory for the iov arrays. 3443 */ 3444 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 3445 uint64_t lba_count; 3446 uint64_t lba; 3447 bool is_allocated; 3448 3449 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3450 cpl.u.blob_basic.cb_fn = cb_fn; 3451 cpl.u.blob_basic.cb_arg = cb_arg; 3452 3453 if (blob->frozen_refcnt) { 3454 /* This blob I/O is frozen */ 3455 enum spdk_blob_op_type op_type; 3456 spdk_bs_user_op_t *op; 3457 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 3458 3459 op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 3460 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 3461 if (!op) { 3462 cb_fn(cb_arg, -ENOMEM); 3463 return; 3464 } 3465 3466 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3467 3468 return; 3469 } 3470 3471 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3472 3473 if (read) { 3474 spdk_bs_sequence_t *seq; 3475 3476 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3477 if (!seq) { 3478 cb_fn(cb_arg, -ENOMEM); 3479 return; 3480 } 3481 3482 seq->ext_io_opts = ext_io_opts; 3483 3484 if (is_allocated) { 3485 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3486 } else { 3487 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3488 rw_iov_done, NULL); 3489 } 3490 } else { 3491 if (is_allocated) { 3492 spdk_bs_sequence_t *seq; 3493 3494 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3495 if (!seq) { 3496 cb_fn(cb_arg, -ENOMEM); 3497 return; 3498 } 3499 3500 seq->ext_io_opts = ext_io_opts; 3501 3502 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3503 } else { 3504 /* Queue this operation and allocate the cluster */ 3505 spdk_bs_user_op_t *op; 3506 3507 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3508 length); 3509 if (!op) { 3510 cb_fn(cb_arg, -ENOMEM); 3511 return; 3512 } 3513 3514 op->ext_io_opts = ext_io_opts; 3515 3516 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3517 } 3518 } 3519 } else { 3520 struct rw_iov_ctx *ctx; 3521 3522 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3523 if (ctx == NULL) { 3524 cb_fn(cb_arg, -ENOMEM); 3525 return; 3526 } 3527 3528 ctx->blob = blob; 3529 ctx->channel = _channel; 3530 ctx->cb_fn = cb_fn; 3531 ctx->cb_arg = cb_arg; 3532 ctx->read = read; 3533 ctx->orig_iov = iov; 3534 ctx->iovcnt = iovcnt; 3535 ctx->io_unit_offset = offset; 3536 ctx->io_units_remaining = length; 3537 ctx->io_units_done = 0; 3538 ctx->ext_io_opts = ext_io_opts; 3539 3540 rw_iov_split_next(ctx, 0); 3541 } 3542 } 3543 3544 static struct spdk_blob * 3545 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3546 { 3547 struct spdk_blob find; 3548 3549 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3550 return NULL; 3551 } 3552 3553 find.id = blobid; 3554 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3555 } 3556 3557 static void 3558 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3559 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3560 { 3561 assert(blob != NULL); 3562 *snapshot_entry = NULL; 3563 *clone_entry = NULL; 3564 3565 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3566 return; 3567 } 3568 3569 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3570 if ((*snapshot_entry)->id == blob->parent_id) { 3571 break; 3572 } 3573 } 3574 3575 if (*snapshot_entry != NULL) { 3576 TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) { 3577 if ((*clone_entry)->id == blob->id) { 3578 break; 3579 } 3580 } 3581 3582 assert(*clone_entry != NULL); 3583 } 3584 } 3585 3586 static int 3587 bs_channel_create(void *io_device, void *ctx_buf) 3588 { 3589 struct spdk_blob_store *bs = io_device; 3590 struct spdk_bs_channel *channel = ctx_buf; 3591 struct spdk_bs_dev *dev; 3592 uint32_t max_ops = bs->max_channel_ops; 3593 uint32_t i; 3594 3595 dev = bs->dev; 3596 3597 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3598 if (!channel->req_mem) { 3599 return -1; 3600 } 3601 3602 TAILQ_INIT(&channel->reqs); 3603 3604 for (i = 0; i < max_ops; i++) { 3605 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3606 } 3607 3608 channel->bs = bs; 3609 channel->dev = dev; 3610 channel->dev_channel = dev->create_channel(dev); 3611 3612 if (!channel->dev_channel) { 3613 SPDK_ERRLOG("Failed to create device channel.\n"); 3614 free(channel->req_mem); 3615 return -1; 3616 } 3617 3618 channel->new_cluster_page = spdk_zmalloc(bs->md_page_size, 0, NULL, SPDK_ENV_NUMA_ID_ANY, 3619 SPDK_MALLOC_DMA); 3620 if (!channel->new_cluster_page) { 3621 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3622 free(channel->req_mem); 3623 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3624 return -1; 3625 } 3626 3627 TAILQ_INIT(&channel->need_cluster_alloc); 3628 TAILQ_INIT(&channel->queued_io); 3629 RB_INIT(&channel->esnap_channels); 3630 3631 return 0; 3632 } 3633 3634 static void 3635 bs_channel_destroy(void *io_device, void *ctx_buf) 3636 { 3637 struct spdk_bs_channel *channel = ctx_buf; 3638 spdk_bs_user_op_t *op; 3639 3640 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3641 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3642 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3643 bs_user_op_abort(op, -EIO); 3644 } 3645 3646 while (!TAILQ_EMPTY(&channel->queued_io)) { 3647 op = TAILQ_FIRST(&channel->queued_io); 3648 TAILQ_REMOVE(&channel->queued_io, op, link); 3649 bs_user_op_abort(op, -EIO); 3650 } 3651 3652 blob_esnap_destroy_bs_channel(channel); 3653 3654 free(channel->req_mem); 3655 spdk_free(channel->new_cluster_page); 3656 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3657 } 3658 3659 static void 3660 bs_dev_destroy(void *io_device) 3661 { 3662 struct spdk_blob_store *bs = io_device; 3663 struct spdk_blob *blob, *blob_tmp; 3664 3665 bs->dev->destroy(bs->dev); 3666 3667 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3668 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3669 spdk_bit_array_clear(bs->open_blobids, blob->id); 3670 blob_free(blob); 3671 } 3672 3673 spdk_spin_destroy(&bs->used_lock); 3674 3675 spdk_bit_array_free(&bs->open_blobids); 3676 spdk_bit_array_free(&bs->used_blobids); 3677 spdk_bit_array_free(&bs->used_md_pages); 3678 spdk_bit_pool_free(&bs->used_clusters); 3679 /* 3680 * If this function is called for any reason except a successful unload, 3681 * the unload_cpl type will be NONE and this will be a nop. 3682 */ 3683 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3684 3685 free(bs); 3686 } 3687 3688 static int 3689 bs_blob_list_add(struct spdk_blob *blob) 3690 { 3691 spdk_blob_id snapshot_id; 3692 struct spdk_blob_list *snapshot_entry = NULL; 3693 struct spdk_blob_list *clone_entry = NULL; 3694 3695 assert(blob != NULL); 3696 3697 snapshot_id = blob->parent_id; 3698 if (snapshot_id == SPDK_BLOBID_INVALID || 3699 snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 3700 return 0; 3701 } 3702 3703 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3704 if (snapshot_entry == NULL) { 3705 /* Snapshot not found */ 3706 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3707 if (snapshot_entry == NULL) { 3708 return -ENOMEM; 3709 } 3710 snapshot_entry->id = snapshot_id; 3711 TAILQ_INIT(&snapshot_entry->clones); 3712 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3713 } else { 3714 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3715 if (clone_entry->id == blob->id) { 3716 break; 3717 } 3718 } 3719 } 3720 3721 if (clone_entry == NULL) { 3722 /* Clone not found */ 3723 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3724 if (clone_entry == NULL) { 3725 return -ENOMEM; 3726 } 3727 clone_entry->id = blob->id; 3728 TAILQ_INIT(&clone_entry->clones); 3729 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3730 snapshot_entry->clone_count++; 3731 } 3732 3733 return 0; 3734 } 3735 3736 static void 3737 bs_blob_list_remove(struct spdk_blob *blob) 3738 { 3739 struct spdk_blob_list *snapshot_entry = NULL; 3740 struct spdk_blob_list *clone_entry = NULL; 3741 3742 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3743 3744 if (snapshot_entry == NULL) { 3745 return; 3746 } 3747 3748 blob->parent_id = SPDK_BLOBID_INVALID; 3749 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3750 free(clone_entry); 3751 3752 snapshot_entry->clone_count--; 3753 } 3754 3755 static int 3756 bs_blob_list_free(struct spdk_blob_store *bs) 3757 { 3758 struct spdk_blob_list *snapshot_entry; 3759 struct spdk_blob_list *snapshot_entry_tmp; 3760 struct spdk_blob_list *clone_entry; 3761 struct spdk_blob_list *clone_entry_tmp; 3762 3763 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3764 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3765 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3766 free(clone_entry); 3767 } 3768 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3769 free(snapshot_entry); 3770 } 3771 3772 return 0; 3773 } 3774 3775 static void 3776 bs_free(struct spdk_blob_store *bs) 3777 { 3778 bs_blob_list_free(bs); 3779 3780 bs_unregister_md_thread(bs); 3781 spdk_io_device_unregister(bs, bs_dev_destroy); 3782 } 3783 3784 void 3785 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3786 { 3787 3788 if (!opts) { 3789 SPDK_ERRLOG("opts should not be NULL\n"); 3790 return; 3791 } 3792 3793 if (!opts_size) { 3794 SPDK_ERRLOG("opts_size should not be zero value\n"); 3795 return; 3796 } 3797 3798 memset(opts, 0, opts_size); 3799 opts->opts_size = opts_size; 3800 3801 #define FIELD_OK(field) \ 3802 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3803 3804 #define SET_FIELD(field, value) \ 3805 if (FIELD_OK(field)) { \ 3806 opts->field = value; \ 3807 } \ 3808 3809 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3810 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3811 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3812 SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3813 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3814 3815 if (FIELD_OK(bstype)) { 3816 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3817 } 3818 3819 SET_FIELD(iter_cb_fn, NULL); 3820 SET_FIELD(iter_cb_arg, NULL); 3821 SET_FIELD(force_recover, false); 3822 SET_FIELD(esnap_bs_dev_create, NULL); 3823 SET_FIELD(esnap_ctx, NULL); 3824 3825 #undef FIELD_OK 3826 #undef SET_FIELD 3827 } 3828 3829 static int 3830 bs_opts_verify(struct spdk_bs_opts *opts) 3831 { 3832 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3833 opts->max_channel_ops == 0) { 3834 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3835 return -1; 3836 } 3837 3838 if ((opts->cluster_sz % SPDK_BS_PAGE_SIZE) != 0) { 3839 SPDK_ERRLOG("Cluster size %" PRIu32 " is not an integral multiple of blocklen %" PRIu32"\n", 3840 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3841 return -1; 3842 } 3843 3844 return 0; 3845 } 3846 3847 /* START spdk_bs_load */ 3848 3849 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3850 3851 struct spdk_bs_load_ctx { 3852 struct spdk_blob_store *bs; 3853 struct spdk_bs_super_block *super; 3854 3855 struct spdk_bs_md_mask *mask; 3856 bool in_page_chain; 3857 uint32_t page_index; 3858 uint32_t cur_page; 3859 struct spdk_blob_md_page *page; 3860 3861 uint64_t num_extent_pages; 3862 uint32_t *extent_page_num; 3863 struct spdk_blob_md_page *extent_pages; 3864 struct spdk_bit_array *used_clusters; 3865 3866 spdk_bs_sequence_t *seq; 3867 spdk_blob_op_with_handle_complete iter_cb_fn; 3868 void *iter_cb_arg; 3869 struct spdk_blob *blob; 3870 spdk_blob_id blobid; 3871 3872 bool force_recover; 3873 3874 /* These fields are used in the spdk_bs_dump path. */ 3875 bool dumping; 3876 FILE *fp; 3877 spdk_bs_dump_print_xattr print_xattr_fn; 3878 char xattr_name[4096]; 3879 }; 3880 3881 static void 3882 bs_init_per_cluster_fields(struct spdk_blob_store *bs) 3883 { 3884 bs->pages_per_cluster = bs->cluster_sz / bs->md_page_size; 3885 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3886 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3887 } 3888 bs->io_units_per_cluster = bs->cluster_sz / bs->io_unit_size; 3889 if (spdk_u32_is_pow2(bs->io_units_per_cluster)) { 3890 bs->io_units_per_cluster_shift = spdk_u32log2(bs->io_units_per_cluster); 3891 } 3892 } 3893 3894 static int 3895 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3896 struct spdk_bs_load_ctx **_ctx) 3897 { 3898 struct spdk_blob_store *bs; 3899 struct spdk_bs_load_ctx *ctx; 3900 uint64_t dev_size; 3901 uint32_t md_page_size; 3902 int rc; 3903 3904 dev_size = dev->blocklen * dev->blockcnt; 3905 if (dev_size < opts->cluster_sz) { 3906 /* Device size cannot be smaller than cluster size of blobstore */ 3907 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3908 dev_size, opts->cluster_sz); 3909 return -ENOSPC; 3910 } 3911 3912 md_page_size = spdk_max(spdk_max(dev->phys_blocklen, SPDK_BS_PAGE_SIZE), 3913 opts->md_page_size); 3914 if (opts->cluster_sz < md_page_size) { 3915 /* Cluster size cannot be smaller than page size */ 3916 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3917 opts->cluster_sz, md_page_size); 3918 return -EINVAL; 3919 } 3920 bs = calloc(1, sizeof(struct spdk_blob_store)); 3921 if (!bs) { 3922 return -ENOMEM; 3923 } 3924 3925 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3926 if (!ctx) { 3927 free(bs); 3928 return -ENOMEM; 3929 } 3930 3931 ctx->bs = bs; 3932 ctx->iter_cb_fn = opts->iter_cb_fn; 3933 ctx->iter_cb_arg = opts->iter_cb_arg; 3934 ctx->force_recover = opts->force_recover; 3935 3936 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3937 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 3938 if (!ctx->super) { 3939 free(ctx); 3940 free(bs); 3941 return -ENOMEM; 3942 } 3943 3944 RB_INIT(&bs->open_blobs); 3945 TAILQ_INIT(&bs->snapshots); 3946 bs->dev = dev; 3947 bs->md_page_size = md_page_size; 3948 bs->md_thread = spdk_get_thread(); 3949 assert(bs->md_thread != NULL); 3950 3951 /* 3952 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3953 * even multiple of the cluster size. 3954 */ 3955 bs->cluster_sz = opts->cluster_sz; 3956 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3957 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3958 if (!ctx->used_clusters) { 3959 spdk_free(ctx->super); 3960 free(ctx); 3961 free(bs); 3962 return -ENOMEM; 3963 } 3964 3965 bs->num_free_clusters = bs->total_clusters; 3966 bs->io_unit_size = dev->blocklen; 3967 bs_init_per_cluster_fields(bs); 3968 3969 bs->max_channel_ops = opts->max_channel_ops; 3970 bs->super_blob = SPDK_BLOBID_INVALID; 3971 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3972 bs->esnap_bs_dev_create = opts->esnap_bs_dev_create; 3973 bs->esnap_ctx = opts->esnap_ctx; 3974 3975 /* The metadata is assumed to be at least 1 page */ 3976 bs->used_md_pages = spdk_bit_array_create(1); 3977 bs->used_blobids = spdk_bit_array_create(0); 3978 bs->open_blobids = spdk_bit_array_create(0); 3979 3980 spdk_spin_init(&bs->used_lock); 3981 3982 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3983 sizeof(struct spdk_bs_channel), "blobstore"); 3984 rc = bs_register_md_thread(bs); 3985 if (rc == -1) { 3986 spdk_io_device_unregister(bs, NULL); 3987 spdk_spin_destroy(&bs->used_lock); 3988 spdk_bit_array_free(&bs->open_blobids); 3989 spdk_bit_array_free(&bs->used_blobids); 3990 spdk_bit_array_free(&bs->used_md_pages); 3991 spdk_bit_array_free(&ctx->used_clusters); 3992 spdk_free(ctx->super); 3993 free(ctx); 3994 free(bs); 3995 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3996 return -ENOMEM; 3997 } 3998 3999 *_ctx = ctx; 4000 *_bs = bs; 4001 return 0; 4002 } 4003 4004 static void 4005 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 4006 { 4007 assert(bserrno != 0); 4008 4009 spdk_free(ctx->super); 4010 bs_sequence_finish(ctx->seq, bserrno); 4011 bs_free(ctx->bs); 4012 spdk_bit_array_free(&ctx->used_clusters); 4013 free(ctx); 4014 } 4015 4016 static void 4017 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 4018 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 4019 { 4020 /* Update the values in the super block */ 4021 super->super_blob = bs->super_blob; 4022 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 4023 super->crc = blob_md_page_calc_crc(super); 4024 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 4025 bs_byte_to_lba(bs, sizeof(*super)), 4026 cb_fn, cb_arg); 4027 } 4028 4029 static void 4030 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4031 { 4032 struct spdk_bs_load_ctx *ctx = arg; 4033 uint64_t mask_size, lba, lba_count; 4034 4035 /* Write out the used clusters mask */ 4036 mask_size = ctx->super->used_cluster_mask_len * ctx->bs->md_page_size; 4037 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4038 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4039 if (!ctx->mask) { 4040 bs_load_ctx_fail(ctx, -ENOMEM); 4041 return; 4042 } 4043 4044 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 4045 ctx->mask->length = ctx->bs->total_clusters; 4046 /* We could get here through the normal unload path, or through dirty 4047 * shutdown recovery. For the normal unload path, we use the mask from 4048 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 4049 * only the bit array from the load ctx. 4050 */ 4051 if (ctx->bs->used_clusters) { 4052 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 4053 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 4054 } else { 4055 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 4056 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 4057 } 4058 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4059 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4060 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4061 } 4062 4063 static void 4064 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4065 { 4066 struct spdk_bs_load_ctx *ctx = arg; 4067 uint64_t mask_size, lba, lba_count; 4068 4069 mask_size = ctx->super->used_page_mask_len * ctx->bs->md_page_size; 4070 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4071 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4072 if (!ctx->mask) { 4073 bs_load_ctx_fail(ctx, -ENOMEM); 4074 return; 4075 } 4076 4077 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 4078 ctx->mask->length = ctx->super->md_len; 4079 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 4080 4081 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4082 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4083 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4084 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4085 } 4086 4087 static void 4088 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4089 { 4090 struct spdk_bs_load_ctx *ctx = arg; 4091 uint64_t mask_size, lba, lba_count; 4092 4093 if (ctx->super->used_blobid_mask_len == 0) { 4094 /* 4095 * This is a pre-v3 on-disk format where the blobid mask does not get 4096 * written to disk. 4097 */ 4098 cb_fn(seq, arg, 0); 4099 return; 4100 } 4101 4102 mask_size = ctx->super->used_blobid_mask_len * ctx->bs->md_page_size; 4103 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4104 SPDK_MALLOC_DMA); 4105 if (!ctx->mask) { 4106 bs_load_ctx_fail(ctx, -ENOMEM); 4107 return; 4108 } 4109 4110 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 4111 ctx->mask->length = ctx->super->md_len; 4112 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 4113 4114 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 4115 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4116 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4117 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4118 } 4119 4120 static void 4121 blob_set_thin_provision(struct spdk_blob *blob) 4122 { 4123 blob_verify_md_op(blob); 4124 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4125 blob->state = SPDK_BLOB_STATE_DIRTY; 4126 } 4127 4128 static void 4129 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 4130 { 4131 blob_verify_md_op(blob); 4132 blob->clear_method = clear_method; 4133 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 4134 blob->state = SPDK_BLOB_STATE_DIRTY; 4135 } 4136 4137 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 4138 4139 static void 4140 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 4141 { 4142 struct spdk_bs_load_ctx *ctx = cb_arg; 4143 spdk_blob_id id; 4144 int64_t page_num; 4145 4146 /* Iterate to next blob (we can't use spdk_bs_iter_next function as our 4147 * last blob has been removed */ 4148 page_num = bs_blobid_to_page(ctx->blobid); 4149 page_num++; 4150 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 4151 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 4152 bs_load_iter(ctx, NULL, -ENOENT); 4153 return; 4154 } 4155 4156 id = bs_page_to_blobid(page_num); 4157 4158 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 4159 } 4160 4161 static void 4162 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 4163 { 4164 struct spdk_bs_load_ctx *ctx = cb_arg; 4165 4166 if (bserrno != 0) { 4167 SPDK_ERRLOG("Failed to close corrupted blob\n"); 4168 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4169 return; 4170 } 4171 4172 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 4173 } 4174 4175 static void 4176 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 4177 { 4178 struct spdk_bs_load_ctx *ctx = cb_arg; 4179 uint64_t i; 4180 4181 if (bserrno != 0) { 4182 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4183 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4184 return; 4185 } 4186 4187 /* Snapshot and clone have the same copy of cluster map and extent pages 4188 * at this point. Let's clear both for snapshot now, 4189 * so that it won't be cleared for clone later when we remove snapshot. 4190 * Also set thin provision to pass data corruption check */ 4191 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 4192 ctx->blob->active.clusters[i] = 0; 4193 } 4194 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 4195 ctx->blob->active.extent_pages[i] = 0; 4196 } 4197 4198 ctx->blob->active.num_allocated_clusters = 0; 4199 4200 ctx->blob->md_ro = false; 4201 4202 blob_set_thin_provision(ctx->blob); 4203 4204 ctx->blobid = ctx->blob->id; 4205 4206 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 4207 } 4208 4209 static void 4210 bs_update_corrupted_blob(void *cb_arg, int bserrno) 4211 { 4212 struct spdk_bs_load_ctx *ctx = cb_arg; 4213 4214 if (bserrno != 0) { 4215 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4216 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4217 return; 4218 } 4219 4220 ctx->blob->md_ro = false; 4221 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 4222 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 4223 spdk_blob_set_read_only(ctx->blob); 4224 4225 if (ctx->iter_cb_fn) { 4226 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 4227 } 4228 bs_blob_list_add(ctx->blob); 4229 4230 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4231 } 4232 4233 static void 4234 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 4235 { 4236 struct spdk_bs_load_ctx *ctx = cb_arg; 4237 4238 if (bserrno != 0) { 4239 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 4240 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4241 return; 4242 } 4243 4244 if (blob->parent_id == ctx->blob->id) { 4245 /* Power failure occurred before updating clone (snapshot delete case) 4246 * or after updating clone (creating snapshot case) - keep snapshot */ 4247 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 4248 } else { 4249 /* Power failure occurred after updating clone (snapshot delete case) 4250 * or before updating clone (creating snapshot case) - remove snapshot */ 4251 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 4252 } 4253 } 4254 4255 static void 4256 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 4257 { 4258 struct spdk_bs_load_ctx *ctx = arg; 4259 const void *value; 4260 size_t len; 4261 int rc = 0; 4262 4263 if (bserrno == 0) { 4264 /* Examine blob if it is corrupted after power failure. Fix 4265 * the ones that can be fixed and remove any other corrupted 4266 * ones. If it is not corrupted just process it */ 4267 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 4268 if (rc != 0) { 4269 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 4270 if (rc != 0) { 4271 /* Not corrupted - process it and continue with iterating through blobs */ 4272 if (ctx->iter_cb_fn) { 4273 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 4274 } 4275 bs_blob_list_add(blob); 4276 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 4277 return; 4278 } 4279 4280 } 4281 4282 assert(len == sizeof(spdk_blob_id)); 4283 4284 ctx->blob = blob; 4285 4286 /* Open clone to check if we are able to fix this blob or should we remove it */ 4287 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 4288 return; 4289 } else if (bserrno == -ENOENT) { 4290 bserrno = 0; 4291 } else { 4292 /* 4293 * This case needs to be looked at further. Same problem 4294 * exists with applications that rely on explicit blob 4295 * iteration. We should just skip the blob that failed 4296 * to load and continue on to the next one. 4297 */ 4298 SPDK_ERRLOG("Error in iterating blobs\n"); 4299 } 4300 4301 ctx->iter_cb_fn = NULL; 4302 4303 spdk_free(ctx->super); 4304 spdk_free(ctx->mask); 4305 bs_sequence_finish(ctx->seq, bserrno); 4306 free(ctx); 4307 } 4308 4309 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 4310 4311 static void 4312 bs_load_complete(struct spdk_bs_load_ctx *ctx) 4313 { 4314 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters); 4315 if (ctx->dumping) { 4316 bs_dump_read_md_page(ctx->seq, ctx); 4317 return; 4318 } 4319 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx); 4320 } 4321 4322 static void 4323 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4324 { 4325 struct spdk_bs_load_ctx *ctx = cb_arg; 4326 int rc; 4327 4328 /* The type must be correct */ 4329 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 4330 4331 /* The length of the mask (in bits) must not be greater than 4332 * the length of the buffer (converted to bits) */ 4333 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * ctx->super->md_page_size * 8)); 4334 4335 /* The length of the mask must be exactly equal to the size 4336 * (in pages) of the metadata region */ 4337 assert(ctx->mask->length == ctx->super->md_len); 4338 4339 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length); 4340 if (rc < 0) { 4341 spdk_free(ctx->mask); 4342 bs_load_ctx_fail(ctx, rc); 4343 return; 4344 } 4345 4346 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask); 4347 bs_load_complete(ctx); 4348 } 4349 4350 static void 4351 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4352 { 4353 struct spdk_bs_load_ctx *ctx = cb_arg; 4354 uint64_t lba, lba_count, mask_size; 4355 int rc; 4356 4357 if (bserrno != 0) { 4358 bs_load_ctx_fail(ctx, bserrno); 4359 return; 4360 } 4361 4362 /* The type must be correct */ 4363 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 4364 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4365 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 4366 struct spdk_blob_md_page) * 8)); 4367 /* 4368 * The length of the mask must be equal to or larger than the total number of clusters. It may be 4369 * larger than the total number of clusters due to a failure spdk_bs_grow. 4370 */ 4371 assert(ctx->mask->length >= ctx->bs->total_clusters); 4372 if (ctx->mask->length > ctx->bs->total_clusters) { 4373 SPDK_WARNLOG("Shrink the used_custers mask length to total_clusters"); 4374 ctx->mask->length = ctx->bs->total_clusters; 4375 } 4376 4377 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length); 4378 if (rc < 0) { 4379 spdk_free(ctx->mask); 4380 bs_load_ctx_fail(ctx, rc); 4381 return; 4382 } 4383 4384 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask); 4385 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters); 4386 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 4387 4388 spdk_free(ctx->mask); 4389 4390 /* Read the used blobids mask */ 4391 mask_size = ctx->super->used_blobid_mask_len * ctx->super->md_page_size; 4392 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4393 SPDK_MALLOC_DMA); 4394 if (!ctx->mask) { 4395 bs_load_ctx_fail(ctx, -ENOMEM); 4396 return; 4397 } 4398 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4399 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4400 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4401 bs_load_used_blobids_cpl, ctx); 4402 } 4403 4404 static void 4405 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4406 { 4407 struct spdk_bs_load_ctx *ctx = cb_arg; 4408 uint64_t lba, lba_count, mask_size; 4409 int rc; 4410 4411 if (bserrno != 0) { 4412 bs_load_ctx_fail(ctx, bserrno); 4413 return; 4414 } 4415 4416 /* The type must be correct */ 4417 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 4418 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4419 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * ctx->super->md_page_size * 4420 8)); 4421 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 4422 if (ctx->mask->length != ctx->super->md_len) { 4423 SPDK_ERRLOG("mismatched md_len in used_pages mask: " 4424 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n", 4425 ctx->mask->length, ctx->super->md_len); 4426 assert(false); 4427 } 4428 4429 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length); 4430 if (rc < 0) { 4431 spdk_free(ctx->mask); 4432 bs_load_ctx_fail(ctx, rc); 4433 return; 4434 } 4435 4436 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4437 spdk_free(ctx->mask); 4438 4439 /* Read the used clusters mask */ 4440 mask_size = ctx->super->used_cluster_mask_len * ctx->super->md_page_size; 4441 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4442 SPDK_MALLOC_DMA); 4443 if (!ctx->mask) { 4444 bs_load_ctx_fail(ctx, -ENOMEM); 4445 return; 4446 } 4447 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4448 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4449 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4450 bs_load_used_clusters_cpl, ctx); 4451 } 4452 4453 static void 4454 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx) 4455 { 4456 uint64_t lba, lba_count, mask_size; 4457 4458 /* Read the used pages mask */ 4459 mask_size = ctx->super->used_page_mask_len * ctx->super->md_page_size; 4460 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4461 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4462 if (!ctx->mask) { 4463 bs_load_ctx_fail(ctx, -ENOMEM); 4464 return; 4465 } 4466 4467 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4468 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4469 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 4470 bs_load_used_pages_cpl, ctx); 4471 } 4472 4473 static int 4474 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 4475 { 4476 struct spdk_blob_store *bs = ctx->bs; 4477 struct spdk_blob_md_descriptor *desc; 4478 size_t cur_desc = 0; 4479 4480 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4481 while (cur_desc < sizeof(page->descriptors)) { 4482 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4483 if (desc->length == 0) { 4484 /* If padding and length are 0, this terminates the page */ 4485 break; 4486 } 4487 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4488 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4489 unsigned int i, j; 4490 unsigned int cluster_count = 0; 4491 uint32_t cluster_idx; 4492 4493 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4494 4495 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4496 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 4497 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 4498 /* 4499 * cluster_idx = 0 means an unallocated cluster - don't mark that 4500 * in the used cluster map. 4501 */ 4502 if (cluster_idx != 0) { 4503 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 4504 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 4505 if (bs->num_free_clusters == 0) { 4506 return -ENOSPC; 4507 } 4508 bs->num_free_clusters--; 4509 } 4510 cluster_count++; 4511 } 4512 } 4513 if (cluster_count == 0) { 4514 return -EINVAL; 4515 } 4516 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4517 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4518 uint32_t i; 4519 uint32_t cluster_count = 0; 4520 uint32_t cluster_idx; 4521 size_t cluster_idx_length; 4522 4523 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4524 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4525 4526 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4527 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4528 return -EINVAL; 4529 } 4530 4531 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4532 cluster_idx = desc_extent->cluster_idx[i]; 4533 /* 4534 * cluster_idx = 0 means an unallocated cluster - don't mark that 4535 * in the used cluster map. 4536 */ 4537 if (cluster_idx != 0) { 4538 if (cluster_idx < desc_extent->start_cluster_idx && 4539 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4540 return -EINVAL; 4541 } 4542 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4543 if (bs->num_free_clusters == 0) { 4544 return -ENOSPC; 4545 } 4546 bs->num_free_clusters--; 4547 } 4548 cluster_count++; 4549 } 4550 4551 if (cluster_count == 0) { 4552 return -EINVAL; 4553 } 4554 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4555 /* Skip this item */ 4556 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4557 /* Skip this item */ 4558 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4559 /* Skip this item */ 4560 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4561 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4562 uint32_t num_extent_pages = ctx->num_extent_pages; 4563 uint32_t i; 4564 size_t extent_pages_length; 4565 void *tmp; 4566 4567 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4568 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4569 4570 if (desc_extent_table->length == 0 || 4571 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4572 return -EINVAL; 4573 } 4574 4575 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4576 if (desc_extent_table->extent_page[i].page_idx != 0) { 4577 if (desc_extent_table->extent_page[i].num_pages != 1) { 4578 return -EINVAL; 4579 } 4580 num_extent_pages += 1; 4581 } 4582 } 4583 4584 if (num_extent_pages > 0) { 4585 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4586 if (tmp == NULL) { 4587 return -ENOMEM; 4588 } 4589 ctx->extent_page_num = tmp; 4590 4591 /* Extent table entries contain md page numbers for extent pages. 4592 * Zeroes represent unallocated extent pages, those are run-length-encoded. 4593 */ 4594 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4595 if (desc_extent_table->extent_page[i].page_idx != 0) { 4596 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4597 ctx->num_extent_pages += 1; 4598 } 4599 } 4600 } 4601 } else { 4602 /* Error */ 4603 return -EINVAL; 4604 } 4605 /* Advance to the next descriptor */ 4606 cur_desc += sizeof(*desc) + desc->length; 4607 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4608 break; 4609 } 4610 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4611 } 4612 return 0; 4613 } 4614 4615 static bool 4616 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4617 { 4618 uint32_t crc; 4619 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4620 size_t desc_len; 4621 4622 crc = blob_md_page_calc_crc(page); 4623 if (crc != page->crc) { 4624 return false; 4625 } 4626 4627 /* Extent page should always be of sequence num 0. */ 4628 if (page->sequence_num != 0) { 4629 return false; 4630 } 4631 4632 /* Descriptor type must be EXTENT_PAGE. */ 4633 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4634 return false; 4635 } 4636 4637 /* Descriptor length cannot exceed the page. */ 4638 desc_len = sizeof(*desc) + desc->length; 4639 if (desc_len > sizeof(page->descriptors)) { 4640 return false; 4641 } 4642 4643 /* It has to be the only descriptor in the page. */ 4644 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 4645 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 4646 if (desc->length != 0) { 4647 return false; 4648 } 4649 } 4650 4651 return true; 4652 } 4653 4654 static bool 4655 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 4656 { 4657 uint32_t crc; 4658 struct spdk_blob_md_page *page = ctx->page; 4659 4660 crc = blob_md_page_calc_crc(page); 4661 if (crc != page->crc) { 4662 return false; 4663 } 4664 4665 /* First page of a sequence should match the blobid. */ 4666 if (page->sequence_num == 0 && 4667 bs_page_to_blobid(ctx->cur_page) != page->id) { 4668 return false; 4669 } 4670 assert(bs_load_cur_extent_page_valid(page) == false); 4671 4672 return true; 4673 } 4674 4675 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx); 4676 4677 static void 4678 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4679 { 4680 struct spdk_bs_load_ctx *ctx = cb_arg; 4681 4682 if (bserrno != 0) { 4683 bs_load_ctx_fail(ctx, bserrno); 4684 return; 4685 } 4686 4687 bs_load_complete(ctx); 4688 } 4689 4690 static void 4691 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4692 { 4693 struct spdk_bs_load_ctx *ctx = cb_arg; 4694 4695 spdk_free(ctx->mask); 4696 ctx->mask = NULL; 4697 4698 if (bserrno != 0) { 4699 bs_load_ctx_fail(ctx, bserrno); 4700 return; 4701 } 4702 4703 bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl); 4704 } 4705 4706 static void 4707 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4708 { 4709 struct spdk_bs_load_ctx *ctx = cb_arg; 4710 4711 spdk_free(ctx->mask); 4712 ctx->mask = NULL; 4713 4714 if (bserrno != 0) { 4715 bs_load_ctx_fail(ctx, bserrno); 4716 return; 4717 } 4718 4719 bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl); 4720 } 4721 4722 static void 4723 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx) 4724 { 4725 bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl); 4726 } 4727 4728 static void 4729 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx) 4730 { 4731 uint64_t num_md_clusters; 4732 uint64_t i; 4733 4734 ctx->in_page_chain = false; 4735 4736 do { 4737 ctx->page_index++; 4738 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 4739 4740 if (ctx->page_index < ctx->super->md_len) { 4741 ctx->cur_page = ctx->page_index; 4742 bs_load_replay_cur_md_page(ctx); 4743 } else { 4744 /* Claim all of the clusters used by the metadata */ 4745 num_md_clusters = spdk_divide_round_up( 4746 ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster); 4747 for (i = 0; i < num_md_clusters; i++) { 4748 spdk_bit_array_set(ctx->used_clusters, i); 4749 } 4750 ctx->bs->num_free_clusters -= num_md_clusters; 4751 spdk_free(ctx->page); 4752 bs_load_write_used_md(ctx); 4753 } 4754 } 4755 4756 static void 4757 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4758 { 4759 struct spdk_bs_load_ctx *ctx = cb_arg; 4760 uint32_t page_num; 4761 uint64_t i; 4762 4763 if (bserrno != 0) { 4764 spdk_free(ctx->extent_pages); 4765 bs_load_ctx_fail(ctx, bserrno); 4766 return; 4767 } 4768 4769 for (i = 0; i < ctx->num_extent_pages; i++) { 4770 /* Extent pages are only read when present within in chain md. 4771 * Integrity of md is not right if that page was not a valid extent page. */ 4772 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4773 spdk_free(ctx->extent_pages); 4774 bs_load_ctx_fail(ctx, -EILSEQ); 4775 return; 4776 } 4777 4778 page_num = ctx->extent_page_num[i]; 4779 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4780 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4781 spdk_free(ctx->extent_pages); 4782 bs_load_ctx_fail(ctx, -EILSEQ); 4783 return; 4784 } 4785 } 4786 4787 spdk_free(ctx->extent_pages); 4788 free(ctx->extent_page_num); 4789 ctx->extent_page_num = NULL; 4790 ctx->num_extent_pages = 0; 4791 4792 bs_load_replay_md_chain_cpl(ctx); 4793 } 4794 4795 static void 4796 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4797 { 4798 spdk_bs_batch_t *batch; 4799 uint32_t page; 4800 uint64_t lba; 4801 uint64_t i; 4802 4803 ctx->extent_pages = spdk_zmalloc(ctx->super->md_page_size * ctx->num_extent_pages, 0, 4804 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4805 if (!ctx->extent_pages) { 4806 bs_load_ctx_fail(ctx, -ENOMEM); 4807 return; 4808 } 4809 4810 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4811 4812 for (i = 0; i < ctx->num_extent_pages; i++) { 4813 page = ctx->extent_page_num[i]; 4814 assert(page < ctx->super->md_len); 4815 lba = bs_md_page_to_lba(ctx->bs, page); 4816 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4817 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size)); 4818 } 4819 4820 bs_batch_close(batch); 4821 } 4822 4823 static void 4824 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4825 { 4826 struct spdk_bs_load_ctx *ctx = cb_arg; 4827 uint32_t page_num; 4828 struct spdk_blob_md_page *page; 4829 4830 if (bserrno != 0) { 4831 bs_load_ctx_fail(ctx, bserrno); 4832 return; 4833 } 4834 4835 page_num = ctx->cur_page; 4836 page = ctx->page; 4837 if (bs_load_cur_md_page_valid(ctx) == true) { 4838 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4839 spdk_spin_lock(&ctx->bs->used_lock); 4840 bs_claim_md_page(ctx->bs, page_num); 4841 spdk_spin_unlock(&ctx->bs->used_lock); 4842 if (page->sequence_num == 0) { 4843 SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num); 4844 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4845 } 4846 if (bs_load_replay_md_parse_page(ctx, page)) { 4847 bs_load_ctx_fail(ctx, -EILSEQ); 4848 return; 4849 } 4850 if (page->next != SPDK_INVALID_MD_PAGE) { 4851 ctx->in_page_chain = true; 4852 ctx->cur_page = page->next; 4853 bs_load_replay_cur_md_page(ctx); 4854 return; 4855 } 4856 if (ctx->num_extent_pages != 0) { 4857 bs_load_replay_extent_pages(ctx); 4858 return; 4859 } 4860 } 4861 } 4862 bs_load_replay_md_chain_cpl(ctx); 4863 } 4864 4865 static void 4866 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4867 { 4868 uint64_t lba; 4869 4870 assert(ctx->cur_page < ctx->super->md_len); 4871 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4872 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4873 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size), 4874 bs_load_replay_md_cpl, ctx); 4875 } 4876 4877 static void 4878 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4879 { 4880 ctx->page_index = 0; 4881 ctx->cur_page = 0; 4882 ctx->page = spdk_zmalloc(ctx->bs->md_page_size, 0, 4883 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4884 if (!ctx->page) { 4885 bs_load_ctx_fail(ctx, -ENOMEM); 4886 return; 4887 } 4888 bs_load_replay_cur_md_page(ctx); 4889 } 4890 4891 static void 4892 bs_recover(struct spdk_bs_load_ctx *ctx) 4893 { 4894 int rc; 4895 4896 SPDK_NOTICELOG("Performing recovery on blobstore\n"); 4897 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4898 if (rc < 0) { 4899 bs_load_ctx_fail(ctx, -ENOMEM); 4900 return; 4901 } 4902 4903 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4904 if (rc < 0) { 4905 bs_load_ctx_fail(ctx, -ENOMEM); 4906 return; 4907 } 4908 4909 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4910 if (rc < 0) { 4911 bs_load_ctx_fail(ctx, -ENOMEM); 4912 return; 4913 } 4914 4915 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4916 if (rc < 0) { 4917 bs_load_ctx_fail(ctx, -ENOMEM); 4918 return; 4919 } 4920 4921 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4922 bs_load_replay_md(ctx); 4923 } 4924 4925 static int 4926 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4927 { 4928 int rc; 4929 4930 if (ctx->super->size == 0) { 4931 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4932 } 4933 4934 if (ctx->super->io_unit_size == 0) { 4935 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4936 } 4937 if (ctx->super->md_page_size == 0) { 4938 ctx->super->md_page_size = SPDK_BS_PAGE_SIZE; 4939 } 4940 4941 ctx->bs->clean = 1; 4942 ctx->bs->cluster_sz = ctx->super->cluster_size; 4943 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4944 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4945 ctx->bs->md_page_size = ctx->super->md_page_size; 4946 bs_init_per_cluster_fields(ctx->bs); 4947 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4948 if (rc < 0) { 4949 return -ENOMEM; 4950 } 4951 ctx->bs->md_start = ctx->super->md_start; 4952 ctx->bs->md_len = ctx->super->md_len; 4953 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4954 if (rc < 0) { 4955 return -ENOMEM; 4956 } 4957 4958 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4959 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4960 ctx->bs->super_blob = ctx->super->super_blob; 4961 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4962 4963 return 0; 4964 } 4965 4966 static void 4967 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4968 { 4969 struct spdk_bs_load_ctx *ctx = cb_arg; 4970 int rc; 4971 4972 rc = bs_super_validate(ctx->super, ctx->bs); 4973 if (rc != 0) { 4974 bs_load_ctx_fail(ctx, rc); 4975 return; 4976 } 4977 4978 rc = bs_parse_super(ctx); 4979 if (rc < 0) { 4980 bs_load_ctx_fail(ctx, rc); 4981 return; 4982 } 4983 4984 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4985 bs_recover(ctx); 4986 } else { 4987 bs_load_read_used_pages(ctx); 4988 } 4989 } 4990 4991 static inline int 4992 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4993 { 4994 4995 if (!src->opts_size) { 4996 SPDK_ERRLOG("opts_size should not be zero value\n"); 4997 return -1; 4998 } 4999 5000 #define FIELD_OK(field) \ 5001 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 5002 5003 #define SET_FIELD(field) \ 5004 if (FIELD_OK(field)) { \ 5005 dst->field = src->field; \ 5006 } \ 5007 5008 SET_FIELD(cluster_sz); 5009 SET_FIELD(num_md_pages); 5010 SET_FIELD(max_md_ops); 5011 SET_FIELD(max_channel_ops); 5012 SET_FIELD(clear_method); 5013 5014 if (FIELD_OK(bstype)) { 5015 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 5016 } 5017 SET_FIELD(md_page_size); 5018 SET_FIELD(iter_cb_fn); 5019 SET_FIELD(iter_cb_arg); 5020 SET_FIELD(force_recover); 5021 SET_FIELD(esnap_bs_dev_create); 5022 SET_FIELD(esnap_ctx); 5023 5024 dst->opts_size = src->opts_size; 5025 5026 /* You should not remove this statement, but need to update the assert statement 5027 * if you add a new field, and also add a corresponding SET_FIELD statement */ 5028 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size"); 5029 5030 #undef FIELD_OK 5031 #undef SET_FIELD 5032 5033 return 0; 5034 } 5035 5036 void 5037 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5038 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5039 { 5040 struct spdk_blob_store *bs; 5041 struct spdk_bs_cpl cpl; 5042 struct spdk_bs_load_ctx *ctx; 5043 struct spdk_bs_opts opts = {}; 5044 int err; 5045 5046 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 5047 5048 if ((dev->phys_blocklen % dev->blocklen) != 0) { 5049 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 5050 dev->destroy(dev); 5051 cb_fn(cb_arg, NULL, -EINVAL); 5052 return; 5053 } 5054 5055 spdk_bs_opts_init(&opts, sizeof(opts)); 5056 if (o) { 5057 if (bs_opts_copy(o, &opts)) { 5058 dev->destroy(dev); 5059 cb_fn(cb_arg, NULL, -EINVAL); 5060 return; 5061 } 5062 } 5063 5064 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 5065 dev->destroy(dev); 5066 cb_fn(cb_arg, NULL, -EINVAL); 5067 return; 5068 } 5069 5070 err = bs_alloc(dev, &opts, &bs, &ctx); 5071 if (err) { 5072 dev->destroy(dev); 5073 cb_fn(cb_arg, NULL, err); 5074 return; 5075 } 5076 5077 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5078 cpl.u.bs_handle.cb_fn = cb_fn; 5079 cpl.u.bs_handle.cb_arg = cb_arg; 5080 cpl.u.bs_handle.bs = bs; 5081 5082 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5083 if (!ctx->seq) { 5084 spdk_free(ctx->super); 5085 free(ctx); 5086 bs_free(bs); 5087 cb_fn(cb_arg, NULL, -ENOMEM); 5088 return; 5089 } 5090 5091 /* Read the super block */ 5092 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5093 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5094 bs_load_super_cpl, ctx); 5095 } 5096 5097 /* END spdk_bs_load */ 5098 5099 /* START spdk_bs_dump */ 5100 5101 static void 5102 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 5103 { 5104 spdk_free(ctx->super); 5105 5106 /* 5107 * We need to defer calling bs_call_cpl() until after 5108 * dev destruction, so tuck these away for later use. 5109 */ 5110 ctx->bs->unload_err = bserrno; 5111 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5112 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5113 5114 bs_sequence_finish(seq, 0); 5115 bs_free(ctx->bs); 5116 free(ctx); 5117 } 5118 5119 static void 5120 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5121 { 5122 struct spdk_blob_md_descriptor_xattr *desc_xattr; 5123 uint32_t i; 5124 const char *type; 5125 5126 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 5127 5128 if (desc_xattr->length != 5129 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 5130 desc_xattr->name_length + desc_xattr->value_length) { 5131 } 5132 5133 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 5134 ctx->xattr_name[desc_xattr->name_length] = '\0'; 5135 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5136 type = "XATTR"; 5137 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5138 type = "XATTR_INTERNAL"; 5139 } else { 5140 assert(false); 5141 type = "XATTR_?"; 5142 } 5143 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name); 5144 fprintf(ctx->fp, " value = \""); 5145 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 5146 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 5147 desc_xattr->value_length); 5148 fprintf(ctx->fp, "\"\n"); 5149 for (i = 0; i < desc_xattr->value_length; i++) { 5150 if (i % 16 == 0) { 5151 fprintf(ctx->fp, " "); 5152 } 5153 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 5154 if ((i + 1) % 16 == 0) { 5155 fprintf(ctx->fp, "\n"); 5156 } 5157 } 5158 if (i % 16 != 0) { 5159 fprintf(ctx->fp, "\n"); 5160 } 5161 } 5162 5163 struct type_flag_desc { 5164 uint64_t mask; 5165 uint64_t val; 5166 const char *name; 5167 }; 5168 5169 static void 5170 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags, 5171 struct type_flag_desc *desc, size_t numflags) 5172 { 5173 uint64_t covered = 0; 5174 size_t i; 5175 5176 for (i = 0; i < numflags; i++) { 5177 if ((desc[i].mask & flags) != desc[i].val) { 5178 continue; 5179 } 5180 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name); 5181 if (desc[i].mask != desc[i].val) { 5182 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")", 5183 desc[i].mask, desc[i].val); 5184 } 5185 fprintf(ctx->fp, "\n"); 5186 covered |= desc[i].mask; 5187 } 5188 if ((flags & ~covered) != 0) { 5189 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered); 5190 } 5191 } 5192 5193 static void 5194 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5195 { 5196 struct spdk_blob_md_descriptor_flags *type_desc; 5197 #define ADD_FLAG(f) { f, f, #f } 5198 #define ADD_MASK_VAL(m, v) { m, v, #v } 5199 static struct type_flag_desc invalid[] = { 5200 ADD_FLAG(SPDK_BLOB_THIN_PROV), 5201 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR), 5202 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE), 5203 }; 5204 static struct type_flag_desc data_ro[] = { 5205 ADD_FLAG(SPDK_BLOB_READ_ONLY), 5206 }; 5207 static struct type_flag_desc md_ro[] = { 5208 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT), 5209 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE), 5210 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP), 5211 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES), 5212 }; 5213 #undef ADD_FLAG 5214 #undef ADD_MASK_VAL 5215 5216 type_desc = (struct spdk_blob_md_descriptor_flags *)desc; 5217 fprintf(ctx->fp, "Flags:\n"); 5218 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 5219 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 5220 SPDK_COUNTOF(invalid)); 5221 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 5222 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 5223 SPDK_COUNTOF(data_ro)); 5224 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 5225 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 5226 SPDK_COUNTOF(md_ro)); 5227 } 5228 5229 static void 5230 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5231 { 5232 struct spdk_blob_md_descriptor_extent_table *et_desc; 5233 uint64_t num_extent_pages; 5234 uint32_t et_idx; 5235 5236 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 5237 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 5238 sizeof(et_desc->extent_page[0]); 5239 5240 fprintf(ctx->fp, "Extent table:\n"); 5241 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 5242 if (et_desc->extent_page[et_idx].page_idx == 0) { 5243 /* Zeroes represent unallocated extent pages. */ 5244 continue; 5245 } 5246 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 5247 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 5248 et_desc->extent_page[et_idx].num_pages, 5249 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 5250 } 5251 } 5252 5253 static void 5254 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 5255 { 5256 uint32_t page_idx = ctx->cur_page; 5257 struct spdk_blob_md_page *page = ctx->page; 5258 struct spdk_blob_md_descriptor *desc; 5259 size_t cur_desc = 0; 5260 uint32_t crc; 5261 5262 fprintf(ctx->fp, "=========\n"); 5263 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 5264 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 5265 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 5266 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 5267 if (page->next == SPDK_INVALID_MD_PAGE) { 5268 fprintf(ctx->fp, "Next: None\n"); 5269 } else { 5270 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 5271 } 5272 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 5273 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 5274 fprintf(ctx->fp, " md"); 5275 } 5276 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 5277 fprintf(ctx->fp, " blob"); 5278 } 5279 fprintf(ctx->fp, "\n"); 5280 5281 crc = blob_md_page_calc_crc(page); 5282 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch"); 5283 5284 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 5285 while (cur_desc < sizeof(page->descriptors)) { 5286 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 5287 if (desc->length == 0) { 5288 /* If padding and length are 0, this terminates the page */ 5289 break; 5290 } 5291 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 5292 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 5293 unsigned int i; 5294 5295 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 5296 5297 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 5298 if (desc_extent_rle->extents[i].cluster_idx != 0) { 5299 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5300 desc_extent_rle->extents[i].cluster_idx); 5301 } else { 5302 fprintf(ctx->fp, "Unallocated Extent - "); 5303 } 5304 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 5305 fprintf(ctx->fp, "\n"); 5306 } 5307 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 5308 struct spdk_blob_md_descriptor_extent_page *desc_extent; 5309 unsigned int i; 5310 5311 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 5312 5313 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 5314 if (desc_extent->cluster_idx[i] != 0) { 5315 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5316 desc_extent->cluster_idx[i]); 5317 } else { 5318 fprintf(ctx->fp, "Unallocated Extent"); 5319 } 5320 fprintf(ctx->fp, "\n"); 5321 } 5322 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5323 bs_dump_print_xattr(ctx, desc); 5324 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5325 bs_dump_print_xattr(ctx, desc); 5326 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 5327 bs_dump_print_type_flags(ctx, desc); 5328 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 5329 bs_dump_print_extent_table(ctx, desc); 5330 } else { 5331 /* Error */ 5332 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 5333 } 5334 /* Advance to the next descriptor */ 5335 cur_desc += sizeof(*desc) + desc->length; 5336 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 5337 break; 5338 } 5339 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 5340 } 5341 } 5342 5343 static void 5344 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5345 { 5346 struct spdk_bs_load_ctx *ctx = cb_arg; 5347 5348 if (bserrno != 0) { 5349 bs_dump_finish(seq, ctx, bserrno); 5350 return; 5351 } 5352 5353 if (ctx->page->id != 0) { 5354 bs_dump_print_md_page(ctx); 5355 } 5356 5357 ctx->cur_page++; 5358 5359 if (ctx->cur_page < ctx->super->md_len) { 5360 bs_dump_read_md_page(seq, ctx); 5361 } else { 5362 spdk_free(ctx->page); 5363 bs_dump_finish(seq, ctx, 0); 5364 } 5365 } 5366 5367 static void 5368 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 5369 { 5370 struct spdk_bs_load_ctx *ctx = cb_arg; 5371 uint64_t lba; 5372 5373 assert(ctx->cur_page < ctx->super->md_len); 5374 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 5375 bs_sequence_read_dev(seq, ctx->page, lba, 5376 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size), 5377 bs_dump_read_md_page_cpl, ctx); 5378 } 5379 5380 static void 5381 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5382 { 5383 struct spdk_bs_load_ctx *ctx = cb_arg; 5384 int rc; 5385 5386 fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature); 5387 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5388 sizeof(ctx->super->signature)) != 0) { 5389 fprintf(ctx->fp, "(Mismatch)\n"); 5390 bs_dump_finish(seq, ctx, bserrno); 5391 return; 5392 } else { 5393 fprintf(ctx->fp, "(OK)\n"); 5394 } 5395 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 5396 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 5397 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 5398 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 5399 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 5400 fprintf(ctx->fp, "Super Blob ID: "); 5401 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 5402 fprintf(ctx->fp, "(None)\n"); 5403 } else { 5404 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 5405 } 5406 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 5407 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 5408 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 5409 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 5410 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 5411 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 5412 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 5413 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 5414 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 5415 5416 ctx->cur_page = 0; 5417 ctx->page = spdk_zmalloc(ctx->super->md_page_size, 0, 5418 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5419 if (!ctx->page) { 5420 bs_dump_finish(seq, ctx, -ENOMEM); 5421 return; 5422 } 5423 5424 rc = bs_parse_super(ctx); 5425 if (rc < 0) { 5426 bs_load_ctx_fail(ctx, rc); 5427 return; 5428 } 5429 5430 bs_load_read_used_pages(ctx); 5431 } 5432 5433 void 5434 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 5435 spdk_bs_op_complete cb_fn, void *cb_arg) 5436 { 5437 struct spdk_blob_store *bs; 5438 struct spdk_bs_cpl cpl; 5439 struct spdk_bs_load_ctx *ctx; 5440 struct spdk_bs_opts opts = {}; 5441 int err; 5442 5443 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 5444 5445 spdk_bs_opts_init(&opts, sizeof(opts)); 5446 5447 err = bs_alloc(dev, &opts, &bs, &ctx); 5448 if (err) { 5449 dev->destroy(dev); 5450 cb_fn(cb_arg, err); 5451 return; 5452 } 5453 5454 ctx->dumping = true; 5455 ctx->fp = fp; 5456 ctx->print_xattr_fn = print_xattr_fn; 5457 5458 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5459 cpl.u.bs_basic.cb_fn = cb_fn; 5460 cpl.u.bs_basic.cb_arg = cb_arg; 5461 5462 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5463 if (!ctx->seq) { 5464 spdk_free(ctx->super); 5465 free(ctx); 5466 bs_free(bs); 5467 cb_fn(cb_arg, -ENOMEM); 5468 return; 5469 } 5470 5471 /* Read the super block */ 5472 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5473 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5474 bs_dump_super_cpl, ctx); 5475 } 5476 5477 /* END spdk_bs_dump */ 5478 5479 /* START spdk_bs_init */ 5480 5481 static void 5482 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5483 { 5484 struct spdk_bs_load_ctx *ctx = cb_arg; 5485 5486 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters); 5487 spdk_free(ctx->super); 5488 free(ctx); 5489 5490 bs_sequence_finish(seq, bserrno); 5491 } 5492 5493 static void 5494 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5495 { 5496 struct spdk_bs_load_ctx *ctx = cb_arg; 5497 5498 /* Write super block */ 5499 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5500 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5501 bs_init_persist_super_cpl, ctx); 5502 } 5503 5504 void 5505 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5506 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5507 { 5508 struct spdk_bs_load_ctx *ctx; 5509 struct spdk_blob_store *bs; 5510 struct spdk_bs_cpl cpl; 5511 spdk_bs_sequence_t *seq; 5512 spdk_bs_batch_t *batch; 5513 uint64_t num_md_lba; 5514 uint64_t num_md_pages; 5515 uint64_t num_md_clusters; 5516 uint64_t max_used_cluster_mask_len; 5517 uint32_t i; 5518 struct spdk_bs_opts opts = {}; 5519 int rc; 5520 uint64_t lba, lba_count; 5521 5522 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5523 if ((dev->phys_blocklen % dev->blocklen) != 0) { 5524 SPDK_ERRLOG("unsupported dev block length of %d\n", 5525 dev->blocklen); 5526 dev->destroy(dev); 5527 cb_fn(cb_arg, NULL, -EINVAL); 5528 return; 5529 } 5530 5531 spdk_bs_opts_init(&opts, sizeof(opts)); 5532 if (o) { 5533 if (bs_opts_copy(o, &opts)) { 5534 dev->destroy(dev); 5535 cb_fn(cb_arg, NULL, -EINVAL); 5536 return; 5537 } 5538 } 5539 5540 if (bs_opts_verify(&opts) != 0) { 5541 dev->destroy(dev); 5542 cb_fn(cb_arg, NULL, -EINVAL); 5543 return; 5544 } 5545 5546 rc = bs_alloc(dev, &opts, &bs, &ctx); 5547 if (rc) { 5548 dev->destroy(dev); 5549 cb_fn(cb_arg, NULL, rc); 5550 return; 5551 } 5552 5553 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5554 /* By default, allocate 1 page per cluster. 5555 * Technically, this over-allocates metadata 5556 * because more metadata will reduce the number 5557 * of usable clusters. This can be addressed with 5558 * more complex math in the future. 5559 */ 5560 bs->md_len = bs->total_clusters; 5561 } else { 5562 bs->md_len = opts.num_md_pages; 5563 } 5564 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5565 if (rc < 0) { 5566 spdk_free(ctx->super); 5567 free(ctx); 5568 bs_free(bs); 5569 cb_fn(cb_arg, NULL, -ENOMEM); 5570 return; 5571 } 5572 5573 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5574 if (rc < 0) { 5575 spdk_free(ctx->super); 5576 free(ctx); 5577 bs_free(bs); 5578 cb_fn(cb_arg, NULL, -ENOMEM); 5579 return; 5580 } 5581 5582 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5583 if (rc < 0) { 5584 spdk_free(ctx->super); 5585 free(ctx); 5586 bs_free(bs); 5587 cb_fn(cb_arg, NULL, -ENOMEM); 5588 return; 5589 } 5590 5591 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5592 sizeof(ctx->super->signature)); 5593 ctx->super->version = SPDK_BS_VERSION; 5594 ctx->super->length = sizeof(*ctx->super); 5595 ctx->super->super_blob = bs->super_blob; 5596 ctx->super->clean = 0; 5597 ctx->super->cluster_size = bs->cluster_sz; 5598 ctx->super->io_unit_size = bs->io_unit_size; 5599 ctx->super->md_page_size = bs->md_page_size; 5600 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5601 5602 /* Calculate how many pages the metadata consumes at the front 5603 * of the disk. 5604 */ 5605 5606 /* The super block uses 1 page */ 5607 num_md_pages = 1; 5608 5609 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5610 * up to the nearest page, plus a header. 5611 */ 5612 ctx->super->used_page_mask_start = num_md_pages; 5613 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5614 spdk_divide_round_up(bs->md_len, 8), 5615 ctx->super->md_page_size); 5616 num_md_pages += ctx->super->used_page_mask_len; 5617 5618 /* The used_clusters mask requires 1 bit per cluster, rounded 5619 * up to the nearest page, plus a header. 5620 */ 5621 ctx->super->used_cluster_mask_start = num_md_pages; 5622 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5623 spdk_divide_round_up(bs->total_clusters, 8), 5624 ctx->super->md_page_size); 5625 /* The blobstore might be extended, then the used_cluster bitmap will need more space. 5626 * Here we calculate the max clusters we can support according to the 5627 * num_md_pages (bs->md_len). 5628 */ 5629 max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5630 spdk_divide_round_up(bs->md_len, 8), 5631 ctx->super->md_page_size); 5632 max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len, 5633 ctx->super->used_cluster_mask_len); 5634 num_md_pages += max_used_cluster_mask_len; 5635 5636 /* The used_blobids mask requires 1 bit per metadata page, rounded 5637 * up to the nearest page, plus a header. 5638 */ 5639 ctx->super->used_blobid_mask_start = num_md_pages; 5640 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5641 spdk_divide_round_up(bs->md_len, 8), 5642 ctx->super->md_page_size); 5643 num_md_pages += ctx->super->used_blobid_mask_len; 5644 5645 /* The metadata region size was chosen above */ 5646 ctx->super->md_start = bs->md_start = num_md_pages; 5647 ctx->super->md_len = bs->md_len; 5648 num_md_pages += bs->md_len; 5649 5650 num_md_lba = bs_page_to_lba(bs, num_md_pages); 5651 5652 ctx->super->size = dev->blockcnt * dev->blocklen; 5653 5654 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 5655 5656 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 5657 if (num_md_clusters > bs->total_clusters) { 5658 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 5659 "please decrease number of pages reserved for metadata " 5660 "or increase cluster size.\n"); 5661 spdk_free(ctx->super); 5662 spdk_bit_array_free(&ctx->used_clusters); 5663 free(ctx); 5664 bs_free(bs); 5665 cb_fn(cb_arg, NULL, -ENOMEM); 5666 return; 5667 } 5668 /* Claim all of the clusters used by the metadata */ 5669 for (i = 0; i < num_md_clusters; i++) { 5670 spdk_bit_array_set(ctx->used_clusters, i); 5671 } 5672 5673 bs->num_free_clusters -= num_md_clusters; 5674 bs->total_data_clusters = bs->num_free_clusters; 5675 5676 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5677 cpl.u.bs_handle.cb_fn = cb_fn; 5678 cpl.u.bs_handle.cb_arg = cb_arg; 5679 cpl.u.bs_handle.bs = bs; 5680 5681 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5682 if (!seq) { 5683 spdk_free(ctx->super); 5684 free(ctx); 5685 bs_free(bs); 5686 cb_fn(cb_arg, NULL, -ENOMEM); 5687 return; 5688 } 5689 5690 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx); 5691 5692 /* Clear metadata space */ 5693 bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 5694 5695 lba = num_md_lba; 5696 lba_count = ctx->bs->dev->blockcnt - lba; 5697 switch (opts.clear_method) { 5698 case BS_CLEAR_WITH_UNMAP: 5699 /* Trim data clusters */ 5700 bs_batch_unmap_dev(batch, lba, lba_count); 5701 break; 5702 case BS_CLEAR_WITH_WRITE_ZEROES: 5703 /* Write_zeroes to data clusters */ 5704 bs_batch_write_zeroes_dev(batch, lba, lba_count); 5705 break; 5706 case BS_CLEAR_WITH_NONE: 5707 default: 5708 break; 5709 } 5710 5711 bs_batch_close(batch); 5712 } 5713 5714 /* END spdk_bs_init */ 5715 5716 /* START spdk_bs_destroy */ 5717 5718 static void 5719 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5720 { 5721 struct spdk_bs_load_ctx *ctx = cb_arg; 5722 struct spdk_blob_store *bs = ctx->bs; 5723 5724 /* 5725 * We need to defer calling bs_call_cpl() until after 5726 * dev destruction, so tuck these away for later use. 5727 */ 5728 bs->unload_err = bserrno; 5729 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5730 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5731 5732 bs_sequence_finish(seq, bserrno); 5733 5734 bs_free(bs); 5735 free(ctx); 5736 } 5737 5738 void 5739 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5740 void *cb_arg) 5741 { 5742 struct spdk_bs_cpl cpl; 5743 spdk_bs_sequence_t *seq; 5744 struct spdk_bs_load_ctx *ctx; 5745 5746 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5747 5748 if (!RB_EMPTY(&bs->open_blobs)) { 5749 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5750 cb_fn(cb_arg, -EBUSY); 5751 return; 5752 } 5753 5754 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5755 cpl.u.bs_basic.cb_fn = cb_fn; 5756 cpl.u.bs_basic.cb_arg = cb_arg; 5757 5758 ctx = calloc(1, sizeof(*ctx)); 5759 if (!ctx) { 5760 cb_fn(cb_arg, -ENOMEM); 5761 return; 5762 } 5763 5764 ctx->bs = bs; 5765 5766 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5767 if (!seq) { 5768 free(ctx); 5769 cb_fn(cb_arg, -ENOMEM); 5770 return; 5771 } 5772 5773 /* Write zeroes to the super block */ 5774 bs_sequence_write_zeroes_dev(seq, 5775 bs_page_to_lba(bs, 0), 5776 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5777 bs_destroy_trim_cpl, ctx); 5778 } 5779 5780 /* END spdk_bs_destroy */ 5781 5782 /* START spdk_bs_unload */ 5783 5784 static void 5785 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5786 { 5787 spdk_bs_sequence_t *seq = ctx->seq; 5788 5789 spdk_free(ctx->super); 5790 5791 /* 5792 * We need to defer calling bs_call_cpl() until after 5793 * dev destruction, so tuck these away for later use. 5794 */ 5795 ctx->bs->unload_err = bserrno; 5796 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5797 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5798 5799 bs_sequence_finish(seq, bserrno); 5800 5801 bs_free(ctx->bs); 5802 free(ctx); 5803 } 5804 5805 static void 5806 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5807 { 5808 struct spdk_bs_load_ctx *ctx = cb_arg; 5809 5810 bs_unload_finish(ctx, bserrno); 5811 } 5812 5813 static void 5814 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5815 { 5816 struct spdk_bs_load_ctx *ctx = cb_arg; 5817 5818 spdk_free(ctx->mask); 5819 5820 if (bserrno != 0) { 5821 bs_unload_finish(ctx, bserrno); 5822 return; 5823 } 5824 5825 ctx->super->clean = 1; 5826 5827 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5828 } 5829 5830 static void 5831 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5832 { 5833 struct spdk_bs_load_ctx *ctx = cb_arg; 5834 5835 spdk_free(ctx->mask); 5836 ctx->mask = NULL; 5837 5838 if (bserrno != 0) { 5839 bs_unload_finish(ctx, bserrno); 5840 return; 5841 } 5842 5843 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5844 } 5845 5846 static void 5847 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5848 { 5849 struct spdk_bs_load_ctx *ctx = cb_arg; 5850 5851 spdk_free(ctx->mask); 5852 ctx->mask = NULL; 5853 5854 if (bserrno != 0) { 5855 bs_unload_finish(ctx, bserrno); 5856 return; 5857 } 5858 5859 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5860 } 5861 5862 static void 5863 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5864 { 5865 struct spdk_bs_load_ctx *ctx = cb_arg; 5866 int rc; 5867 5868 if (bserrno != 0) { 5869 bs_unload_finish(ctx, bserrno); 5870 return; 5871 } 5872 5873 rc = bs_super_validate(ctx->super, ctx->bs); 5874 if (rc != 0) { 5875 bs_unload_finish(ctx, rc); 5876 return; 5877 } 5878 5879 bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5880 } 5881 5882 void 5883 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5884 { 5885 struct spdk_bs_cpl cpl; 5886 struct spdk_bs_load_ctx *ctx; 5887 5888 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5889 5890 /* 5891 * If external snapshot channels are being destroyed while the blobstore is unloaded, the 5892 * unload is deferred until after the channel destruction completes. 5893 */ 5894 if (bs->esnap_channels_unloading != 0) { 5895 if (bs->esnap_unload_cb_fn != NULL) { 5896 SPDK_ERRLOG("Blobstore unload in progress\n"); 5897 cb_fn(cb_arg, -EBUSY); 5898 return; 5899 } 5900 SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32 5901 " esnap clones are unloading\n", bs->esnap_channels_unloading); 5902 bs->esnap_unload_cb_fn = cb_fn; 5903 bs->esnap_unload_cb_arg = cb_arg; 5904 return; 5905 } 5906 if (bs->esnap_unload_cb_fn != NULL) { 5907 SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n"); 5908 assert(bs->esnap_unload_cb_fn == cb_fn); 5909 assert(bs->esnap_unload_cb_arg == cb_arg); 5910 bs->esnap_unload_cb_fn = NULL; 5911 bs->esnap_unload_cb_arg = NULL; 5912 } 5913 5914 if (!RB_EMPTY(&bs->open_blobs)) { 5915 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5916 cb_fn(cb_arg, -EBUSY); 5917 return; 5918 } 5919 5920 ctx = calloc(1, sizeof(*ctx)); 5921 if (!ctx) { 5922 cb_fn(cb_arg, -ENOMEM); 5923 return; 5924 } 5925 5926 ctx->bs = bs; 5927 5928 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5929 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5930 if (!ctx->super) { 5931 free(ctx); 5932 cb_fn(cb_arg, -ENOMEM); 5933 return; 5934 } 5935 5936 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5937 cpl.u.bs_basic.cb_fn = cb_fn; 5938 cpl.u.bs_basic.cb_arg = cb_arg; 5939 5940 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5941 if (!ctx->seq) { 5942 spdk_free(ctx->super); 5943 free(ctx); 5944 cb_fn(cb_arg, -ENOMEM); 5945 return; 5946 } 5947 5948 /* Read super block */ 5949 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5950 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5951 bs_unload_read_super_cpl, ctx); 5952 } 5953 5954 /* END spdk_bs_unload */ 5955 5956 /* START spdk_bs_set_super */ 5957 5958 struct spdk_bs_set_super_ctx { 5959 struct spdk_blob_store *bs; 5960 struct spdk_bs_super_block *super; 5961 }; 5962 5963 static void 5964 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5965 { 5966 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5967 5968 if (bserrno != 0) { 5969 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5970 } 5971 5972 spdk_free(ctx->super); 5973 5974 bs_sequence_finish(seq, bserrno); 5975 5976 free(ctx); 5977 } 5978 5979 static void 5980 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5981 { 5982 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5983 int rc; 5984 5985 if (bserrno != 0) { 5986 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5987 spdk_free(ctx->super); 5988 bs_sequence_finish(seq, bserrno); 5989 free(ctx); 5990 return; 5991 } 5992 5993 rc = bs_super_validate(ctx->super, ctx->bs); 5994 if (rc != 0) { 5995 SPDK_ERRLOG("Not a valid super block\n"); 5996 spdk_free(ctx->super); 5997 bs_sequence_finish(seq, rc); 5998 free(ctx); 5999 return; 6000 } 6001 6002 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 6003 } 6004 6005 void 6006 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 6007 spdk_bs_op_complete cb_fn, void *cb_arg) 6008 { 6009 struct spdk_bs_cpl cpl; 6010 spdk_bs_sequence_t *seq; 6011 struct spdk_bs_set_super_ctx *ctx; 6012 6013 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 6014 6015 ctx = calloc(1, sizeof(*ctx)); 6016 if (!ctx) { 6017 cb_fn(cb_arg, -ENOMEM); 6018 return; 6019 } 6020 6021 ctx->bs = bs; 6022 6023 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 6024 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 6025 if (!ctx->super) { 6026 free(ctx); 6027 cb_fn(cb_arg, -ENOMEM); 6028 return; 6029 } 6030 6031 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 6032 cpl.u.bs_basic.cb_fn = cb_fn; 6033 cpl.u.bs_basic.cb_arg = cb_arg; 6034 6035 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 6036 if (!seq) { 6037 spdk_free(ctx->super); 6038 free(ctx); 6039 cb_fn(cb_arg, -ENOMEM); 6040 return; 6041 } 6042 6043 bs->super_blob = blobid; 6044 6045 /* Read super block */ 6046 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 6047 bs_byte_to_lba(bs, sizeof(*ctx->super)), 6048 bs_set_super_read_cpl, ctx); 6049 } 6050 6051 /* END spdk_bs_set_super */ 6052 6053 void 6054 spdk_bs_get_super(struct spdk_blob_store *bs, 6055 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6056 { 6057 if (bs->super_blob == SPDK_BLOBID_INVALID) { 6058 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 6059 } else { 6060 cb_fn(cb_arg, bs->super_blob, 0); 6061 } 6062 } 6063 6064 uint64_t 6065 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 6066 { 6067 return bs->cluster_sz; 6068 } 6069 6070 uint64_t 6071 spdk_bs_get_page_size(struct spdk_blob_store *bs) 6072 { 6073 return bs->md_page_size; 6074 } 6075 6076 uint64_t 6077 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 6078 { 6079 return bs->io_unit_size; 6080 } 6081 6082 uint64_t 6083 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 6084 { 6085 return bs->num_free_clusters; 6086 } 6087 6088 uint64_t 6089 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 6090 { 6091 return bs->total_data_clusters; 6092 } 6093 6094 static int 6095 bs_register_md_thread(struct spdk_blob_store *bs) 6096 { 6097 bs->md_channel = spdk_get_io_channel(bs); 6098 if (!bs->md_channel) { 6099 SPDK_ERRLOG("Failed to get IO channel.\n"); 6100 return -1; 6101 } 6102 6103 return 0; 6104 } 6105 6106 static int 6107 bs_unregister_md_thread(struct spdk_blob_store *bs) 6108 { 6109 spdk_put_io_channel(bs->md_channel); 6110 6111 return 0; 6112 } 6113 6114 spdk_blob_id 6115 spdk_blob_get_id(struct spdk_blob *blob) 6116 { 6117 assert(blob != NULL); 6118 6119 return blob->id; 6120 } 6121 6122 uint64_t 6123 spdk_blob_get_num_io_units(struct spdk_blob *blob) 6124 { 6125 assert(blob != NULL); 6126 6127 return bs_cluster_to_io_unit(blob->bs, blob->active.num_clusters); 6128 } 6129 6130 uint64_t 6131 spdk_blob_get_num_clusters(struct spdk_blob *blob) 6132 { 6133 assert(blob != NULL); 6134 6135 return blob->active.num_clusters; 6136 } 6137 6138 uint64_t 6139 spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob) 6140 { 6141 assert(blob != NULL); 6142 6143 return blob->active.num_allocated_clusters; 6144 } 6145 6146 static uint64_t 6147 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated) 6148 { 6149 uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob); 6150 6151 while (offset < blob_io_unit_num) { 6152 if (bs_io_unit_is_allocated(blob, offset) == is_allocated) { 6153 return offset; 6154 } 6155 6156 offset += bs_num_io_units_to_cluster_boundary(blob, offset); 6157 } 6158 6159 return UINT64_MAX; 6160 } 6161 6162 uint64_t 6163 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6164 { 6165 return blob_find_io_unit(blob, offset, true); 6166 } 6167 6168 uint64_t 6169 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6170 { 6171 return blob_find_io_unit(blob, offset, false); 6172 } 6173 6174 /* START spdk_bs_create_blob */ 6175 6176 static void 6177 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6178 { 6179 struct spdk_blob *blob = cb_arg; 6180 uint32_t page_idx = bs_blobid_to_page(blob->id); 6181 6182 if (bserrno != 0) { 6183 spdk_spin_lock(&blob->bs->used_lock); 6184 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 6185 bs_release_md_page(blob->bs, page_idx); 6186 spdk_spin_unlock(&blob->bs->used_lock); 6187 } 6188 6189 blob_free(blob); 6190 6191 bs_sequence_finish(seq, bserrno); 6192 } 6193 6194 static int 6195 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 6196 bool internal) 6197 { 6198 uint64_t i; 6199 size_t value_len = 0; 6200 int rc; 6201 const void *value = NULL; 6202 if (xattrs->count > 0 && xattrs->get_value == NULL) { 6203 return -EINVAL; 6204 } 6205 for (i = 0; i < xattrs->count; i++) { 6206 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 6207 if (value == NULL || value_len == 0) { 6208 return -EINVAL; 6209 } 6210 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 6211 if (rc < 0) { 6212 return rc; 6213 } 6214 } 6215 return 0; 6216 } 6217 6218 static void 6219 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 6220 { 6221 #define FIELD_OK(field) \ 6222 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 6223 6224 #define SET_FIELD(field) \ 6225 if (FIELD_OK(field)) { \ 6226 dst->field = src->field; \ 6227 } \ 6228 6229 SET_FIELD(num_clusters); 6230 SET_FIELD(thin_provision); 6231 SET_FIELD(clear_method); 6232 6233 if (FIELD_OK(xattrs)) { 6234 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 6235 } 6236 6237 SET_FIELD(use_extent_table); 6238 SET_FIELD(esnap_id); 6239 SET_FIELD(esnap_id_len); 6240 6241 dst->opts_size = src->opts_size; 6242 6243 /* You should not remove this statement, but need to update the assert statement 6244 * if you add a new field, and also add a corresponding SET_FIELD statement */ 6245 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size"); 6246 6247 #undef FIELD_OK 6248 #undef SET_FIELD 6249 } 6250 6251 static void 6252 bs_create_blob(struct spdk_blob_store *bs, 6253 const struct spdk_blob_opts *opts, 6254 const struct spdk_blob_xattr_opts *internal_xattrs, 6255 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6256 { 6257 struct spdk_blob *blob; 6258 uint32_t page_idx; 6259 struct spdk_bs_cpl cpl; 6260 struct spdk_blob_opts opts_local; 6261 struct spdk_blob_xattr_opts internal_xattrs_default; 6262 spdk_bs_sequence_t *seq; 6263 spdk_blob_id id; 6264 int rc; 6265 6266 assert(spdk_get_thread() == bs->md_thread); 6267 6268 spdk_spin_lock(&bs->used_lock); 6269 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 6270 if (page_idx == UINT32_MAX) { 6271 spdk_spin_unlock(&bs->used_lock); 6272 cb_fn(cb_arg, 0, -ENOMEM); 6273 return; 6274 } 6275 spdk_bit_array_set(bs->used_blobids, page_idx); 6276 bs_claim_md_page(bs, page_idx); 6277 spdk_spin_unlock(&bs->used_lock); 6278 6279 id = bs_page_to_blobid(page_idx); 6280 6281 SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx); 6282 6283 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 6284 if (opts) { 6285 blob_opts_copy(opts, &opts_local); 6286 } 6287 6288 blob = blob_alloc(bs, id); 6289 if (!blob) { 6290 rc = -ENOMEM; 6291 goto error; 6292 } 6293 6294 blob->use_extent_table = opts_local.use_extent_table; 6295 if (blob->use_extent_table) { 6296 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 6297 } 6298 6299 if (!internal_xattrs) { 6300 blob_xattrs_init(&internal_xattrs_default); 6301 internal_xattrs = &internal_xattrs_default; 6302 } 6303 6304 rc = blob_set_xattrs(blob, &opts_local.xattrs, false); 6305 if (rc < 0) { 6306 goto error; 6307 } 6308 6309 rc = blob_set_xattrs(blob, internal_xattrs, true); 6310 if (rc < 0) { 6311 goto error; 6312 } 6313 6314 if (opts_local.thin_provision) { 6315 blob_set_thin_provision(blob); 6316 } 6317 6318 blob_set_clear_method(blob, opts_local.clear_method); 6319 6320 if (opts_local.esnap_id != NULL) { 6321 if (opts_local.esnap_id_len > UINT16_MAX) { 6322 SPDK_ERRLOG("esnap id length %" PRIu64 "is too long\n", 6323 opts_local.esnap_id_len); 6324 rc = -EINVAL; 6325 goto error; 6326 6327 } 6328 blob_set_thin_provision(blob); 6329 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 6330 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, 6331 opts_local.esnap_id, opts_local.esnap_id_len, true); 6332 if (rc != 0) { 6333 goto error; 6334 } 6335 } 6336 6337 rc = blob_resize(blob, opts_local.num_clusters); 6338 if (rc < 0) { 6339 goto error; 6340 } 6341 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6342 cpl.u.blobid.cb_fn = cb_fn; 6343 cpl.u.blobid.cb_arg = cb_arg; 6344 cpl.u.blobid.blobid = blob->id; 6345 6346 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 6347 if (!seq) { 6348 rc = -ENOMEM; 6349 goto error; 6350 } 6351 6352 blob_persist(seq, blob, bs_create_blob_cpl, blob); 6353 return; 6354 6355 error: 6356 SPDK_ERRLOG("Failed to create blob: %s, size in clusters/size: %lu (clusters)\n", 6357 spdk_strerror(rc), opts_local.num_clusters); 6358 if (blob != NULL) { 6359 blob_free(blob); 6360 } 6361 spdk_spin_lock(&bs->used_lock); 6362 spdk_bit_array_clear(bs->used_blobids, page_idx); 6363 bs_release_md_page(bs, page_idx); 6364 spdk_spin_unlock(&bs->used_lock); 6365 cb_fn(cb_arg, 0, rc); 6366 } 6367 6368 void 6369 spdk_bs_create_blob(struct spdk_blob_store *bs, 6370 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6371 { 6372 bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 6373 } 6374 6375 void 6376 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts, 6377 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6378 { 6379 bs_create_blob(bs, opts, NULL, cb_fn, cb_arg); 6380 } 6381 6382 /* END spdk_bs_create_blob */ 6383 6384 /* START blob_cleanup */ 6385 6386 struct spdk_clone_snapshot_ctx { 6387 struct spdk_bs_cpl cpl; 6388 int bserrno; 6389 bool frozen; 6390 6391 struct spdk_io_channel *channel; 6392 6393 /* Current cluster for inflate operation */ 6394 uint64_t cluster; 6395 6396 /* For inflation force allocation of all unallocated clusters and remove 6397 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */ 6398 bool allocate_all; 6399 6400 struct { 6401 spdk_blob_id id; 6402 struct spdk_blob *blob; 6403 bool md_ro; 6404 } original; 6405 struct { 6406 spdk_blob_id id; 6407 struct spdk_blob *blob; 6408 } new; 6409 6410 /* xattrs specified for snapshot/clones only. They have no impact on 6411 * the original blobs xattrs. */ 6412 const struct spdk_blob_xattr_opts *xattrs; 6413 }; 6414 6415 static void 6416 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 6417 { 6418 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 6419 struct spdk_bs_cpl *cpl = &ctx->cpl; 6420 6421 if (bserrno != 0) { 6422 if (ctx->bserrno != 0) { 6423 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6424 } else { 6425 ctx->bserrno = bserrno; 6426 } 6427 } 6428 6429 switch (cpl->type) { 6430 case SPDK_BS_CPL_TYPE_BLOBID: 6431 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 6432 break; 6433 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 6434 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 6435 break; 6436 default: 6437 SPDK_UNREACHABLE(); 6438 break; 6439 } 6440 6441 free(ctx); 6442 } 6443 6444 static void 6445 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6446 { 6447 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6448 struct spdk_blob *origblob = ctx->original.blob; 6449 6450 if (bserrno != 0) { 6451 if (ctx->bserrno != 0) { 6452 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 6453 } else { 6454 ctx->bserrno = bserrno; 6455 } 6456 } 6457 6458 ctx->original.id = origblob->id; 6459 origblob->locked_operation_in_progress = false; 6460 6461 /* Revert md_ro to original state */ 6462 origblob->md_ro = ctx->original.md_ro; 6463 6464 spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx); 6465 } 6466 6467 static void 6468 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 6469 { 6470 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6471 struct spdk_blob *origblob = ctx->original.blob; 6472 6473 if (bserrno != 0) { 6474 if (ctx->bserrno != 0) { 6475 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6476 } else { 6477 ctx->bserrno = bserrno; 6478 } 6479 } 6480 6481 if (ctx->frozen) { 6482 /* Unfreeze any outstanding I/O */ 6483 blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx); 6484 } else { 6485 bs_snapshot_unfreeze_cpl(ctx, 0); 6486 } 6487 6488 } 6489 6490 static void 6491 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno) 6492 { 6493 struct spdk_blob *newblob = ctx->new.blob; 6494 6495 if (bserrno != 0) { 6496 if (ctx->bserrno != 0) { 6497 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6498 } else { 6499 ctx->bserrno = bserrno; 6500 } 6501 } 6502 6503 ctx->new.id = newblob->id; 6504 spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6505 } 6506 6507 /* END blob_cleanup */ 6508 6509 /* START spdk_bs_create_snapshot */ 6510 6511 static void 6512 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 6513 { 6514 uint64_t *cluster_temp; 6515 uint64_t num_allocated_clusters_temp; 6516 uint32_t *extent_page_temp; 6517 6518 cluster_temp = blob1->active.clusters; 6519 blob1->active.clusters = blob2->active.clusters; 6520 blob2->active.clusters = cluster_temp; 6521 6522 num_allocated_clusters_temp = blob1->active.num_allocated_clusters; 6523 blob1->active.num_allocated_clusters = blob2->active.num_allocated_clusters; 6524 blob2->active.num_allocated_clusters = num_allocated_clusters_temp; 6525 6526 extent_page_temp = blob1->active.extent_pages; 6527 blob1->active.extent_pages = blob2->active.extent_pages; 6528 blob2->active.extent_pages = extent_page_temp; 6529 } 6530 6531 /* Copies an internal xattr */ 6532 static int 6533 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name) 6534 { 6535 const void *val = NULL; 6536 size_t len; 6537 int bserrno; 6538 6539 bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true); 6540 if (bserrno != 0) { 6541 SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name); 6542 return bserrno; 6543 } 6544 6545 bserrno = blob_set_xattr(toblob, name, val, len, true); 6546 if (bserrno != 0) { 6547 SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n", 6548 name, toblob->id); 6549 return bserrno; 6550 } 6551 return 0; 6552 } 6553 6554 static void 6555 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno) 6556 { 6557 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6558 struct spdk_blob *origblob = ctx->original.blob; 6559 struct spdk_blob *newblob = ctx->new.blob; 6560 6561 if (bserrno != 0) { 6562 bs_snapshot_swap_cluster_maps(newblob, origblob); 6563 if (blob_is_esnap_clone(newblob)) { 6564 bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6565 origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 6566 } 6567 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6568 return; 6569 } 6570 6571 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */ 6572 bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true); 6573 if (bserrno != 0) { 6574 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6575 return; 6576 } 6577 6578 bs_blob_list_add(ctx->original.blob); 6579 6580 spdk_blob_set_read_only(newblob); 6581 6582 /* sync snapshot metadata */ 6583 spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6584 } 6585 6586 static void 6587 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno) 6588 { 6589 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6590 struct spdk_blob *origblob = ctx->original.blob; 6591 struct spdk_blob *newblob = ctx->new.blob; 6592 6593 if (bserrno != 0) { 6594 /* return cluster map back to original */ 6595 bs_snapshot_swap_cluster_maps(newblob, origblob); 6596 6597 /* Newblob md sync failed. Valid clusters are only present in origblob. 6598 * Since I/O is frozen on origblob, not changes to zeroed out cluster map should have occurred. 6599 * Newblob needs to be reverted to thin_provisioned state at creation to properly close. */ 6600 blob_set_thin_provision(newblob); 6601 assert(spdk_mem_all_zero(newblob->active.clusters, 6602 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6603 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6604 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6605 6606 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6607 return; 6608 } 6609 6610 /* Set internal xattr for snapshot id */ 6611 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 6612 if (bserrno != 0) { 6613 /* return cluster map back to original */ 6614 bs_snapshot_swap_cluster_maps(newblob, origblob); 6615 blob_set_thin_provision(newblob); 6616 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6617 return; 6618 } 6619 6620 /* Create new back_bs_dev for snapshot */ 6621 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 6622 if (origblob->back_bs_dev == NULL) { 6623 /* return cluster map back to original */ 6624 bs_snapshot_swap_cluster_maps(newblob, origblob); 6625 blob_set_thin_provision(newblob); 6626 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 6627 return; 6628 } 6629 6630 /* Remove the xattr that references an external snapshot */ 6631 if (blob_is_esnap_clone(origblob)) { 6632 origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6633 bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6634 if (bserrno != 0) { 6635 if (bserrno == -ENOENT) { 6636 SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID 6637 " xattr to remove\n", origblob->id); 6638 assert(false); 6639 } else { 6640 /* return cluster map back to original */ 6641 bs_snapshot_swap_cluster_maps(newblob, origblob); 6642 blob_set_thin_provision(newblob); 6643 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6644 return; 6645 } 6646 } 6647 } 6648 6649 bs_blob_list_remove(origblob); 6650 origblob->parent_id = newblob->id; 6651 /* set clone blob as thin provisioned */ 6652 blob_set_thin_provision(origblob); 6653 6654 bs_blob_list_add(newblob); 6655 6656 /* sync clone metadata */ 6657 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 6658 } 6659 6660 static void 6661 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 6662 { 6663 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6664 struct spdk_blob *origblob = ctx->original.blob; 6665 struct spdk_blob *newblob = ctx->new.blob; 6666 int bserrno; 6667 6668 if (rc != 0) { 6669 bs_clone_snapshot_newblob_cleanup(ctx, rc); 6670 return; 6671 } 6672 6673 ctx->frozen = true; 6674 6675 if (blob_is_esnap_clone(origblob)) { 6676 /* Clean up any channels associated with the original blob id because future IO will 6677 * perform IO using the snapshot blob_id. 6678 */ 6679 blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL); 6680 } 6681 if (newblob->back_bs_dev) { 6682 blob_back_bs_destroy(newblob); 6683 } 6684 /* set new back_bs_dev for snapshot */ 6685 newblob->back_bs_dev = origblob->back_bs_dev; 6686 /* Set invalid flags from origblob */ 6687 newblob->invalid_flags = origblob->invalid_flags; 6688 6689 /* inherit parent from original blob if set */ 6690 newblob->parent_id = origblob->parent_id; 6691 switch (origblob->parent_id) { 6692 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 6693 bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6694 if (bserrno != 0) { 6695 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6696 return; 6697 } 6698 break; 6699 case SPDK_BLOBID_INVALID: 6700 break; 6701 default: 6702 /* Set internal xattr for snapshot id */ 6703 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6704 &origblob->parent_id, sizeof(spdk_blob_id), true); 6705 if (bserrno != 0) { 6706 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6707 return; 6708 } 6709 } 6710 6711 /* swap cluster maps */ 6712 bs_snapshot_swap_cluster_maps(newblob, origblob); 6713 6714 /* Set the clear method on the new blob to match the original. */ 6715 blob_set_clear_method(newblob, origblob->clear_method); 6716 6717 /* sync snapshot metadata */ 6718 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6719 } 6720 6721 static void 6722 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6723 { 6724 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6725 struct spdk_blob *origblob = ctx->original.blob; 6726 struct spdk_blob *newblob = _blob; 6727 6728 if (bserrno != 0) { 6729 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6730 return; 6731 } 6732 6733 ctx->new.blob = newblob; 6734 assert(spdk_blob_is_thin_provisioned(newblob)); 6735 assert(spdk_mem_all_zero(newblob->active.clusters, 6736 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6737 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6738 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6739 6740 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6741 } 6742 6743 static void 6744 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6745 { 6746 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6747 struct spdk_blob *origblob = ctx->original.blob; 6748 6749 if (bserrno != 0) { 6750 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6751 return; 6752 } 6753 6754 ctx->new.id = blobid; 6755 ctx->cpl.u.blobid.blobid = blobid; 6756 6757 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6758 } 6759 6760 6761 static void 6762 bs_xattr_snapshot(void *arg, const char *name, 6763 const void **value, size_t *value_len) 6764 { 6765 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6766 6767 struct spdk_blob *blob = (struct spdk_blob *)arg; 6768 *value = &blob->id; 6769 *value_len = sizeof(blob->id); 6770 } 6771 6772 static void 6773 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6774 { 6775 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6776 struct spdk_blob_opts opts; 6777 struct spdk_blob_xattr_opts internal_xattrs; 6778 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6779 6780 if (bserrno != 0) { 6781 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6782 return; 6783 } 6784 6785 ctx->original.blob = _blob; 6786 6787 if (_blob->data_ro || _blob->md_ro) { 6788 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id 0x%" 6789 PRIx64 "\n", _blob->id); 6790 ctx->bserrno = -EINVAL; 6791 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6792 return; 6793 } 6794 6795 if (_blob->locked_operation_in_progress) { 6796 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6797 ctx->bserrno = -EBUSY; 6798 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6799 return; 6800 } 6801 6802 _blob->locked_operation_in_progress = true; 6803 6804 spdk_blob_opts_init(&opts, sizeof(opts)); 6805 blob_xattrs_init(&internal_xattrs); 6806 6807 /* Change the size of new blob to the same as in original blob, 6808 * but do not allocate clusters */ 6809 opts.thin_provision = true; 6810 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6811 opts.use_extent_table = _blob->use_extent_table; 6812 6813 /* If there are any xattrs specified for snapshot, set them now */ 6814 if (ctx->xattrs) { 6815 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6816 } 6817 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6818 internal_xattrs.count = 1; 6819 internal_xattrs.ctx = _blob; 6820 internal_xattrs.names = xattrs_names; 6821 internal_xattrs.get_value = bs_xattr_snapshot; 6822 6823 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6824 bs_snapshot_newblob_create_cpl, ctx); 6825 } 6826 6827 void 6828 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 6829 const struct spdk_blob_xattr_opts *snapshot_xattrs, 6830 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6831 { 6832 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6833 6834 if (!ctx) { 6835 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6836 return; 6837 } 6838 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6839 ctx->cpl.u.blobid.cb_fn = cb_fn; 6840 ctx->cpl.u.blobid.cb_arg = cb_arg; 6841 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6842 ctx->bserrno = 0; 6843 ctx->frozen = false; 6844 ctx->original.id = blobid; 6845 ctx->xattrs = snapshot_xattrs; 6846 6847 spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx); 6848 } 6849 /* END spdk_bs_create_snapshot */ 6850 6851 /* START spdk_bs_create_clone */ 6852 6853 static void 6854 bs_xattr_clone(void *arg, const char *name, 6855 const void **value, size_t *value_len) 6856 { 6857 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 6858 6859 struct spdk_blob *blob = (struct spdk_blob *)arg; 6860 *value = &blob->id; 6861 *value_len = sizeof(blob->id); 6862 } 6863 6864 static void 6865 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6866 { 6867 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6868 struct spdk_blob *clone = _blob; 6869 6870 ctx->new.blob = clone; 6871 bs_blob_list_add(clone); 6872 6873 spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx); 6874 } 6875 6876 static void 6877 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6878 { 6879 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6880 6881 ctx->cpl.u.blobid.blobid = blobid; 6882 spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx); 6883 } 6884 6885 static void 6886 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6887 { 6888 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6889 struct spdk_blob_opts opts; 6890 struct spdk_blob_xattr_opts internal_xattrs; 6891 char *xattr_names[] = { BLOB_SNAPSHOT }; 6892 6893 if (bserrno != 0) { 6894 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6895 return; 6896 } 6897 6898 ctx->original.blob = _blob; 6899 ctx->original.md_ro = _blob->md_ro; 6900 6901 if (!_blob->data_ro || !_blob->md_ro) { 6902 SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n"); 6903 ctx->bserrno = -EINVAL; 6904 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6905 return; 6906 } 6907 6908 if (_blob->locked_operation_in_progress) { 6909 SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n"); 6910 ctx->bserrno = -EBUSY; 6911 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6912 return; 6913 } 6914 6915 _blob->locked_operation_in_progress = true; 6916 6917 spdk_blob_opts_init(&opts, sizeof(opts)); 6918 blob_xattrs_init(&internal_xattrs); 6919 6920 opts.thin_provision = true; 6921 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6922 opts.use_extent_table = _blob->use_extent_table; 6923 if (ctx->xattrs) { 6924 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6925 } 6926 6927 /* Set internal xattr BLOB_SNAPSHOT */ 6928 internal_xattrs.count = 1; 6929 internal_xattrs.ctx = _blob; 6930 internal_xattrs.names = xattr_names; 6931 internal_xattrs.get_value = bs_xattr_clone; 6932 6933 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6934 bs_clone_newblob_create_cpl, ctx); 6935 } 6936 6937 void 6938 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6939 const struct spdk_blob_xattr_opts *clone_xattrs, 6940 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6941 { 6942 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6943 6944 if (!ctx) { 6945 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6946 return; 6947 } 6948 6949 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6950 ctx->cpl.u.blobid.cb_fn = cb_fn; 6951 ctx->cpl.u.blobid.cb_arg = cb_arg; 6952 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6953 ctx->bserrno = 0; 6954 ctx->xattrs = clone_xattrs; 6955 ctx->original.id = blobid; 6956 6957 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6958 } 6959 6960 /* END spdk_bs_create_clone */ 6961 6962 /* START spdk_bs_inflate_blob */ 6963 6964 static void 6965 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6966 { 6967 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6968 struct spdk_blob *_blob = ctx->original.blob; 6969 6970 if (bserrno != 0) { 6971 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6972 return; 6973 } 6974 6975 /* Temporarily override md_ro flag for MD modification */ 6976 _blob->md_ro = false; 6977 6978 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6979 if (bserrno != 0) { 6980 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6981 return; 6982 } 6983 6984 assert(_parent != NULL); 6985 6986 bs_blob_list_remove(_blob); 6987 _blob->parent_id = _parent->id; 6988 6989 blob_back_bs_destroy(_blob); 6990 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6991 bs_blob_list_add(_blob); 6992 6993 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6994 } 6995 6996 static void 6997 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 6998 { 6999 struct spdk_blob *_blob = ctx->original.blob; 7000 struct spdk_blob *_parent; 7001 7002 if (ctx->allocate_all) { 7003 /* remove thin provisioning */ 7004 bs_blob_list_remove(_blob); 7005 if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 7006 blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 7007 _blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 7008 } else { 7009 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 7010 } 7011 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV; 7012 blob_back_bs_destroy(_blob); 7013 _blob->parent_id = SPDK_BLOBID_INVALID; 7014 } else { 7015 /* For now, esnap clones always have allocate_all set. */ 7016 assert(!blob_is_esnap_clone(_blob)); 7017 7018 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob; 7019 if (_parent->parent_id != SPDK_BLOBID_INVALID) { 7020 /* We must change the parent of the inflated blob */ 7021 spdk_bs_open_blob(_blob->bs, _parent->parent_id, 7022 bs_inflate_blob_set_parent_cpl, ctx); 7023 return; 7024 } 7025 7026 bs_blob_list_remove(_blob); 7027 _blob->parent_id = SPDK_BLOBID_INVALID; 7028 blob_back_bs_destroy(_blob); 7029 _blob->back_bs_dev = bs_create_zeroes_dev(); 7030 } 7031 7032 /* Temporarily override md_ro flag for MD modification */ 7033 _blob->md_ro = false; 7034 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 7035 _blob->state = SPDK_BLOB_STATE_DIRTY; 7036 7037 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 7038 } 7039 7040 /* Check if cluster needs allocation */ 7041 static inline bool 7042 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all) 7043 { 7044 struct spdk_blob_bs_dev *b; 7045 7046 assert(blob != NULL); 7047 7048 if (blob->active.clusters[cluster] != 0) { 7049 /* Cluster is already allocated */ 7050 return false; 7051 } 7052 7053 if (blob->parent_id == SPDK_BLOBID_INVALID) { 7054 /* Blob have no parent blob */ 7055 return allocate_all; 7056 } 7057 7058 if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 7059 return true; 7060 } 7061 7062 b = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 7063 return (allocate_all || b->blob->active.clusters[cluster] != 0); 7064 } 7065 7066 static void 7067 bs_inflate_blob_touch_next(void *cb_arg, int bserrno) 7068 { 7069 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 7070 struct spdk_blob *_blob = ctx->original.blob; 7071 struct spdk_bs_cpl cpl; 7072 spdk_bs_user_op_t *op; 7073 uint64_t offset; 7074 7075 if (bserrno != 0) { 7076 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 7077 return; 7078 } 7079 7080 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) { 7081 if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) { 7082 break; 7083 } 7084 } 7085 7086 if (ctx->cluster < _blob->active.num_clusters) { 7087 offset = bs_cluster_to_lba(_blob->bs, ctx->cluster); 7088 7089 /* We may safely increment a cluster before copying */ 7090 ctx->cluster++; 7091 7092 /* Use a dummy 0B read as a context for cluster copy */ 7093 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7094 cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next; 7095 cpl.u.blob_basic.cb_arg = ctx; 7096 7097 op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob, 7098 NULL, 0, offset, 0); 7099 if (!op) { 7100 bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM); 7101 return; 7102 } 7103 7104 bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op); 7105 } else { 7106 bs_inflate_blob_done(ctx); 7107 } 7108 } 7109 7110 static void 7111 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 7112 { 7113 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 7114 uint64_t clusters_needed; 7115 uint64_t i; 7116 7117 if (bserrno != 0) { 7118 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 7119 return; 7120 } 7121 7122 ctx->original.blob = _blob; 7123 ctx->original.md_ro = _blob->md_ro; 7124 7125 if (_blob->locked_operation_in_progress) { 7126 SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n"); 7127 ctx->bserrno = -EBUSY; 7128 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 7129 return; 7130 } 7131 7132 _blob->locked_operation_in_progress = true; 7133 7134 switch (_blob->parent_id) { 7135 case SPDK_BLOBID_INVALID: 7136 if (!ctx->allocate_all) { 7137 /* This blob has no parent, so we cannot decouple it. */ 7138 SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n"); 7139 bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 7140 return; 7141 } 7142 break; 7143 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 7144 /* 7145 * It would be better to rely on back_bs_dev->is_zeroes(), to determine which 7146 * clusters require allocation. Until there is a blobstore consumer that 7147 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes() it is not 7148 * worth the effort. 7149 */ 7150 ctx->allocate_all = true; 7151 break; 7152 default: 7153 break; 7154 } 7155 7156 if (spdk_blob_is_thin_provisioned(_blob) == false) { 7157 /* This is not thin provisioned blob. No need to inflate. */ 7158 bs_clone_snapshot_origblob_cleanup(ctx, 0); 7159 return; 7160 } 7161 7162 /* Do two passes - one to verify that we can obtain enough clusters 7163 * and another to actually claim them. 7164 */ 7165 clusters_needed = 0; 7166 for (i = 0; i < _blob->active.num_clusters; i++) { 7167 if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) { 7168 clusters_needed++; 7169 } 7170 } 7171 7172 if (clusters_needed > _blob->bs->num_free_clusters) { 7173 /* Not enough free clusters. Cannot satisfy the request. */ 7174 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 7175 return; 7176 } 7177 7178 ctx->cluster = 0; 7179 bs_inflate_blob_touch_next(ctx, 0); 7180 } 7181 7182 static void 7183 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7184 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 7185 { 7186 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 7187 7188 if (!ctx) { 7189 cb_fn(cb_arg, -ENOMEM); 7190 return; 7191 } 7192 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7193 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7194 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7195 ctx->bserrno = 0; 7196 ctx->original.id = blobid; 7197 ctx->channel = channel; 7198 ctx->allocate_all = allocate_all; 7199 7200 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 7201 } 7202 7203 void 7204 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7205 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7206 { 7207 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 7208 } 7209 7210 void 7211 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7212 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7213 { 7214 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 7215 } 7216 /* END spdk_bs_inflate_blob */ 7217 7218 /* START spdk_bs_blob_shallow_copy */ 7219 7220 struct shallow_copy_ctx { 7221 struct spdk_bs_cpl cpl; 7222 int bserrno; 7223 7224 /* Blob source for copy */ 7225 struct spdk_blob_store *bs; 7226 spdk_blob_id blobid; 7227 struct spdk_blob *blob; 7228 struct spdk_io_channel *blob_channel; 7229 7230 /* Destination device for copy */ 7231 struct spdk_bs_dev *ext_dev; 7232 struct spdk_io_channel *ext_channel; 7233 7234 /* Current cluster for copy operation */ 7235 uint64_t cluster; 7236 7237 /* Buffer for blob reading */ 7238 uint8_t *read_buff; 7239 7240 /* Struct for external device writing */ 7241 struct spdk_bs_dev_cb_args ext_args; 7242 7243 /* Actual number of copied clusters */ 7244 uint64_t copied_clusters_count; 7245 7246 /* Status callback for updates about the ongoing operation */ 7247 spdk_blob_shallow_copy_status status_cb; 7248 7249 /* Argument passed to function status_cb */ 7250 void *status_cb_arg; 7251 }; 7252 7253 static void 7254 bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno) 7255 { 7256 struct shallow_copy_ctx *ctx = cb_arg; 7257 struct spdk_bs_cpl *cpl = &ctx->cpl; 7258 7259 if (bserrno != 0) { 7260 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, cleanup error %d\n", ctx->blob->id, bserrno); 7261 ctx->bserrno = bserrno; 7262 } 7263 7264 ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel); 7265 spdk_free(ctx->read_buff); 7266 7267 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 7268 7269 free(ctx); 7270 } 7271 7272 static void 7273 bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, int bserrno) 7274 { 7275 struct shallow_copy_ctx *ctx = cb_arg; 7276 struct spdk_blob *_blob = ctx->blob; 7277 7278 if (bserrno != 0) { 7279 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, ext dev write error %d\n", ctx->blob->id, bserrno); 7280 ctx->bserrno = bserrno; 7281 _blob->locked_operation_in_progress = false; 7282 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7283 return; 7284 } 7285 7286 ctx->cluster++; 7287 if (ctx->status_cb) { 7288 ctx->copied_clusters_count++; 7289 ctx->status_cb(ctx->copied_clusters_count, ctx->status_cb_arg); 7290 } 7291 7292 bs_shallow_copy_cluster_find_next(ctx); 7293 } 7294 7295 static void 7296 bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno) 7297 { 7298 struct shallow_copy_ctx *ctx = cb_arg; 7299 struct spdk_bs_dev *ext_dev = ctx->ext_dev; 7300 struct spdk_blob *_blob = ctx->blob; 7301 7302 if (bserrno != 0) { 7303 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob read error %d\n", ctx->blob->id, bserrno); 7304 ctx->bserrno = bserrno; 7305 _blob->locked_operation_in_progress = false; 7306 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7307 return; 7308 } 7309 7310 ctx->ext_args.channel = ctx->ext_channel; 7311 ctx->ext_args.cb_fn = bs_shallow_copy_bdev_write_cpl; 7312 ctx->ext_args.cb_arg = ctx; 7313 7314 ext_dev->write(ext_dev, ctx->ext_channel, ctx->read_buff, 7315 bs_cluster_to_lba(_blob->bs, ctx->cluster), 7316 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz), 7317 &ctx->ext_args); 7318 } 7319 7320 static void 7321 bs_shallow_copy_cluster_find_next(void *cb_arg) 7322 { 7323 struct shallow_copy_ctx *ctx = cb_arg; 7324 struct spdk_blob *_blob = ctx->blob; 7325 7326 while (ctx->cluster < _blob->active.num_clusters) { 7327 if (_blob->active.clusters[ctx->cluster] != 0) { 7328 break; 7329 } 7330 7331 ctx->cluster++; 7332 } 7333 7334 if (ctx->cluster < _blob->active.num_clusters) { 7335 blob_request_submit_op_single(ctx->blob_channel, _blob, ctx->read_buff, 7336 bs_cluster_to_lba(_blob->bs, ctx->cluster), 7337 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz), 7338 bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ); 7339 } else { 7340 _blob->locked_operation_in_progress = false; 7341 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7342 } 7343 } 7344 7345 static void 7346 bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 7347 { 7348 struct shallow_copy_ctx *ctx = cb_arg; 7349 struct spdk_bs_dev *ext_dev = ctx->ext_dev; 7350 uint32_t blob_block_size; 7351 uint64_t blob_total_size; 7352 7353 if (bserrno != 0) { 7354 SPDK_ERRLOG("Shallow copy blob open error %d\n", bserrno); 7355 ctx->bserrno = bserrno; 7356 bs_shallow_copy_cleanup_finish(ctx, 0); 7357 return; 7358 } 7359 7360 if (!spdk_blob_is_read_only(_blob)) { 7361 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob must be read only\n", _blob->id); 7362 ctx->bserrno = -EPERM; 7363 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7364 return; 7365 } 7366 7367 blob_block_size = _blob->bs->dev->blocklen; 7368 blob_total_size = spdk_blob_get_num_clusters(_blob) * spdk_bs_get_cluster_size(_blob->bs); 7369 7370 if (blob_total_size > ext_dev->blockcnt * ext_dev->blocklen) { 7371 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device must have at least blob size\n", 7372 _blob->id); 7373 ctx->bserrno = -EINVAL; 7374 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7375 return; 7376 } 7377 7378 if (blob_block_size % ext_dev->blocklen != 0) { 7379 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device block size is not compatible with \ 7380 blobstore block size\n", _blob->id); 7381 ctx->bserrno = -EINVAL; 7382 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7383 return; 7384 } 7385 7386 ctx->blob = _blob; 7387 7388 if (_blob->locked_operation_in_progress) { 7389 SPDK_DEBUGLOG(blob, "blob 0x%" PRIx64 " shallow copy - another operation in progress\n", _blob->id); 7390 ctx->bserrno = -EBUSY; 7391 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7392 return; 7393 } 7394 7395 _blob->locked_operation_in_progress = true; 7396 7397 ctx->cluster = 0; 7398 bs_shallow_copy_cluster_find_next(ctx); 7399 } 7400 7401 int 7402 spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7403 spdk_blob_id blobid, struct spdk_bs_dev *ext_dev, 7404 spdk_blob_shallow_copy_status status_cb_fn, void *status_cb_arg, 7405 spdk_blob_op_complete cb_fn, void *cb_arg) 7406 { 7407 struct shallow_copy_ctx *ctx; 7408 struct spdk_io_channel *ext_channel; 7409 7410 ctx = calloc(1, sizeof(*ctx)); 7411 if (!ctx) { 7412 return -ENOMEM; 7413 } 7414 7415 ctx->bs = bs; 7416 ctx->blobid = blobid; 7417 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7418 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7419 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7420 ctx->bserrno = 0; 7421 ctx->blob_channel = channel; 7422 ctx->status_cb = status_cb_fn; 7423 ctx->status_cb_arg = status_cb_arg; 7424 ctx->read_buff = spdk_malloc(bs->cluster_sz, bs->dev->blocklen, NULL, 7425 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 7426 if (!ctx->read_buff) { 7427 free(ctx); 7428 return -ENOMEM; 7429 } 7430 7431 ext_channel = ext_dev->create_channel(ext_dev); 7432 if (!ext_channel) { 7433 spdk_free(ctx->read_buff); 7434 free(ctx); 7435 return -ENOMEM; 7436 } 7437 ctx->ext_dev = ext_dev; 7438 ctx->ext_channel = ext_channel; 7439 7440 spdk_bs_open_blob(ctx->bs, ctx->blobid, bs_shallow_copy_blob_open_cpl, ctx); 7441 7442 return 0; 7443 } 7444 /* END spdk_bs_blob_shallow_copy */ 7445 7446 /* START spdk_bs_blob_set_parent */ 7447 7448 struct set_parent_ctx { 7449 struct spdk_blob_store *bs; 7450 int bserrno; 7451 spdk_bs_op_complete cb_fn; 7452 void *cb_arg; 7453 7454 struct spdk_blob *blob; 7455 bool blob_md_ro; 7456 7457 struct blob_parent parent; 7458 }; 7459 7460 static void 7461 bs_set_parent_cleanup_finish(void *cb_arg, int bserrno) 7462 { 7463 struct set_parent_ctx *ctx = cb_arg; 7464 7465 assert(ctx != NULL); 7466 7467 if (bserrno != 0) { 7468 SPDK_ERRLOG("blob set parent finish error %d\n", bserrno); 7469 if (ctx->bserrno == 0) { 7470 ctx->bserrno = bserrno; 7471 } 7472 } 7473 7474 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7475 7476 free(ctx); 7477 } 7478 7479 static void 7480 bs_set_parent_close_snapshot(void *cb_arg, int bserrno) 7481 { 7482 struct set_parent_ctx *ctx = cb_arg; 7483 7484 if (ctx->bserrno != 0) { 7485 spdk_blob_close(ctx->parent.u.snapshot.blob, bs_set_parent_cleanup_finish, ctx); 7486 return; 7487 } 7488 7489 if (bserrno != 0) { 7490 SPDK_ERRLOG("blob close error %d\n", bserrno); 7491 ctx->bserrno = bserrno; 7492 } 7493 7494 bs_set_parent_cleanup_finish(ctx, ctx->bserrno); 7495 } 7496 7497 static void 7498 bs_set_parent_close_blob(void *cb_arg, int bserrno) 7499 { 7500 struct set_parent_ctx *ctx = cb_arg; 7501 struct spdk_blob *blob = ctx->blob; 7502 struct spdk_blob *snapshot = ctx->parent.u.snapshot.blob; 7503 7504 if (bserrno != 0 && ctx->bserrno == 0) { 7505 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7506 ctx->bserrno = bserrno; 7507 } 7508 7509 /* Revert md_ro to original state */ 7510 blob->md_ro = ctx->blob_md_ro; 7511 7512 blob->locked_operation_in_progress = false; 7513 snapshot->locked_operation_in_progress = false; 7514 7515 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7516 } 7517 7518 static void 7519 bs_set_parent_set_back_bs_dev_done(void *cb_arg, int bserrno) 7520 { 7521 struct set_parent_ctx *ctx = cb_arg; 7522 struct spdk_blob *blob = ctx->blob; 7523 7524 if (bserrno != 0) { 7525 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7526 ctx->bserrno = bserrno; 7527 bs_set_parent_close_blob(ctx, bserrno); 7528 return; 7529 } 7530 7531 spdk_blob_sync_md(blob, bs_set_parent_close_blob, ctx); 7532 } 7533 7534 static int 7535 bs_set_parent_refs(struct spdk_blob *blob, struct blob_parent *parent) 7536 { 7537 int rc; 7538 7539 bs_blob_list_remove(blob); 7540 7541 rc = blob_set_xattr(blob, BLOB_SNAPSHOT, &parent->u.snapshot.id, sizeof(spdk_blob_id), true); 7542 if (rc != 0) { 7543 SPDK_ERRLOG("error %d setting snapshot xattr\n", rc); 7544 return rc; 7545 } 7546 blob->parent_id = parent->u.snapshot.id; 7547 7548 if (blob_is_esnap_clone(blob)) { 7549 /* Remove the xattr that references the external snapshot */ 7550 blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 7551 blob_remove_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 7552 } 7553 7554 bs_blob_list_add(blob); 7555 7556 return 0; 7557 } 7558 7559 static void 7560 bs_set_parent_snapshot_open_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno) 7561 { 7562 struct set_parent_ctx *ctx = cb_arg; 7563 struct spdk_blob *blob = ctx->blob; 7564 struct spdk_bs_dev *back_bs_dev; 7565 7566 if (bserrno != 0) { 7567 SPDK_ERRLOG("snapshot open error %d\n", bserrno); 7568 ctx->bserrno = bserrno; 7569 spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx); 7570 return; 7571 } 7572 7573 ctx->parent.u.snapshot.blob = snapshot; 7574 ctx->parent.u.snapshot.id = snapshot->id; 7575 7576 if (!spdk_blob_is_snapshot(snapshot)) { 7577 SPDK_ERRLOG("parent blob is not a snapshot\n"); 7578 ctx->bserrno = -EINVAL; 7579 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7580 return; 7581 } 7582 7583 if (blob->active.num_clusters != snapshot->active.num_clusters) { 7584 SPDK_ERRLOG("parent blob has a number of clusters different from child's ones\n"); 7585 ctx->bserrno = -EINVAL; 7586 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7587 return; 7588 } 7589 7590 if (blob->locked_operation_in_progress || snapshot->locked_operation_in_progress) { 7591 SPDK_ERRLOG("cannot set parent of blob, another operation in progress\n"); 7592 ctx->bserrno = -EBUSY; 7593 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7594 return; 7595 } 7596 7597 blob->locked_operation_in_progress = true; 7598 snapshot->locked_operation_in_progress = true; 7599 7600 /* Temporarily override md_ro flag for MD modification */ 7601 blob->md_ro = false; 7602 7603 back_bs_dev = bs_create_blob_bs_dev(snapshot); 7604 7605 blob_set_back_bs_dev(blob, back_bs_dev, bs_set_parent_refs, &ctx->parent, 7606 bs_set_parent_set_back_bs_dev_done, 7607 ctx); 7608 } 7609 7610 static void 7611 bs_set_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7612 { 7613 struct set_parent_ctx *ctx = cb_arg; 7614 7615 if (bserrno != 0) { 7616 SPDK_ERRLOG("blob open error %d\n", bserrno); 7617 ctx->bserrno = bserrno; 7618 bs_set_parent_cleanup_finish(ctx, 0); 7619 return; 7620 } 7621 7622 if (!spdk_blob_is_thin_provisioned(blob)) { 7623 SPDK_ERRLOG("blob is not thin-provisioned\n"); 7624 ctx->bserrno = -EINVAL; 7625 spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx); 7626 return; 7627 } 7628 7629 ctx->blob = blob; 7630 ctx->blob_md_ro = blob->md_ro; 7631 7632 spdk_bs_open_blob(ctx->bs, ctx->parent.u.snapshot.id, bs_set_parent_snapshot_open_cpl, ctx); 7633 } 7634 7635 void 7636 spdk_bs_blob_set_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id, 7637 spdk_blob_id snapshot_id, spdk_blob_op_complete cb_fn, void *cb_arg) 7638 { 7639 struct set_parent_ctx *ctx; 7640 7641 if (snapshot_id == SPDK_BLOBID_INVALID) { 7642 SPDK_ERRLOG("snapshot id not valid\n"); 7643 cb_fn(cb_arg, -EINVAL); 7644 return; 7645 } 7646 7647 if (blob_id == snapshot_id) { 7648 SPDK_ERRLOG("blob id and snapshot id cannot be the same\n"); 7649 cb_fn(cb_arg, -EINVAL); 7650 return; 7651 } 7652 7653 if (spdk_blob_get_parent_snapshot(bs, blob_id) == snapshot_id) { 7654 SPDK_NOTICELOG("snapshot is already the parent of blob\n"); 7655 cb_fn(cb_arg, -EEXIST); 7656 return; 7657 } 7658 7659 ctx = calloc(1, sizeof(*ctx)); 7660 if (!ctx) { 7661 cb_fn(cb_arg, -ENOMEM); 7662 return; 7663 } 7664 7665 ctx->bs = bs; 7666 ctx->parent.u.snapshot.id = snapshot_id; 7667 ctx->cb_fn = cb_fn; 7668 ctx->cb_arg = cb_arg; 7669 ctx->bserrno = 0; 7670 7671 spdk_bs_open_blob(bs, blob_id, bs_set_parent_blob_open_cpl, ctx); 7672 } 7673 /* END spdk_bs_blob_set_parent */ 7674 7675 /* START spdk_bs_blob_set_external_parent */ 7676 7677 static void 7678 bs_set_external_parent_cleanup_finish(void *cb_arg, int bserrno) 7679 { 7680 struct set_parent_ctx *ctx = cb_arg; 7681 7682 if (bserrno != 0) { 7683 SPDK_ERRLOG("blob set external parent finish error %d\n", bserrno); 7684 if (ctx->bserrno == 0) { 7685 ctx->bserrno = bserrno; 7686 } 7687 } 7688 7689 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7690 7691 free(ctx->parent.u.esnap.id); 7692 free(ctx); 7693 } 7694 7695 static void 7696 bs_set_external_parent_close_blob(void *cb_arg, int bserrno) 7697 { 7698 struct set_parent_ctx *ctx = cb_arg; 7699 struct spdk_blob *blob = ctx->blob; 7700 7701 if (bserrno != 0 && ctx->bserrno == 0) { 7702 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7703 ctx->bserrno = bserrno; 7704 } 7705 7706 /* Revert md_ro to original state */ 7707 blob->md_ro = ctx->blob_md_ro; 7708 7709 blob->locked_operation_in_progress = false; 7710 7711 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7712 } 7713 7714 static void 7715 bs_set_external_parent_unfrozen(void *cb_arg, int bserrno) 7716 { 7717 struct set_parent_ctx *ctx = cb_arg; 7718 struct spdk_blob *blob = ctx->blob; 7719 7720 if (bserrno != 0) { 7721 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7722 ctx->bserrno = bserrno; 7723 bs_set_external_parent_close_blob(ctx, bserrno); 7724 return; 7725 } 7726 7727 spdk_blob_sync_md(blob, bs_set_external_parent_close_blob, ctx); 7728 } 7729 7730 static int 7731 bs_set_external_parent_refs(struct spdk_blob *blob, struct blob_parent *parent) 7732 { 7733 int rc; 7734 7735 bs_blob_list_remove(blob); 7736 7737 if (spdk_blob_is_clone(blob)) { 7738 /* Remove the xattr that references the snapshot */ 7739 blob->parent_id = SPDK_BLOBID_INVALID; 7740 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 7741 } 7742 7743 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, parent->u.esnap.id, 7744 parent->u.esnap.id_len, true); 7745 if (rc != 0) { 7746 SPDK_ERRLOG("error %d setting external snapshot xattr\n", rc); 7747 return rc; 7748 } 7749 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 7750 blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 7751 7752 bs_blob_list_add(blob); 7753 7754 return 0; 7755 } 7756 7757 static void 7758 bs_set_external_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7759 { 7760 struct set_parent_ctx *ctx = cb_arg; 7761 const void *esnap_id; 7762 size_t esnap_id_len; 7763 int rc; 7764 7765 if (bserrno != 0) { 7766 SPDK_ERRLOG("blob open error %d\n", bserrno); 7767 ctx->bserrno = bserrno; 7768 bs_set_parent_cleanup_finish(ctx, 0); 7769 return; 7770 } 7771 7772 ctx->blob = blob; 7773 ctx->blob_md_ro = blob->md_ro; 7774 7775 rc = spdk_blob_get_esnap_id(blob, &esnap_id, &esnap_id_len); 7776 if (rc == 0 && esnap_id != NULL && esnap_id_len == ctx->parent.u.esnap.id_len && 7777 memcmp(esnap_id, ctx->parent.u.esnap.id, esnap_id_len) == 0) { 7778 SPDK_ERRLOG("external snapshot is already the parent of blob\n"); 7779 ctx->bserrno = -EEXIST; 7780 goto error; 7781 } 7782 7783 if (!spdk_blob_is_thin_provisioned(blob)) { 7784 SPDK_ERRLOG("blob is not thin-provisioned\n"); 7785 ctx->bserrno = -EINVAL; 7786 goto error; 7787 } 7788 7789 if (blob->locked_operation_in_progress) { 7790 SPDK_ERRLOG("cannot set external parent of blob, another operation in progress\n"); 7791 ctx->bserrno = -EBUSY; 7792 goto error; 7793 } 7794 7795 blob->locked_operation_in_progress = true; 7796 7797 /* Temporarily override md_ro flag for MD modification */ 7798 blob->md_ro = false; 7799 7800 blob_set_back_bs_dev(blob, ctx->parent.u.esnap.back_bs_dev, bs_set_external_parent_refs, 7801 &ctx->parent, bs_set_external_parent_unfrozen, ctx); 7802 return; 7803 7804 error: 7805 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7806 } 7807 7808 void 7809 spdk_bs_blob_set_external_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id, 7810 struct spdk_bs_dev *esnap_bs_dev, const void *esnap_id, 7811 uint32_t esnap_id_len, spdk_blob_op_complete cb_fn, void *cb_arg) 7812 { 7813 struct set_parent_ctx *ctx; 7814 uint64_t esnap_dev_size, cluster_sz; 7815 7816 if (sizeof(blob_id) == esnap_id_len && memcmp(&blob_id, esnap_id, sizeof(blob_id)) == 0) { 7817 SPDK_ERRLOG("blob id and external snapshot id cannot be the same\n"); 7818 cb_fn(cb_arg, -EINVAL); 7819 return; 7820 } 7821 7822 esnap_dev_size = esnap_bs_dev->blockcnt * esnap_bs_dev->blocklen; 7823 cluster_sz = spdk_bs_get_cluster_size(bs); 7824 if ((esnap_dev_size % cluster_sz) != 0) { 7825 SPDK_ERRLOG("Esnap device size %" PRIu64 " is not an integer multiple of " 7826 "cluster size %" PRIu64 "\n", esnap_dev_size, cluster_sz); 7827 cb_fn(cb_arg, -EINVAL); 7828 return; 7829 } 7830 7831 ctx = calloc(1, sizeof(*ctx)); 7832 if (!ctx) { 7833 cb_fn(cb_arg, -ENOMEM); 7834 return; 7835 } 7836 7837 ctx->parent.u.esnap.id = calloc(1, esnap_id_len); 7838 if (!ctx->parent.u.esnap.id) { 7839 free(ctx); 7840 cb_fn(cb_arg, -ENOMEM); 7841 return; 7842 } 7843 7844 ctx->bs = bs; 7845 ctx->parent.u.esnap.back_bs_dev = esnap_bs_dev; 7846 memcpy(ctx->parent.u.esnap.id, esnap_id, esnap_id_len); 7847 ctx->parent.u.esnap.id_len = esnap_id_len; 7848 ctx->cb_fn = cb_fn; 7849 ctx->cb_arg = cb_arg; 7850 ctx->bserrno = 0; 7851 7852 spdk_bs_open_blob(bs, blob_id, bs_set_external_parent_blob_open_cpl, ctx); 7853 } 7854 /* END spdk_bs_blob_set_external_parent */ 7855 7856 /* START spdk_blob_resize */ 7857 struct spdk_bs_resize_ctx { 7858 spdk_blob_op_complete cb_fn; 7859 void *cb_arg; 7860 struct spdk_blob *blob; 7861 uint64_t sz; 7862 int rc; 7863 }; 7864 7865 static void 7866 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 7867 { 7868 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7869 7870 if (rc != 0) { 7871 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 7872 } 7873 7874 if (ctx->rc != 0) { 7875 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 7876 rc = ctx->rc; 7877 } 7878 7879 ctx->blob->locked_operation_in_progress = false; 7880 7881 ctx->cb_fn(ctx->cb_arg, rc); 7882 free(ctx); 7883 } 7884 7885 static void 7886 bs_resize_freeze_cpl(void *cb_arg, int rc) 7887 { 7888 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7889 7890 if (rc != 0) { 7891 ctx->blob->locked_operation_in_progress = false; 7892 ctx->cb_fn(ctx->cb_arg, rc); 7893 free(ctx); 7894 return; 7895 } 7896 7897 ctx->rc = blob_resize(ctx->blob, ctx->sz); 7898 7899 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 7900 } 7901 7902 void 7903 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 7904 { 7905 struct spdk_bs_resize_ctx *ctx; 7906 7907 blob_verify_md_op(blob); 7908 7909 SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz); 7910 7911 if (blob->md_ro) { 7912 cb_fn(cb_arg, -EPERM); 7913 return; 7914 } 7915 7916 if (sz == blob->active.num_clusters) { 7917 cb_fn(cb_arg, 0); 7918 return; 7919 } 7920 7921 if (blob->locked_operation_in_progress) { 7922 cb_fn(cb_arg, -EBUSY); 7923 return; 7924 } 7925 7926 ctx = calloc(1, sizeof(*ctx)); 7927 if (!ctx) { 7928 cb_fn(cb_arg, -ENOMEM); 7929 return; 7930 } 7931 7932 blob->locked_operation_in_progress = true; 7933 ctx->cb_fn = cb_fn; 7934 ctx->cb_arg = cb_arg; 7935 ctx->blob = blob; 7936 ctx->sz = sz; 7937 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 7938 } 7939 7940 /* END spdk_blob_resize */ 7941 7942 7943 /* START spdk_bs_delete_blob */ 7944 7945 static void 7946 bs_delete_close_cpl(void *cb_arg, int bserrno) 7947 { 7948 spdk_bs_sequence_t *seq = cb_arg; 7949 7950 bs_sequence_finish(seq, bserrno); 7951 } 7952 7953 static void 7954 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7955 { 7956 struct spdk_blob *blob = cb_arg; 7957 7958 if (bserrno != 0) { 7959 /* 7960 * We already removed this blob from the blobstore tailq, so 7961 * we need to free it here since this is the last reference 7962 * to it. 7963 */ 7964 blob_free(blob); 7965 bs_delete_close_cpl(seq, bserrno); 7966 return; 7967 } 7968 7969 /* 7970 * This will immediately decrement the ref_count and call 7971 * the completion routine since the metadata state is clean. 7972 * By calling spdk_blob_close, we reduce the number of call 7973 * points into code that touches the blob->open_ref count 7974 * and the blobstore's blob list. 7975 */ 7976 spdk_blob_close(blob, bs_delete_close_cpl, seq); 7977 } 7978 7979 struct delete_snapshot_ctx { 7980 struct spdk_blob_list *parent_snapshot_entry; 7981 struct spdk_blob *snapshot; 7982 struct spdk_blob_md_page *page; 7983 bool snapshot_md_ro; 7984 struct spdk_blob *clone; 7985 bool clone_md_ro; 7986 spdk_blob_op_with_handle_complete cb_fn; 7987 void *cb_arg; 7988 int bserrno; 7989 uint32_t next_extent_page; 7990 }; 7991 7992 static void 7993 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 7994 { 7995 struct delete_snapshot_ctx *ctx = cb_arg; 7996 7997 if (bserrno != 0) { 7998 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 7999 } 8000 8001 assert(ctx != NULL); 8002 8003 if (bserrno != 0 && ctx->bserrno == 0) { 8004 ctx->bserrno = bserrno; 8005 } 8006 8007 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 8008 spdk_free(ctx->page); 8009 free(ctx); 8010 } 8011 8012 static void 8013 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 8014 { 8015 struct delete_snapshot_ctx *ctx = cb_arg; 8016 8017 if (bserrno != 0) { 8018 ctx->bserrno = bserrno; 8019 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 8020 } 8021 8022 if (ctx->bserrno != 0) { 8023 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 8024 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 8025 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 8026 } 8027 8028 ctx->snapshot->locked_operation_in_progress = false; 8029 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8030 8031 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 8032 } 8033 8034 static void 8035 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 8036 { 8037 struct delete_snapshot_ctx *ctx = cb_arg; 8038 8039 ctx->clone->locked_operation_in_progress = false; 8040 ctx->clone->md_ro = ctx->clone_md_ro; 8041 8042 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8043 } 8044 8045 static void 8046 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 8047 { 8048 struct delete_snapshot_ctx *ctx = cb_arg; 8049 8050 if (bserrno) { 8051 ctx->bserrno = bserrno; 8052 delete_snapshot_cleanup_clone(ctx, 0); 8053 return; 8054 } 8055 8056 ctx->clone->locked_operation_in_progress = false; 8057 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 8058 } 8059 8060 static void 8061 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 8062 { 8063 struct delete_snapshot_ctx *ctx = cb_arg; 8064 struct spdk_blob_list *parent_snapshot_entry = NULL; 8065 struct spdk_blob_list *snapshot_entry = NULL; 8066 struct spdk_blob_list *clone_entry = NULL; 8067 struct spdk_blob_list *snapshot_clone_entry = NULL; 8068 8069 if (bserrno) { 8070 SPDK_ERRLOG("Failed to sync MD on blob\n"); 8071 ctx->bserrno = bserrno; 8072 delete_snapshot_cleanup_clone(ctx, 0); 8073 return; 8074 } 8075 8076 /* Get snapshot entry for the snapshot we want to remove */ 8077 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 8078 8079 assert(snapshot_entry != NULL); 8080 8081 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 8082 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8083 assert(clone_entry != NULL); 8084 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 8085 snapshot_entry->clone_count--; 8086 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 8087 8088 switch (ctx->snapshot->parent_id) { 8089 case SPDK_BLOBID_INVALID: 8090 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 8091 /* No parent snapshot - just remove clone entry */ 8092 free(clone_entry); 8093 break; 8094 default: 8095 /* This snapshot is at the same time a clone of another snapshot - we need to 8096 * update parent snapshot (remove current clone, add new one inherited from 8097 * the snapshot that is being removed) */ 8098 8099 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8100 * snapshot that we are removing */ 8101 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 8102 &snapshot_clone_entry); 8103 8104 /* Switch clone entry in parent snapshot */ 8105 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 8106 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 8107 free(snapshot_clone_entry); 8108 } 8109 8110 /* Restore md_ro flags */ 8111 ctx->clone->md_ro = ctx->clone_md_ro; 8112 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8113 8114 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 8115 } 8116 8117 static void 8118 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 8119 { 8120 struct delete_snapshot_ctx *ctx = cb_arg; 8121 uint64_t i; 8122 8123 ctx->snapshot->md_ro = false; 8124 8125 if (bserrno) { 8126 SPDK_ERRLOG("Failed to sync MD on clone\n"); 8127 ctx->bserrno = bserrno; 8128 8129 /* Restore snapshot to previous state */ 8130 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8131 if (bserrno != 0) { 8132 delete_snapshot_cleanup_clone(ctx, bserrno); 8133 return; 8134 } 8135 8136 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8137 return; 8138 } 8139 8140 /* Clear cluster map entries for snapshot */ 8141 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8142 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 8143 if (ctx->snapshot->active.clusters[i] != 0) { 8144 ctx->snapshot->active.num_allocated_clusters--; 8145 } 8146 ctx->snapshot->active.clusters[i] = 0; 8147 } 8148 } 8149 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 8150 i < ctx->clone->active.num_extent_pages; i++) { 8151 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 8152 ctx->snapshot->active.extent_pages[i] = 0; 8153 } 8154 } 8155 8156 blob_set_thin_provision(ctx->snapshot); 8157 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 8158 8159 if (ctx->parent_snapshot_entry != NULL) { 8160 ctx->snapshot->back_bs_dev = NULL; 8161 } 8162 8163 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 8164 } 8165 8166 static void 8167 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 8168 { 8169 int bserrno; 8170 8171 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 8172 blob_back_bs_destroy(ctx->clone); 8173 8174 /* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */ 8175 if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 8176 bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot, 8177 BLOB_EXTERNAL_SNAPSHOT_ID); 8178 if (bserrno != 0) { 8179 ctx->bserrno = bserrno; 8180 8181 /* Restore snapshot to previous state */ 8182 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8183 if (bserrno != 0) { 8184 delete_snapshot_cleanup_clone(ctx, bserrno); 8185 return; 8186 } 8187 8188 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8189 return; 8190 } 8191 ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 8192 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8193 /* Do not delete the external snapshot along with this snapshot */ 8194 ctx->snapshot->back_bs_dev = NULL; 8195 ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 8196 } else if (ctx->parent_snapshot_entry != NULL) { 8197 /* ...to parent snapshot */ 8198 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 8199 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8200 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 8201 sizeof(spdk_blob_id), 8202 true); 8203 } else { 8204 /* ...to blobid invalid and zeroes dev */ 8205 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 8206 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 8207 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 8208 } 8209 8210 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 8211 } 8212 8213 static void 8214 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 8215 { 8216 struct delete_snapshot_ctx *ctx = cb_arg; 8217 uint32_t *extent_page; 8218 uint64_t i; 8219 8220 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 8221 i < ctx->clone->active.num_extent_pages; i++) { 8222 if (ctx->snapshot->active.extent_pages[i] == 0) { 8223 /* No extent page to use from snapshot */ 8224 continue; 8225 } 8226 8227 extent_page = &ctx->clone->active.extent_pages[i]; 8228 if (*extent_page == 0) { 8229 /* Copy extent page from snapshot when clone did not have a matching one */ 8230 *extent_page = ctx->snapshot->active.extent_pages[i]; 8231 continue; 8232 } 8233 8234 /* Clone and snapshot both contain partially filled matching extent pages. 8235 * Update the clone extent page in place with cluster map containing the mix of both. */ 8236 ctx->next_extent_page = i + 1; 8237 memset(ctx->page, 0, SPDK_BS_PAGE_SIZE); 8238 8239 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page, 8240 delete_snapshot_update_extent_pages, ctx); 8241 return; 8242 } 8243 delete_snapshot_update_extent_pages_cpl(ctx); 8244 } 8245 8246 static void 8247 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 8248 { 8249 struct delete_snapshot_ctx *ctx = cb_arg; 8250 uint64_t i; 8251 8252 /* Temporarily override md_ro flag for clone for MD modification */ 8253 ctx->clone_md_ro = ctx->clone->md_ro; 8254 ctx->clone->md_ro = false; 8255 8256 if (bserrno) { 8257 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 8258 ctx->bserrno = bserrno; 8259 delete_snapshot_cleanup_clone(ctx, 0); 8260 return; 8261 } 8262 8263 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 8264 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8265 if (ctx->clone->active.clusters[i] == 0) { 8266 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 8267 if (ctx->clone->active.clusters[i] != 0) { 8268 ctx->clone->active.num_allocated_clusters++; 8269 } 8270 } 8271 } 8272 ctx->next_extent_page = 0; 8273 delete_snapshot_update_extent_pages(ctx, 0); 8274 } 8275 8276 static void 8277 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno) 8278 { 8279 struct delete_snapshot_ctx *ctx = cb_arg; 8280 8281 if (bserrno != 0) { 8282 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n", 8283 blob->id, bserrno); 8284 /* That error should not stop us from syncing metadata. */ 8285 } 8286 8287 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8288 } 8289 8290 static void 8291 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 8292 { 8293 struct delete_snapshot_ctx *ctx = cb_arg; 8294 8295 if (bserrno) { 8296 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 8297 ctx->bserrno = bserrno; 8298 delete_snapshot_cleanup_clone(ctx, 0); 8299 return; 8300 } 8301 8302 /* Temporarily override md_ro flag for snapshot for MD modification */ 8303 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 8304 ctx->snapshot->md_ro = false; 8305 8306 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 8307 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 8308 sizeof(spdk_blob_id), true); 8309 if (ctx->bserrno != 0) { 8310 delete_snapshot_cleanup_clone(ctx, 0); 8311 return; 8312 } 8313 8314 if (blob_is_esnap_clone(ctx->snapshot)) { 8315 blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false, 8316 delete_snapshot_esnap_channels_destroyed_cb, 8317 ctx); 8318 return; 8319 } 8320 8321 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8322 } 8323 8324 static void 8325 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 8326 { 8327 struct delete_snapshot_ctx *ctx = cb_arg; 8328 8329 if (bserrno) { 8330 SPDK_ERRLOG("Failed to open clone\n"); 8331 ctx->bserrno = bserrno; 8332 delete_snapshot_cleanup_snapshot(ctx, 0); 8333 return; 8334 } 8335 8336 ctx->clone = clone; 8337 8338 if (clone->locked_operation_in_progress) { 8339 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 8340 ctx->bserrno = -EBUSY; 8341 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8342 return; 8343 } 8344 8345 clone->locked_operation_in_progress = true; 8346 8347 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 8348 } 8349 8350 static void 8351 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 8352 { 8353 struct spdk_blob_list *snapshot_entry = NULL; 8354 struct spdk_blob_list *clone_entry = NULL; 8355 struct spdk_blob_list *snapshot_clone_entry = NULL; 8356 8357 /* Get snapshot entry for the snapshot we want to remove */ 8358 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 8359 8360 assert(snapshot_entry != NULL); 8361 8362 /* Get clone of the snapshot (at this point there can be only one clone) */ 8363 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8364 assert(snapshot_entry->clone_count == 1); 8365 assert(clone_entry != NULL); 8366 8367 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8368 * snapshot that we are removing */ 8369 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 8370 &snapshot_clone_entry); 8371 8372 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 8373 } 8374 8375 static void 8376 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 8377 { 8378 spdk_bs_sequence_t *seq = cb_arg; 8379 struct spdk_blob_list *snapshot_entry = NULL; 8380 uint32_t page_num; 8381 8382 if (bserrno) { 8383 SPDK_ERRLOG("Failed to remove blob\n"); 8384 bs_sequence_finish(seq, bserrno); 8385 return; 8386 } 8387 8388 /* Remove snapshot from the list */ 8389 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8390 if (snapshot_entry != NULL) { 8391 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 8392 free(snapshot_entry); 8393 } 8394 8395 page_num = bs_blobid_to_page(blob->id); 8396 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 8397 blob->state = SPDK_BLOB_STATE_DIRTY; 8398 blob->active.num_pages = 0; 8399 blob_resize(blob, 0); 8400 8401 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 8402 } 8403 8404 static int 8405 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 8406 { 8407 struct spdk_blob_list *snapshot_entry = NULL; 8408 struct spdk_blob_list *clone_entry = NULL; 8409 struct spdk_blob *clone = NULL; 8410 bool has_one_clone = false; 8411 8412 /* Check if this is a snapshot with clones */ 8413 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8414 if (snapshot_entry != NULL) { 8415 if (snapshot_entry->clone_count > 1) { 8416 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 8417 return -EBUSY; 8418 } else if (snapshot_entry->clone_count == 1) { 8419 has_one_clone = true; 8420 } 8421 } 8422 8423 /* Check if someone has this blob open (besides this delete context): 8424 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 8425 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 8426 * and that is ok, because we will update it accordingly */ 8427 if (blob->open_ref <= 2 && has_one_clone) { 8428 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8429 assert(clone_entry != NULL); 8430 clone = blob_lookup(blob->bs, clone_entry->id); 8431 8432 if (blob->open_ref == 2 && clone == NULL) { 8433 /* Clone is closed and someone else opened this blob */ 8434 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8435 return -EBUSY; 8436 } 8437 8438 *update_clone = true; 8439 return 0; 8440 } 8441 8442 if (blob->open_ref > 1) { 8443 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8444 return -EBUSY; 8445 } 8446 8447 assert(has_one_clone == false); 8448 *update_clone = false; 8449 return 0; 8450 } 8451 8452 static void 8453 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 8454 { 8455 spdk_bs_sequence_t *seq = cb_arg; 8456 8457 bs_sequence_finish(seq, -ENOMEM); 8458 } 8459 8460 static void 8461 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 8462 { 8463 spdk_bs_sequence_t *seq = cb_arg; 8464 struct delete_snapshot_ctx *ctx; 8465 bool update_clone = false; 8466 8467 if (bserrno != 0) { 8468 bs_sequence_finish(seq, bserrno); 8469 return; 8470 } 8471 8472 blob_verify_md_op(blob); 8473 8474 ctx = calloc(1, sizeof(*ctx)); 8475 if (ctx == NULL) { 8476 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 8477 return; 8478 } 8479 8480 ctx->snapshot = blob; 8481 ctx->cb_fn = bs_delete_blob_finish; 8482 ctx->cb_arg = seq; 8483 8484 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 8485 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 8486 if (ctx->bserrno) { 8487 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8488 return; 8489 } 8490 8491 if (blob->locked_operation_in_progress) { 8492 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 8493 ctx->bserrno = -EBUSY; 8494 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8495 return; 8496 } 8497 8498 blob->locked_operation_in_progress = true; 8499 8500 /* 8501 * Remove the blob from the blob_store list now, to ensure it does not 8502 * get returned after this point by blob_lookup(). 8503 */ 8504 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 8505 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 8506 8507 if (update_clone) { 8508 ctx->page = spdk_zmalloc(blob->bs->md_page_size, 0, NULL, SPDK_ENV_NUMA_ID_ANY, 8509 SPDK_MALLOC_DMA); 8510 if (!ctx->page) { 8511 ctx->bserrno = -ENOMEM; 8512 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8513 return; 8514 } 8515 /* This blob is a snapshot with active clone - update clone first */ 8516 update_clone_on_snapshot_deletion(blob, ctx); 8517 } else { 8518 /* This blob does not have any clones - just remove it */ 8519 bs_blob_list_remove(blob); 8520 bs_delete_blob_finish(seq, blob, 0); 8521 free(ctx); 8522 } 8523 } 8524 8525 void 8526 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8527 spdk_blob_op_complete cb_fn, void *cb_arg) 8528 { 8529 struct spdk_bs_cpl cpl; 8530 spdk_bs_sequence_t *seq; 8531 8532 SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid); 8533 8534 assert(spdk_get_thread() == bs->md_thread); 8535 8536 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8537 cpl.u.blob_basic.cb_fn = cb_fn; 8538 cpl.u.blob_basic.cb_arg = cb_arg; 8539 8540 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8541 if (!seq) { 8542 cb_fn(cb_arg, -ENOMEM); 8543 return; 8544 } 8545 8546 spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq); 8547 } 8548 8549 /* END spdk_bs_delete_blob */ 8550 8551 /* START spdk_bs_open_blob */ 8552 8553 static void 8554 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8555 { 8556 struct spdk_blob *blob = cb_arg; 8557 struct spdk_blob *existing; 8558 8559 if (bserrno != 0) { 8560 blob_free(blob); 8561 seq->cpl.u.blob_handle.blob = NULL; 8562 bs_sequence_finish(seq, bserrno); 8563 return; 8564 } 8565 8566 existing = blob_lookup(blob->bs, blob->id); 8567 if (existing) { 8568 blob_free(blob); 8569 existing->open_ref++; 8570 seq->cpl.u.blob_handle.blob = existing; 8571 bs_sequence_finish(seq, 0); 8572 return; 8573 } 8574 8575 blob->open_ref++; 8576 8577 spdk_bit_array_set(blob->bs->open_blobids, blob->id); 8578 RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob); 8579 8580 bs_sequence_finish(seq, bserrno); 8581 } 8582 8583 static inline void 8584 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst) 8585 { 8586 #define FIELD_OK(field) \ 8587 offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size 8588 8589 #define SET_FIELD(field) \ 8590 if (FIELD_OK(field)) { \ 8591 dst->field = src->field; \ 8592 } \ 8593 8594 SET_FIELD(clear_method); 8595 SET_FIELD(esnap_ctx); 8596 8597 dst->opts_size = src->opts_size; 8598 8599 /* You should not remove this statement, but need to update the assert statement 8600 * if you add a new field, and also add a corresponding SET_FIELD statement */ 8601 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size"); 8602 8603 #undef FIELD_OK 8604 #undef SET_FIELD 8605 } 8606 8607 static void 8608 bs_open_blob(struct spdk_blob_store *bs, 8609 spdk_blob_id blobid, 8610 struct spdk_blob_open_opts *opts, 8611 spdk_blob_op_with_handle_complete cb_fn, 8612 void *cb_arg) 8613 { 8614 struct spdk_blob *blob; 8615 struct spdk_bs_cpl cpl; 8616 struct spdk_blob_open_opts opts_local; 8617 spdk_bs_sequence_t *seq; 8618 uint32_t page_num; 8619 8620 SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid); 8621 assert(spdk_get_thread() == bs->md_thread); 8622 8623 page_num = bs_blobid_to_page(blobid); 8624 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 8625 /* Invalid blobid */ 8626 cb_fn(cb_arg, NULL, -ENOENT); 8627 return; 8628 } 8629 8630 blob = blob_lookup(bs, blobid); 8631 if (blob) { 8632 blob->open_ref++; 8633 cb_fn(cb_arg, blob, 0); 8634 return; 8635 } 8636 8637 blob = blob_alloc(bs, blobid); 8638 if (!blob) { 8639 cb_fn(cb_arg, NULL, -ENOMEM); 8640 return; 8641 } 8642 8643 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 8644 if (opts) { 8645 blob_open_opts_copy(opts, &opts_local); 8646 } 8647 8648 blob->clear_method = opts_local.clear_method; 8649 8650 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 8651 cpl.u.blob_handle.cb_fn = cb_fn; 8652 cpl.u.blob_handle.cb_arg = cb_arg; 8653 cpl.u.blob_handle.blob = blob; 8654 cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx; 8655 8656 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8657 if (!seq) { 8658 blob_free(blob); 8659 cb_fn(cb_arg, NULL, -ENOMEM); 8660 return; 8661 } 8662 8663 blob_load(seq, blob, bs_open_blob_cpl, blob); 8664 } 8665 8666 void 8667 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8668 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8669 { 8670 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 8671 } 8672 8673 void 8674 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 8675 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8676 { 8677 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 8678 } 8679 8680 /* END spdk_bs_open_blob */ 8681 8682 /* START spdk_blob_set_read_only */ 8683 int 8684 spdk_blob_set_read_only(struct spdk_blob *blob) 8685 { 8686 blob_verify_md_op(blob); 8687 8688 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 8689 8690 blob->state = SPDK_BLOB_STATE_DIRTY; 8691 return 0; 8692 } 8693 /* END spdk_blob_set_read_only */ 8694 8695 /* START spdk_blob_sync_md */ 8696 8697 static void 8698 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8699 { 8700 struct spdk_blob *blob = cb_arg; 8701 8702 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 8703 blob->data_ro = true; 8704 blob->md_ro = true; 8705 } 8706 8707 bs_sequence_finish(seq, bserrno); 8708 } 8709 8710 static void 8711 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8712 { 8713 struct spdk_bs_cpl cpl; 8714 spdk_bs_sequence_t *seq; 8715 8716 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8717 cpl.u.blob_basic.cb_fn = cb_fn; 8718 cpl.u.blob_basic.cb_arg = cb_arg; 8719 8720 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8721 if (!seq) { 8722 cb_fn(cb_arg, -ENOMEM); 8723 return; 8724 } 8725 8726 blob_persist(seq, blob, blob_sync_md_cpl, blob); 8727 } 8728 8729 void 8730 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8731 { 8732 blob_verify_md_op(blob); 8733 8734 SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id); 8735 8736 if (blob->md_ro) { 8737 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 8738 cb_fn(cb_arg, 0); 8739 return; 8740 } 8741 8742 blob_sync_md(blob, cb_fn, cb_arg); 8743 } 8744 8745 /* END spdk_blob_sync_md */ 8746 8747 struct spdk_blob_cluster_op_ctx { 8748 struct spdk_thread *thread; 8749 struct spdk_blob *blob; 8750 uint32_t cluster_num; /* cluster index in blob */ 8751 uint32_t cluster; /* cluster on disk */ 8752 uint32_t extent_page; /* extent page on disk */ 8753 struct spdk_blob_md_page *page; /* preallocated extent page */ 8754 int rc; 8755 spdk_blob_op_complete cb_fn; 8756 void *cb_arg; 8757 }; 8758 8759 static void 8760 blob_op_cluster_msg_cpl(void *arg) 8761 { 8762 struct spdk_blob_cluster_op_ctx *ctx = arg; 8763 8764 ctx->cb_fn(ctx->cb_arg, ctx->rc); 8765 free(ctx); 8766 } 8767 8768 static void 8769 blob_op_cluster_msg_cb(void *arg, int bserrno) 8770 { 8771 struct spdk_blob_cluster_op_ctx *ctx = arg; 8772 8773 ctx->rc = bserrno; 8774 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8775 } 8776 8777 static void 8778 blob_insert_new_ep_cb(void *arg, int bserrno) 8779 { 8780 struct spdk_blob_cluster_op_ctx *ctx = arg; 8781 uint32_t *extent_page; 8782 8783 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8784 *extent_page = ctx->extent_page; 8785 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8786 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8787 } 8788 8789 struct spdk_blob_write_extent_page_ctx { 8790 struct spdk_blob_store *bs; 8791 8792 uint32_t extent; 8793 struct spdk_blob_md_page *page; 8794 }; 8795 8796 static void 8797 blob_free_cluster_msg_cb(void *arg, int bserrno) 8798 { 8799 struct spdk_blob_cluster_op_ctx *ctx = arg; 8800 8801 spdk_spin_lock(&ctx->blob->bs->used_lock); 8802 bs_release_cluster(ctx->blob->bs, ctx->cluster); 8803 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8804 8805 ctx->rc = bserrno; 8806 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8807 } 8808 8809 static void 8810 blob_free_cluster_update_ep_cb(void *arg, int bserrno) 8811 { 8812 struct spdk_blob_cluster_op_ctx *ctx = arg; 8813 8814 if (bserrno != 0 || ctx->blob->bs->clean == 0) { 8815 blob_free_cluster_msg_cb(ctx, bserrno); 8816 return; 8817 } 8818 8819 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8820 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8821 } 8822 8823 static void 8824 blob_free_cluster_free_ep_cb(void *arg, int bserrno) 8825 { 8826 struct spdk_blob_cluster_op_ctx *ctx = arg; 8827 8828 spdk_spin_lock(&ctx->blob->bs->used_lock); 8829 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8830 bs_release_md_page(ctx->blob->bs, ctx->extent_page); 8831 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8832 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8833 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8834 } 8835 8836 static void 8837 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8838 { 8839 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8840 8841 free(ctx); 8842 bs_sequence_finish(seq, bserrno); 8843 } 8844 8845 static void 8846 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8847 { 8848 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8849 8850 if (bserrno != 0) { 8851 blob_persist_extent_page_cpl(seq, ctx, bserrno); 8852 return; 8853 } 8854 bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent), 8855 bs_byte_to_lba(ctx->bs, ctx->bs->md_page_size), 8856 blob_persist_extent_page_cpl, ctx); 8857 } 8858 8859 static void 8860 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 8861 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 8862 { 8863 struct spdk_blob_write_extent_page_ctx *ctx; 8864 spdk_bs_sequence_t *seq; 8865 struct spdk_bs_cpl cpl; 8866 8867 ctx = calloc(1, sizeof(*ctx)); 8868 if (!ctx) { 8869 cb_fn(cb_arg, -ENOMEM); 8870 return; 8871 } 8872 ctx->bs = blob->bs; 8873 ctx->extent = extent; 8874 ctx->page = page; 8875 8876 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8877 cpl.u.blob_basic.cb_fn = cb_fn; 8878 cpl.u.blob_basic.cb_arg = cb_arg; 8879 8880 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8881 if (!seq) { 8882 free(ctx); 8883 cb_fn(cb_arg, -ENOMEM); 8884 return; 8885 } 8886 8887 assert(page); 8888 page->next = SPDK_INVALID_MD_PAGE; 8889 page->id = blob->id; 8890 page->sequence_num = 0; 8891 8892 blob_serialize_extent_page(blob, cluster_num, page); 8893 8894 page->crc = blob_md_page_calc_crc(page); 8895 8896 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true); 8897 8898 bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx); 8899 } 8900 8901 static void 8902 blob_insert_cluster_msg(void *arg) 8903 { 8904 struct spdk_blob_cluster_op_ctx *ctx = arg; 8905 uint32_t *extent_page; 8906 8907 ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 8908 if (ctx->rc != 0) { 8909 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8910 return; 8911 } 8912 8913 if (ctx->blob->use_extent_table == false) { 8914 /* Extent table is not used, proceed with sync of md that will only use extents_rle. */ 8915 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8916 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8917 return; 8918 } 8919 8920 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8921 if (*extent_page == 0) { 8922 /* Extent page requires allocation. 8923 * It was already claimed in the used_md_pages map and placed in ctx. */ 8924 assert(ctx->extent_page != 0); 8925 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8926 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 8927 blob_insert_new_ep_cb, ctx); 8928 } else { 8929 /* It is possible for original thread to allocate extent page for 8930 * different cluster in the same extent page. In such case proceed with 8931 * updating the existing extent page, but release the additional one. */ 8932 if (ctx->extent_page != 0) { 8933 spdk_spin_lock(&ctx->blob->bs->used_lock); 8934 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8935 bs_release_md_page(ctx->blob->bs, ctx->extent_page); 8936 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8937 ctx->extent_page = 0; 8938 } 8939 /* Extent page already allocated. 8940 * Every cluster allocation, requires just an update of single extent page. */ 8941 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 8942 blob_op_cluster_msg_cb, ctx); 8943 } 8944 } 8945 8946 static void 8947 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 8948 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page, 8949 spdk_blob_op_complete cb_fn, void *cb_arg) 8950 { 8951 struct spdk_blob_cluster_op_ctx *ctx; 8952 8953 ctx = calloc(1, sizeof(*ctx)); 8954 if (ctx == NULL) { 8955 cb_fn(cb_arg, -ENOMEM); 8956 return; 8957 } 8958 8959 ctx->thread = spdk_get_thread(); 8960 ctx->blob = blob; 8961 ctx->cluster_num = cluster_num; 8962 ctx->cluster = cluster; 8963 ctx->extent_page = extent_page; 8964 ctx->page = page; 8965 ctx->cb_fn = cb_fn; 8966 ctx->cb_arg = cb_arg; 8967 8968 spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx); 8969 } 8970 8971 static void 8972 blob_free_cluster_msg(void *arg) 8973 { 8974 struct spdk_blob_cluster_op_ctx *ctx = arg; 8975 uint32_t *extent_page; 8976 uint32_t start_cluster_idx; 8977 bool free_extent_page = true; 8978 size_t i; 8979 8980 ctx->cluster = bs_lba_to_cluster(ctx->blob->bs, ctx->blob->active.clusters[ctx->cluster_num]); 8981 8982 /* There were concurrent unmaps to the same cluster, only release the cluster on the first one */ 8983 if (ctx->cluster == 0) { 8984 blob_op_cluster_msg_cb(ctx, 0); 8985 return; 8986 } 8987 8988 ctx->blob->active.clusters[ctx->cluster_num] = 0; 8989 if (ctx->cluster != 0) { 8990 ctx->blob->active.num_allocated_clusters--; 8991 } 8992 8993 if (ctx->blob->use_extent_table == false) { 8994 /* Extent table is not used, proceed with sync of md that will only use extents_rle. */ 8995 spdk_spin_lock(&ctx->blob->bs->used_lock); 8996 bs_release_cluster(ctx->blob->bs, ctx->cluster); 8997 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8998 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8999 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 9000 return; 9001 } 9002 9003 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 9004 9005 /* There shouldn't be parallel release operations on same cluster */ 9006 assert(*extent_page == ctx->extent_page); 9007 9008 start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP; 9009 for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) { 9010 if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) { 9011 free_extent_page = false; 9012 break; 9013 } 9014 } 9015 9016 if (free_extent_page) { 9017 assert(ctx->extent_page != 0); 9018 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 9019 ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0; 9020 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 9021 blob_free_cluster_free_ep_cb, ctx); 9022 } else { 9023 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 9024 blob_free_cluster_update_ep_cb, ctx); 9025 } 9026 } 9027 9028 9029 static void 9030 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page, 9031 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 9032 { 9033 struct spdk_blob_cluster_op_ctx *ctx; 9034 9035 ctx = calloc(1, sizeof(*ctx)); 9036 if (ctx == NULL) { 9037 cb_fn(cb_arg, -ENOMEM); 9038 return; 9039 } 9040 9041 ctx->thread = spdk_get_thread(); 9042 ctx->blob = blob; 9043 ctx->cluster_num = cluster_num; 9044 ctx->extent_page = extent_page; 9045 ctx->page = page; 9046 ctx->cb_fn = cb_fn; 9047 ctx->cb_arg = cb_arg; 9048 9049 spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx); 9050 } 9051 9052 /* START spdk_blob_close */ 9053 9054 static void 9055 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9056 { 9057 struct spdk_blob *blob = cb_arg; 9058 9059 if (bserrno == 0) { 9060 blob->open_ref--; 9061 if (blob->open_ref == 0) { 9062 /* 9063 * Blobs with active.num_pages == 0 are deleted blobs. 9064 * these blobs are removed from the blob_store list 9065 * when the deletion process starts - so don't try to 9066 * remove them again. 9067 */ 9068 if (blob->active.num_pages > 0) { 9069 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 9070 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 9071 } 9072 blob_free(blob); 9073 } 9074 } 9075 9076 bs_sequence_finish(seq, bserrno); 9077 } 9078 9079 static void 9080 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno) 9081 { 9082 spdk_bs_sequence_t *seq = cb_arg; 9083 9084 if (bserrno != 0) { 9085 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n", 9086 blob->id, bserrno); 9087 bs_sequence_finish(seq, bserrno); 9088 return; 9089 } 9090 9091 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n", 9092 blob->id, spdk_thread_get_name(spdk_get_thread())); 9093 9094 /* Sync metadata */ 9095 blob_persist(seq, blob, blob_close_cpl, blob); 9096 } 9097 9098 void 9099 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 9100 { 9101 struct spdk_bs_cpl cpl; 9102 spdk_bs_sequence_t *seq; 9103 9104 blob_verify_md_op(blob); 9105 9106 SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id); 9107 9108 if (blob->open_ref == 0) { 9109 cb_fn(cb_arg, -EBADF); 9110 return; 9111 } 9112 9113 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 9114 cpl.u.blob_basic.cb_fn = cb_fn; 9115 cpl.u.blob_basic.cb_arg = cb_arg; 9116 9117 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 9118 if (!seq) { 9119 cb_fn(cb_arg, -ENOMEM); 9120 return; 9121 } 9122 9123 if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) { 9124 blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq); 9125 return; 9126 } 9127 9128 /* Sync metadata */ 9129 blob_persist(seq, blob, blob_close_cpl, blob); 9130 } 9131 9132 /* END spdk_blob_close */ 9133 9134 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 9135 { 9136 return spdk_get_io_channel(bs); 9137 } 9138 9139 void 9140 spdk_bs_free_io_channel(struct spdk_io_channel *channel) 9141 { 9142 blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel)); 9143 spdk_put_io_channel(channel); 9144 } 9145 9146 void 9147 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 9148 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9149 { 9150 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9151 SPDK_BLOB_UNMAP); 9152 } 9153 9154 void 9155 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 9156 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9157 { 9158 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9159 SPDK_BLOB_WRITE_ZEROES); 9160 } 9161 9162 void 9163 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 9164 void *payload, uint64_t offset, uint64_t length, 9165 spdk_blob_op_complete cb_fn, void *cb_arg) 9166 { 9167 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9168 SPDK_BLOB_WRITE); 9169 } 9170 9171 void 9172 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 9173 void *payload, uint64_t offset, uint64_t length, 9174 spdk_blob_op_complete cb_fn, void *cb_arg) 9175 { 9176 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9177 SPDK_BLOB_READ); 9178 } 9179 9180 void 9181 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 9182 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9183 spdk_blob_op_complete cb_fn, void *cb_arg) 9184 { 9185 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 9186 } 9187 9188 void 9189 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 9190 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9191 spdk_blob_op_complete cb_fn, void *cb_arg) 9192 { 9193 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 9194 } 9195 9196 void 9197 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9198 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9199 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9200 { 9201 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 9202 io_opts); 9203 } 9204 9205 void 9206 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9207 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9208 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9209 { 9210 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 9211 io_opts); 9212 } 9213 9214 struct spdk_bs_iter_ctx { 9215 int64_t page_num; 9216 struct spdk_blob_store *bs; 9217 9218 spdk_blob_op_with_handle_complete cb_fn; 9219 void *cb_arg; 9220 }; 9221 9222 static void 9223 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 9224 { 9225 struct spdk_bs_iter_ctx *ctx = cb_arg; 9226 struct spdk_blob_store *bs = ctx->bs; 9227 spdk_blob_id id; 9228 9229 if (bserrno == 0) { 9230 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 9231 free(ctx); 9232 return; 9233 } 9234 9235 ctx->page_num++; 9236 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 9237 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 9238 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 9239 free(ctx); 9240 return; 9241 } 9242 9243 id = bs_page_to_blobid(ctx->page_num); 9244 9245 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 9246 } 9247 9248 void 9249 spdk_bs_iter_first(struct spdk_blob_store *bs, 9250 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9251 { 9252 struct spdk_bs_iter_ctx *ctx; 9253 9254 ctx = calloc(1, sizeof(*ctx)); 9255 if (!ctx) { 9256 cb_fn(cb_arg, NULL, -ENOMEM); 9257 return; 9258 } 9259 9260 ctx->page_num = -1; 9261 ctx->bs = bs; 9262 ctx->cb_fn = cb_fn; 9263 ctx->cb_arg = cb_arg; 9264 9265 bs_iter_cpl(ctx, NULL, -1); 9266 } 9267 9268 static void 9269 bs_iter_close_cpl(void *cb_arg, int bserrno) 9270 { 9271 struct spdk_bs_iter_ctx *ctx = cb_arg; 9272 9273 bs_iter_cpl(ctx, NULL, -1); 9274 } 9275 9276 void 9277 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 9278 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9279 { 9280 struct spdk_bs_iter_ctx *ctx; 9281 9282 assert(blob != NULL); 9283 9284 ctx = calloc(1, sizeof(*ctx)); 9285 if (!ctx) { 9286 cb_fn(cb_arg, NULL, -ENOMEM); 9287 return; 9288 } 9289 9290 ctx->page_num = bs_blobid_to_page(blob->id); 9291 ctx->bs = bs; 9292 ctx->cb_fn = cb_fn; 9293 ctx->cb_arg = cb_arg; 9294 9295 /* Close the existing blob */ 9296 spdk_blob_close(blob, bs_iter_close_cpl, ctx); 9297 } 9298 9299 static int 9300 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 9301 uint16_t value_len, bool internal) 9302 { 9303 struct spdk_xattr_tailq *xattrs; 9304 struct spdk_xattr *xattr; 9305 size_t desc_size; 9306 void *tmp; 9307 9308 blob_verify_md_op(blob); 9309 9310 if (blob->md_ro) { 9311 return -EPERM; 9312 } 9313 9314 desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len; 9315 if (desc_size > SPDK_BS_MAX_DESC_SIZE) { 9316 SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fix into single page %zu\n", name, 9317 desc_size, SPDK_BS_MAX_DESC_SIZE); 9318 return -ENOMEM; 9319 } 9320 9321 if (internal) { 9322 xattrs = &blob->xattrs_internal; 9323 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR; 9324 } else { 9325 xattrs = &blob->xattrs; 9326 } 9327 9328 TAILQ_FOREACH(xattr, xattrs, link) { 9329 if (!strcmp(name, xattr->name)) { 9330 tmp = malloc(value_len); 9331 if (!tmp) { 9332 return -ENOMEM; 9333 } 9334 9335 free(xattr->value); 9336 xattr->value_len = value_len; 9337 xattr->value = tmp; 9338 memcpy(xattr->value, value, value_len); 9339 9340 blob->state = SPDK_BLOB_STATE_DIRTY; 9341 9342 return 0; 9343 } 9344 } 9345 9346 xattr = calloc(1, sizeof(*xattr)); 9347 if (!xattr) { 9348 return -ENOMEM; 9349 } 9350 9351 xattr->name = strdup(name); 9352 if (!xattr->name) { 9353 free(xattr); 9354 return -ENOMEM; 9355 } 9356 9357 xattr->value_len = value_len; 9358 xattr->value = malloc(value_len); 9359 if (!xattr->value) { 9360 free(xattr->name); 9361 free(xattr); 9362 return -ENOMEM; 9363 } 9364 memcpy(xattr->value, value, value_len); 9365 TAILQ_INSERT_TAIL(xattrs, xattr, link); 9366 9367 blob->state = SPDK_BLOB_STATE_DIRTY; 9368 9369 return 0; 9370 } 9371 9372 int 9373 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 9374 uint16_t value_len) 9375 { 9376 return blob_set_xattr(blob, name, value, value_len, false); 9377 } 9378 9379 static int 9380 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal) 9381 { 9382 struct spdk_xattr_tailq *xattrs; 9383 struct spdk_xattr *xattr; 9384 9385 blob_verify_md_op(blob); 9386 9387 if (blob->md_ro) { 9388 return -EPERM; 9389 } 9390 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 9391 9392 TAILQ_FOREACH(xattr, xattrs, link) { 9393 if (!strcmp(name, xattr->name)) { 9394 TAILQ_REMOVE(xattrs, xattr, link); 9395 free(xattr->value); 9396 free(xattr->name); 9397 free(xattr); 9398 9399 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) { 9400 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR; 9401 } 9402 blob->state = SPDK_BLOB_STATE_DIRTY; 9403 9404 return 0; 9405 } 9406 } 9407 9408 return -ENOENT; 9409 } 9410 9411 int 9412 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name) 9413 { 9414 return blob_remove_xattr(blob, name, false); 9415 } 9416 9417 static int 9418 blob_get_xattr_value(struct spdk_blob *blob, const char *name, 9419 const void **value, size_t *value_len, bool internal) 9420 { 9421 struct spdk_xattr *xattr; 9422 struct spdk_xattr_tailq *xattrs; 9423 9424 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 9425 9426 TAILQ_FOREACH(xattr, xattrs, link) { 9427 if (!strcmp(name, xattr->name)) { 9428 *value = xattr->value; 9429 *value_len = xattr->value_len; 9430 return 0; 9431 } 9432 } 9433 return -ENOENT; 9434 } 9435 9436 int 9437 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 9438 const void **value, size_t *value_len) 9439 { 9440 blob_verify_md_op(blob); 9441 9442 return blob_get_xattr_value(blob, name, value, value_len, false); 9443 } 9444 9445 struct spdk_xattr_names { 9446 uint32_t count; 9447 const char *names[0]; 9448 }; 9449 9450 static int 9451 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 9452 { 9453 struct spdk_xattr *xattr; 9454 int count = 0; 9455 9456 TAILQ_FOREACH(xattr, xattrs, link) { 9457 count++; 9458 } 9459 9460 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 9461 if (*names == NULL) { 9462 return -ENOMEM; 9463 } 9464 9465 TAILQ_FOREACH(xattr, xattrs, link) { 9466 (*names)->names[(*names)->count++] = xattr->name; 9467 } 9468 9469 return 0; 9470 } 9471 9472 int 9473 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 9474 { 9475 blob_verify_md_op(blob); 9476 9477 return blob_get_xattr_names(&blob->xattrs, names); 9478 } 9479 9480 uint32_t 9481 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 9482 { 9483 assert(names != NULL); 9484 9485 return names->count; 9486 } 9487 9488 const char * 9489 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 9490 { 9491 if (index >= names->count) { 9492 return NULL; 9493 } 9494 9495 return names->names[index]; 9496 } 9497 9498 void 9499 spdk_xattr_names_free(struct spdk_xattr_names *names) 9500 { 9501 free(names); 9502 } 9503 9504 struct spdk_bs_type 9505 spdk_bs_get_bstype(struct spdk_blob_store *bs) 9506 { 9507 return bs->bstype; 9508 } 9509 9510 void 9511 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 9512 { 9513 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 9514 } 9515 9516 bool 9517 spdk_blob_is_read_only(struct spdk_blob *blob) 9518 { 9519 assert(blob != NULL); 9520 return (blob->data_ro || blob->md_ro); 9521 } 9522 9523 bool 9524 spdk_blob_is_snapshot(struct spdk_blob *blob) 9525 { 9526 struct spdk_blob_list *snapshot_entry; 9527 9528 assert(blob != NULL); 9529 9530 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 9531 if (snapshot_entry == NULL) { 9532 return false; 9533 } 9534 9535 return true; 9536 } 9537 9538 bool 9539 spdk_blob_is_clone(struct spdk_blob *blob) 9540 { 9541 assert(blob != NULL); 9542 9543 if (blob->parent_id != SPDK_BLOBID_INVALID && 9544 blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 9545 assert(spdk_blob_is_thin_provisioned(blob)); 9546 return true; 9547 } 9548 9549 return false; 9550 } 9551 9552 bool 9553 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 9554 { 9555 assert(blob != NULL); 9556 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 9557 } 9558 9559 bool 9560 spdk_blob_is_esnap_clone(const struct spdk_blob *blob) 9561 { 9562 return blob_is_esnap_clone(blob); 9563 } 9564 9565 static void 9566 blob_update_clear_method(struct spdk_blob *blob) 9567 { 9568 enum blob_clear_method stored_cm; 9569 9570 assert(blob != NULL); 9571 9572 /* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored 9573 * in metadata previously. If something other than the default was 9574 * specified, ignore stored value and used what was passed in. 9575 */ 9576 stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT); 9577 9578 if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) { 9579 blob->clear_method = stored_cm; 9580 } else if (blob->clear_method != stored_cm) { 9581 SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n", 9582 blob->clear_method, stored_cm); 9583 } 9584 } 9585 9586 spdk_blob_id 9587 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id) 9588 { 9589 struct spdk_blob_list *snapshot_entry = NULL; 9590 struct spdk_blob_list *clone_entry = NULL; 9591 9592 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 9593 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 9594 if (clone_entry->id == blob_id) { 9595 return snapshot_entry->id; 9596 } 9597 } 9598 } 9599 9600 return SPDK_BLOBID_INVALID; 9601 } 9602 9603 int 9604 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, 9605 size_t *count) 9606 { 9607 struct spdk_blob_list *snapshot_entry, *clone_entry; 9608 size_t n; 9609 9610 snapshot_entry = bs_get_snapshot_entry(bs, blobid); 9611 if (snapshot_entry == NULL) { 9612 *count = 0; 9613 return 0; 9614 } 9615 9616 if (ids == NULL || *count < snapshot_entry->clone_count) { 9617 *count = snapshot_entry->clone_count; 9618 return -ENOMEM; 9619 } 9620 *count = snapshot_entry->clone_count; 9621 9622 n = 0; 9623 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 9624 ids[n++] = clone_entry->id; 9625 } 9626 9627 return 0; 9628 } 9629 9630 static void 9631 bs_load_grow_continue(struct spdk_bs_load_ctx *ctx) 9632 { 9633 int rc; 9634 9635 if (ctx->super->size == 0) { 9636 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9637 } 9638 9639 if (ctx->super->io_unit_size == 0) { 9640 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 9641 } 9642 if (ctx->super->md_page_size == 0) { 9643 ctx->super->md_page_size = SPDK_BS_PAGE_SIZE; 9644 } 9645 9646 /* Parse the super block */ 9647 ctx->bs->clean = 1; 9648 ctx->bs->cluster_sz = ctx->super->cluster_size; 9649 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 9650 ctx->bs->md_page_size = ctx->super->md_page_size; 9651 ctx->bs->io_unit_size = ctx->super->io_unit_size; 9652 bs_init_per_cluster_fields(ctx->bs); 9653 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 9654 if (rc < 0) { 9655 bs_load_ctx_fail(ctx, -ENOMEM); 9656 return; 9657 } 9658 ctx->bs->md_start = ctx->super->md_start; 9659 ctx->bs->md_len = ctx->super->md_len; 9660 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 9661 if (rc < 0) { 9662 bs_load_ctx_fail(ctx, -ENOMEM); 9663 return; 9664 } 9665 9666 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 9667 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 9668 ctx->bs->super_blob = ctx->super->super_blob; 9669 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 9670 9671 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) { 9672 SPDK_ERRLOG("Can not grow an unclean blobstore, please load it normally to clean it.\n"); 9673 bs_load_ctx_fail(ctx, -EIO); 9674 return; 9675 } else { 9676 bs_load_read_used_pages(ctx); 9677 } 9678 } 9679 9680 static void 9681 bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9682 { 9683 struct spdk_bs_load_ctx *ctx = cb_arg; 9684 9685 if (bserrno != 0) { 9686 bs_load_ctx_fail(ctx, bserrno); 9687 return; 9688 } 9689 bs_load_grow_continue(ctx); 9690 } 9691 9692 static void 9693 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9694 { 9695 struct spdk_bs_load_ctx *ctx = cb_arg; 9696 9697 if (bserrno != 0) { 9698 bs_load_ctx_fail(ctx, bserrno); 9699 return; 9700 } 9701 9702 spdk_free(ctx->mask); 9703 9704 bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 9705 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 9706 bs_load_grow_super_write_cpl, ctx); 9707 } 9708 9709 static void 9710 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9711 { 9712 struct spdk_bs_load_ctx *ctx = cb_arg; 9713 uint64_t lba, lba_count; 9714 uint64_t dev_size; 9715 uint64_t total_clusters; 9716 9717 if (bserrno != 0) { 9718 bs_load_ctx_fail(ctx, bserrno); 9719 return; 9720 } 9721 9722 /* The type must be correct */ 9723 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 9724 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 9725 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 9726 struct spdk_blob_md_page) * 8)); 9727 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9728 total_clusters = dev_size / ctx->super->cluster_size; 9729 ctx->mask->length = total_clusters; 9730 9731 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 9732 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 9733 bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count, 9734 bs_load_grow_used_clusters_write_cpl, ctx); 9735 } 9736 9737 static void 9738 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx) 9739 { 9740 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 9741 uint64_t lba, lba_count, mask_size; 9742 9743 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9744 total_clusters = dev_size / ctx->super->cluster_size; 9745 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 9746 spdk_divide_round_up(total_clusters, 8), 9747 ctx->super->md_page_size); 9748 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 9749 /* No necessary to grow or no space to grow */ 9750 if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) { 9751 SPDK_DEBUGLOG(blob, "No grow\n"); 9752 bs_load_grow_continue(ctx); 9753 return; 9754 } 9755 9756 SPDK_DEBUGLOG(blob, "Resize blobstore\n"); 9757 9758 ctx->super->size = dev_size; 9759 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 9760 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 9761 9762 mask_size = used_cluster_mask_len * ctx->super->md_page_size; 9763 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 9764 SPDK_MALLOC_DMA); 9765 if (!ctx->mask) { 9766 bs_load_ctx_fail(ctx, -ENOMEM); 9767 return; 9768 } 9769 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 9770 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 9771 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 9772 bs_load_grow_used_clusters_read_cpl, ctx); 9773 } 9774 9775 static void 9776 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9777 { 9778 struct spdk_bs_load_ctx *ctx = cb_arg; 9779 int rc; 9780 9781 rc = bs_super_validate(ctx->super, ctx->bs); 9782 if (rc != 0) { 9783 bs_load_ctx_fail(ctx, rc); 9784 return; 9785 } 9786 9787 bs_load_try_to_grow(ctx); 9788 } 9789 9790 struct spdk_bs_grow_ctx { 9791 struct spdk_blob_store *bs; 9792 struct spdk_bs_super_block *super; 9793 9794 struct spdk_bit_pool *new_used_clusters; 9795 struct spdk_bs_md_mask *new_used_clusters_mask; 9796 9797 spdk_bs_sequence_t *seq; 9798 }; 9799 9800 static void 9801 bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno) 9802 { 9803 if (bserrno != 0) { 9804 spdk_bit_pool_free(&ctx->new_used_clusters); 9805 } 9806 9807 bs_sequence_finish(ctx->seq, bserrno); 9808 free(ctx->new_used_clusters_mask); 9809 spdk_free(ctx->super); 9810 free(ctx); 9811 } 9812 9813 static void 9814 bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9815 { 9816 struct spdk_bs_grow_ctx *ctx = cb_arg; 9817 struct spdk_blob_store *bs = ctx->bs; 9818 uint64_t total_clusters; 9819 9820 if (bserrno != 0) { 9821 bs_grow_live_done(ctx, bserrno); 9822 return; 9823 } 9824 9825 /* 9826 * Blobstore is not clean until unload, for now only the super block is up to date. 9827 * This is similar to state right after blobstore init, when bs_write_used_md() didn't 9828 * yet execute. 9829 * When cleanly unloaded, the used md pages will be written out. 9830 * In case of unclean shutdown, loading blobstore will go through recovery path correctly 9831 * filling out the used_clusters with new size and writing it out. 9832 */ 9833 bs->clean = 0; 9834 9835 /* Reverting the super->size past this point is complex, avoid any error paths 9836 * that require to do so. */ 9837 spdk_spin_lock(&bs->used_lock); 9838 9839 total_clusters = ctx->super->size / ctx->super->cluster_size; 9840 9841 assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters)); 9842 spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask); 9843 9844 assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters)); 9845 spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask); 9846 9847 spdk_bit_pool_free(&bs->used_clusters); 9848 bs->used_clusters = ctx->new_used_clusters; 9849 9850 bs->total_clusters = total_clusters; 9851 bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up( 9852 bs->md_start + bs->md_len, bs->pages_per_cluster); 9853 9854 bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters); 9855 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 9856 spdk_spin_unlock(&bs->used_lock); 9857 9858 bs_grow_live_done(ctx, 0); 9859 } 9860 9861 static void 9862 bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9863 { 9864 struct spdk_bs_grow_ctx *ctx = cb_arg; 9865 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 9866 int rc; 9867 9868 if (bserrno != 0) { 9869 bs_grow_live_done(ctx, bserrno); 9870 return; 9871 } 9872 9873 rc = bs_super_validate(ctx->super, ctx->bs); 9874 if (rc != 0) { 9875 bs_grow_live_done(ctx, rc); 9876 return; 9877 } 9878 9879 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9880 total_clusters = dev_size / ctx->super->cluster_size; 9881 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 9882 spdk_divide_round_up(total_clusters, 8), 9883 ctx->super->md_page_size); 9884 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 9885 /* Only checking dev_size. Since it can change, but total_clusters remain the same. */ 9886 if (dev_size == ctx->super->size) { 9887 SPDK_DEBUGLOG(blob, "No need to grow blobstore\n"); 9888 bs_grow_live_done(ctx, 0); 9889 return; 9890 } 9891 /* 9892 * Blobstore cannot be shrunk, so check before if: 9893 * - new size of the device is smaller than size in super_block 9894 * - new total number of clusters is smaller than used_clusters bit_pool 9895 * - there is enough space in metadata for used_cluster_mask to be written out 9896 */ 9897 if (dev_size < ctx->super->size || 9898 total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) || 9899 used_cluster_mask_len > max_used_cluster_mask) { 9900 SPDK_DEBUGLOG(blob, "No space to grow blobstore\n"); 9901 bs_grow_live_done(ctx, -ENOSPC); 9902 return; 9903 } 9904 9905 SPDK_DEBUGLOG(blob, "Resizing blobstore\n"); 9906 9907 ctx->new_used_clusters_mask = calloc(1, total_clusters); 9908 if (!ctx->new_used_clusters_mask) { 9909 bs_grow_live_done(ctx, -ENOMEM); 9910 return; 9911 } 9912 ctx->new_used_clusters = spdk_bit_pool_create(total_clusters); 9913 if (!ctx->new_used_clusters) { 9914 bs_grow_live_done(ctx, -ENOMEM); 9915 return; 9916 } 9917 9918 ctx->super->clean = 0; 9919 ctx->super->size = dev_size; 9920 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 9921 bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx); 9922 } 9923 9924 void 9925 spdk_bs_grow_live(struct spdk_blob_store *bs, 9926 spdk_bs_op_complete cb_fn, void *cb_arg) 9927 { 9928 struct spdk_bs_cpl cpl; 9929 struct spdk_bs_grow_ctx *ctx; 9930 9931 assert(spdk_get_thread() == bs->md_thread); 9932 9933 SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev); 9934 9935 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 9936 cpl.u.bs_basic.cb_fn = cb_fn; 9937 cpl.u.bs_basic.cb_arg = cb_arg; 9938 9939 ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx)); 9940 if (!ctx) { 9941 cb_fn(cb_arg, -ENOMEM); 9942 return; 9943 } 9944 ctx->bs = bs; 9945 9946 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 9947 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 9948 if (!ctx->super) { 9949 free(ctx); 9950 cb_fn(cb_arg, -ENOMEM); 9951 return; 9952 } 9953 9954 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 9955 if (!ctx->seq) { 9956 spdk_free(ctx->super); 9957 free(ctx); 9958 cb_fn(cb_arg, -ENOMEM); 9959 return; 9960 } 9961 9962 /* Read the super block */ 9963 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 9964 bs_byte_to_lba(bs, sizeof(*ctx->super)), 9965 bs_grow_live_load_super_cpl, ctx); 9966 } 9967 9968 void 9969 spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 9970 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 9971 { 9972 struct spdk_blob_store *bs; 9973 struct spdk_bs_cpl cpl; 9974 struct spdk_bs_load_ctx *ctx; 9975 struct spdk_bs_opts opts = {}; 9976 int err; 9977 9978 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 9979 9980 if ((dev->phys_blocklen % dev->blocklen) != 0) { 9981 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 9982 dev->destroy(dev); 9983 cb_fn(cb_arg, NULL, -EINVAL); 9984 return; 9985 } 9986 9987 spdk_bs_opts_init(&opts, sizeof(opts)); 9988 if (o) { 9989 if (bs_opts_copy(o, &opts)) { 9990 dev->destroy(dev); 9991 cb_fn(cb_arg, NULL, -EINVAL); 9992 return; 9993 } 9994 } 9995 9996 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 9997 dev->destroy(dev); 9998 cb_fn(cb_arg, NULL, -EINVAL); 9999 return; 10000 } 10001 10002 err = bs_alloc(dev, &opts, &bs, &ctx); 10003 if (err) { 10004 dev->destroy(dev); 10005 cb_fn(cb_arg, NULL, err); 10006 return; 10007 } 10008 10009 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 10010 cpl.u.bs_handle.cb_fn = cb_fn; 10011 cpl.u.bs_handle.cb_arg = cb_arg; 10012 cpl.u.bs_handle.bs = bs; 10013 10014 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 10015 if (!ctx->seq) { 10016 spdk_free(ctx->super); 10017 free(ctx); 10018 bs_free(bs); 10019 cb_fn(cb_arg, NULL, -ENOMEM); 10020 return; 10021 } 10022 10023 /* Read the super block */ 10024 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 10025 bs_byte_to_lba(bs, sizeof(*ctx->super)), 10026 bs_grow_load_super_cpl, ctx); 10027 } 10028 10029 int 10030 spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len) 10031 { 10032 if (!blob_is_esnap_clone(blob)) { 10033 return -EINVAL; 10034 } 10035 10036 return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true); 10037 } 10038 10039 struct spdk_io_channel * 10040 blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob) 10041 { 10042 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(ch); 10043 struct spdk_bs_dev *bs_dev = blob->back_bs_dev; 10044 struct blob_esnap_channel find = {}; 10045 struct blob_esnap_channel *esnap_channel, *existing; 10046 10047 find.blob_id = blob->id; 10048 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 10049 if (spdk_likely(esnap_channel != NULL)) { 10050 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n", 10051 blob->id, spdk_thread_get_name(spdk_get_thread())); 10052 return esnap_channel->channel; 10053 } 10054 10055 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n", 10056 blob->id, spdk_thread_get_name(spdk_get_thread())); 10057 10058 esnap_channel = calloc(1, sizeof(*esnap_channel)); 10059 if (esnap_channel == NULL) { 10060 SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n", 10061 find.blob_id); 10062 return NULL; 10063 } 10064 esnap_channel->channel = bs_dev->create_channel(bs_dev); 10065 if (esnap_channel->channel == NULL) { 10066 SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id); 10067 free(esnap_channel); 10068 return NULL; 10069 } 10070 esnap_channel->blob_id = find.blob_id; 10071 existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 10072 if (spdk_unlikely(existing != NULL)) { 10073 /* 10074 * This should be unreachable: all modifications to this tree happen on this thread. 10075 */ 10076 SPDK_ERRLOG("blob 0x%" PRIx64 "lost race to allocate a channel\n", find.blob_id); 10077 assert(false); 10078 10079 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 10080 free(esnap_channel); 10081 10082 return existing->channel; 10083 } 10084 10085 return esnap_channel->channel; 10086 } 10087 10088 static int 10089 blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2) 10090 { 10091 return (c1->blob_id < c2->blob_id ? -1 : c1->blob_id > c2->blob_id); 10092 } 10093 10094 struct blob_esnap_destroy_ctx { 10095 spdk_blob_op_with_handle_complete cb_fn; 10096 void *cb_arg; 10097 struct spdk_blob *blob; 10098 struct spdk_bs_dev *back_bs_dev; 10099 bool abort_io; 10100 }; 10101 10102 static void 10103 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status) 10104 { 10105 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 10106 struct spdk_blob *blob = ctx->blob; 10107 struct spdk_blob_store *bs = blob->bs; 10108 10109 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n", 10110 blob->id); 10111 10112 if (ctx->cb_fn != NULL) { 10113 ctx->cb_fn(ctx->cb_arg, blob, status); 10114 } 10115 free(ctx); 10116 10117 bs->esnap_channels_unloading--; 10118 if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) { 10119 spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg); 10120 } 10121 } 10122 10123 static void 10124 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i) 10125 { 10126 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 10127 struct spdk_blob *blob = ctx->blob; 10128 struct spdk_bs_dev *bs_dev = ctx->back_bs_dev; 10129 struct spdk_io_channel *channel = spdk_io_channel_iter_get_channel(i); 10130 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(channel); 10131 struct blob_esnap_channel *esnap_channel; 10132 struct blob_esnap_channel find = {}; 10133 10134 assert(spdk_get_thread() == spdk_io_channel_get_thread(channel)); 10135 10136 find.blob_id = blob->id; 10137 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 10138 if (esnap_channel != NULL) { 10139 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n", 10140 blob->id, spdk_thread_get_name(spdk_get_thread())); 10141 RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 10142 10143 if (ctx->abort_io) { 10144 spdk_bs_user_op_t *op, *tmp; 10145 10146 TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) { 10147 if (op->back_channel == esnap_channel->channel) { 10148 TAILQ_REMOVE(&bs_channel->queued_io, op, link); 10149 bs_user_op_abort(op, -EIO); 10150 } 10151 } 10152 } 10153 10154 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 10155 free(esnap_channel); 10156 } 10157 10158 spdk_for_each_channel_continue(i, 0); 10159 } 10160 10161 /* 10162 * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be 10163 * used when closing an esnap clone blob and after decoupling from the parent. 10164 */ 10165 static void 10166 blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io, 10167 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 10168 { 10169 struct blob_esnap_destroy_ctx *ctx; 10170 10171 if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) { 10172 if (cb_fn != NULL) { 10173 cb_fn(cb_arg, blob, 0); 10174 } 10175 return; 10176 } 10177 10178 ctx = calloc(1, sizeof(*ctx)); 10179 if (ctx == NULL) { 10180 if (cb_fn != NULL) { 10181 cb_fn(cb_arg, blob, -ENOMEM); 10182 } 10183 return; 10184 } 10185 ctx->cb_fn = cb_fn; 10186 ctx->cb_arg = cb_arg; 10187 ctx->blob = blob; 10188 ctx->back_bs_dev = blob->back_bs_dev; 10189 ctx->abort_io = abort_io; 10190 10191 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n", 10192 blob->id); 10193 10194 blob->bs->esnap_channels_unloading++; 10195 spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx, 10196 blob_esnap_destroy_channels_done); 10197 } 10198 10199 /* 10200 * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a 10201 * bs_channel is destroyed. 10202 */ 10203 static void 10204 blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch) 10205 { 10206 struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp; 10207 10208 assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch))); 10209 10210 SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n", 10211 spdk_thread_get_name(spdk_get_thread())); 10212 RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels, 10213 esnap_channel_tmp) { 10214 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 10215 ": destroying one channel in thread %s\n", 10216 esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread())); 10217 RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel); 10218 spdk_put_io_channel(esnap_channel->channel); 10219 free(esnap_channel); 10220 } 10221 SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n", 10222 spdk_thread_get_name(spdk_get_thread())); 10223 } 10224 10225 static void 10226 blob_set_back_bs_dev_done(void *_ctx, int bserrno) 10227 { 10228 struct set_bs_dev_ctx *ctx = _ctx; 10229 10230 if (bserrno != 0) { 10231 /* Even though the unfreeze failed, the update may have succeed. */ 10232 SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id, 10233 bserrno); 10234 } 10235 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 10236 free(ctx); 10237 } 10238 10239 static void 10240 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno) 10241 { 10242 struct set_bs_dev_ctx *ctx = _ctx; 10243 int rc; 10244 10245 if (bserrno != 0) { 10246 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n", 10247 blob->id, bserrno); 10248 ctx->bserrno = bserrno; 10249 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10250 return; 10251 } 10252 10253 if (blob->back_bs_dev != NULL) { 10254 blob_unref_back_bs_dev(blob); 10255 } 10256 10257 if (ctx->parent_refs_cb_fn) { 10258 rc = ctx->parent_refs_cb_fn(blob, ctx->parent_refs_cb_arg); 10259 if (rc != 0) { 10260 ctx->bserrno = rc; 10261 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10262 return; 10263 } 10264 } 10265 10266 SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id); 10267 blob->back_bs_dev = ctx->back_bs_dev; 10268 ctx->bserrno = 0; 10269 10270 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10271 } 10272 10273 static void 10274 blob_set_back_bs_dev_frozen(void *_ctx, int bserrno) 10275 { 10276 struct set_bs_dev_ctx *ctx = _ctx; 10277 struct spdk_blob *blob = ctx->blob; 10278 10279 if (bserrno != 0) { 10280 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id, 10281 bserrno); 10282 ctx->cb_fn(ctx->cb_arg, bserrno); 10283 free(ctx); 10284 return; 10285 } 10286 10287 /* 10288 * This does not prevent future reads from the esnap device because any future IO will 10289 * lazily create a new esnap IO channel. 10290 */ 10291 blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx); 10292 } 10293 10294 void 10295 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev, 10296 spdk_blob_op_complete cb_fn, void *cb_arg) 10297 { 10298 if (!blob_is_esnap_clone(blob)) { 10299 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 10300 cb_fn(cb_arg, -EINVAL); 10301 return; 10302 } 10303 10304 blob_set_back_bs_dev(blob, back_bs_dev, NULL, NULL, cb_fn, cb_arg); 10305 } 10306 10307 struct spdk_bs_dev * 10308 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob) 10309 { 10310 if (!blob_is_esnap_clone(blob)) { 10311 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 10312 return NULL; 10313 } 10314 10315 return blob->back_bs_dev; 10316 } 10317 10318 bool 10319 spdk_blob_is_degraded(const struct spdk_blob *blob) 10320 { 10321 if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) { 10322 return true; 10323 } 10324 if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) { 10325 return false; 10326 } 10327 10328 return blob->back_bs_dev->is_degraded(blob->back_bs_dev); 10329 } 10330 10331 SPDK_LOG_REGISTER_COMPONENT(blob) 10332 SPDK_LOG_REGISTER_COMPONENT(blob_esnap) 10333 10334 static void 10335 blob_trace(void) 10336 { 10337 struct spdk_trace_tpoint_opts opts[] = { 10338 { 10339 "BLOB_REQ_SET_START", TRACE_BLOB_REQ_SET_START, 10340 OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 1, 10341 { 10342 { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 } 10343 } 10344 }, 10345 { 10346 "BLOB_REQ_SET_COMPLETE", TRACE_BLOB_REQ_SET_COMPLETE, 10347 OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 0, 10348 { 10349 { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 } 10350 } 10351 }, 10352 }; 10353 10354 spdk_trace_register_object(OBJECT_BLOB_CB_ARG, 'a'); 10355 spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts)); 10356 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_START, OBJECT_BLOB_CB_ARG, 1); 10357 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_DONE, OBJECT_BLOB_CB_ARG, 0); 10358 } 10359 SPDK_TRACE_REGISTER_FN(blob_trace, "blob", TRACE_GROUP_BLOB) 10360