/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/trace.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/trace_defs.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg);

static void bs_shallow_copy_cluster_find_next(void *cb_arg);

/*
 * External snapshots require a channel per thread per esnap bdev. The tree
 * is populated lazily as blob IOs are handled by the back_bs_dev. When this
 * channel is destroyed, all the channels in the tree are destroyed.
 */

struct blob_esnap_channel {
	RB_ENTRY(blob_esnap_channel)	node;
	spdk_blob_id			blob_id;
	struct spdk_io_channel		*channel;
};

static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
static void blob_set_back_bs_dev_frozen(void *_ctx, int bserrno);
RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)

static inline bool
blob_is_esnap_clone(const struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
}

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	assert(blob1 != NULL && blob2 != NULL);
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	assert(spdk_spin_held(&bs->used_lock));

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	blob->active.num_allocated_clusters++;

	return 0;
}

static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = 0;

	assert(spdk_spin_held(&blob->bs->used_lock));

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request. */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* An extent page must never occupy md page 0, so start the search from 1. */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent page is allocated for the cluster yet. */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request. */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
		      blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}
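
/*
 * The FIELD_OK()/SET_FIELD() pattern above (and in spdk_blob_open_opts_init()
 * below) only touches fields that fit within the caller-provided opts_size, so
 * an application compiled against an older definition of the opts struct keeps
 * working when the library grows new fields. A minimal usage sketch (the
 * callback and context names are illustrative, not part of this file):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 8;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, my_create_cb, my_ctx);
 */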

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}

static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_unref_back_bs_dev(struct spdk_blob *blob)
{
	blob->back_bs_dev->destroy(blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob_unref_back_bs_dev(blob);
	}

	free(blob);
}

static void
blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_dev *bs_dev = ctx;

	if (bserrno != 0) {
		/*
		 * This is probably due to a memory allocation failure when creating the
		 * blob_esnap_destroy_ctx before iterating threads.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
			    blob->id, bserrno);
		assert(false);
	}

	if (bs_dev == NULL) {
		/*
		 * This check exists to make scan-build happy.
		 *
		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
		 * the blobstore is being loaded. It could also be NULL if there was an error
		 * opening the esnap device. In each of these cases, no channels could have been
		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
		 * deref.
		 */
		assert(false);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
	bs_dev->destroy(bs_dev);
}

static void
blob_back_bs_destroy(struct spdk_blob *blob)
{
	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
		      blob->id);

	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
					   blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

struct blob_parent {
	union {
		struct {
			spdk_blob_id id;
			struct spdk_blob *blob;
		} snapshot;

		struct {
			void *id;
			uint32_t id_len;
			struct spdk_bs_dev *back_bs_dev;
		} esnap;
	} u;
};

typedef int (*set_parent_refs_cb)(struct spdk_blob *blob, struct blob_parent *parent);

struct set_bs_dev_ctx {
	struct spdk_blob	*blob;
	struct spdk_bs_dev	*back_bs_dev;

	/*
	 * This callback is used during a set parent operation to change the references
	 * to the parent of the blob.
	 */
	set_parent_refs_cb	parent_refs_cb_fn;
	struct blob_parent	*parent_refs_cb_arg;

	spdk_blob_op_complete	cb_fn;
	void			*cb_arg;
	int			bserrno;
};

static void
blob_set_back_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
		     set_parent_refs_cb parent_refs_cb_fn, struct blob_parent *parent_refs_cb_arg,
		     spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct set_bs_dev_ctx	*ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n",
			    blob->id);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->parent_refs_cb_fn = parent_refs_cb_fn;
	ctx->parent_refs_cb_arg = parent_refs_cb_arg;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->back_bs_dev = back_bs_dev;
	ctx->blob = blob;

	blob_freeze_io(blob, blob_set_back_bs_dev_frozen, ctx);
}
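
/*
 * I/O freeze machinery: blob_freeze_io() bumps blob->frozen_refcnt and fans
 * out across all blobstore channels so that in-flight operations drain. While
 * frozen_refcnt is non-zero, new user ops targeting the blob are parked on
 * each channel's queued_io list; blob_unfreeze_io() drops the refcount and
 * replays the parked ops via blob_execute_queued_io() below.
 */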

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set	*set;
	struct spdk_bs_user_op_args	*args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
}
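
/*
 * blob_mark_clean() snapshots the just-persisted metadata: the current active
 * arrays become the clean copies (the last known on-disk state), and freshly
 * allocated duplicates take their place as the active arrays.
 */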

static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_allocated_clusters = blob->active.num_allocated_clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr	*xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}

static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t	cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags	*desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
			unsigned int					i, j;
			unsigned int					cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* An Extent Table is already present in the md;
				 * both descriptors should never be present at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
						blob->active.num_allocated_clusters++;
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in the MD;
				 * both should never be present at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match the number
				 * from the previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages; those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
			unsigned int					i;
			unsigned int					cluster_count = 0;
			size_t						cluster_idx_length;

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in the MD;
				 * both should never be present at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should match
			 * the current size of the blob.
			 * If changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
					blob->active.num_allocated_clusters++;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD; this can
	 * happen, for example, if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(blob->bs->md_page_size, 0,
				     NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, blob->bs->md_page_size * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Updates required_sz on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr	*desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}

static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted.
	 * Such a case occurs with num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}
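
/*
 * Clusters are run-length encoded as (cluster_idx, length) pairs. For
 * illustration (values hypothetical): a thin-provisioned blob whose cluster
 * LBAs map to cluster indices [5, 6, 7, 0, 0, 12] serializes as three
 * extents: (5, 3) for the contiguous run, (0, 2) for the unallocated gap,
 * and (12, 1).
 */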

static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
	/* Assert for scan-build false positive */
	assert(lba_per_cluster > 0);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr	*xattr;
	int	rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page		*cur_page;
	int					rc;
	uint8_t					*buf;
	size_t					remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	uint32_t			next_extent_page;
	spdk_bs_sequence_t		*seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};
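
/*
 * The CRC-32C of a metadata page is stored in the page's trailing 4-byte crc
 * field, so the calculation below covers SPDK_BS_PAGE_SIZE - 4 bytes, i.e.
 * everything but that field. Seeding with BLOB_CRC32C_INITIAL and XOR-ing it
 * back out at the end is the standard CRC-32C pre-/post-conditioning.
 */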

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot fail\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);

static int
blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
{
	struct spdk_blob_store *bs = blob->bs;
	struct spdk_bs_dev *bs_dev = NULL;
	const void *esnap_id = NULL;
	size_t id_len = 0;
	int rc;

	if (bs->esnap_bs_dev_create == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
			       "without support for esnap clones\n", blob->id);
		return -ENOTSUP;
	}
	assert(blob->back_bs_dev == NULL);

	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
	if (rc != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
		return -EINVAL;
	}
	assert(id_len > 0 && id_len < UINT32_MAX);

	SPDK_INFOLOG(blob, "Creating external snapshot device\n");

	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
				     &bs_dev);
	if (rc != 0) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
			      "with error %d\n", blob->id, rc);
		return rc;
	}

	/*
	 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot.
	 * This especially might happen during spdk_bs_load() iteration.
	 */
	if (bs_dev != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
				       "is not compatible with blobstore block size %u\n",
				       blob->id, bs_dev->blocklen, bs->io_unit_size);
			bs_dev->destroy(bs_dev);
			return -EINVAL;
		}
	}

	blob->back_bs_dev = bs_dev;
	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;

	return 0;
}

static void
blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	const void			*value;
	size_t				len;
	int				rc;

	if (blob_is_esnap_clone(blob)) {
		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
		blob_load_final(ctx, rc);
		return;
	}

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}

static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	uint64_t			i;
	uint32_t			crc;
	uint64_t			lba;
	void				*tmp;
	uint64_t			sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(blob->bs->md_page_size, 0,
					  NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, blob->bs->md_page_size),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case the blob size should be increased by up to the amount
			 * left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(seq, ctx);
}

static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	int				rc;
	uint32_t			crc;
	uint32_t			current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support for
		 * the extent table. No extent_* descriptors means that the blob has a
		 * length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(seq, ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, bs->md_page_size, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, bs->md_page_size),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			next_extent_page;
	struct spdk_blob_md_page	*extent_page;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};
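
/*
 * Map the blob's clear_method onto a batch operation: both DEFAULT and UNMAP
 * issue an unmap, WRITE_ZEROES writes zeroes, and NONE leaves the old data in
 * place on the device.
 */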

static void
bs_batch_clear_dev(struct spdk_blob *blob, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

static int
bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
{
	uint32_t	crc;
	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];

	if (super->version > SPDK_BS_VERSION ||
	    super->version < SPDK_BS_INITIAL_VERSION) {
		return -EILSEQ;
	}

	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(super->signature)) != 0) {
		return -EILSEQ;
	}

	crc = blob_md_page_calc_crc(super);
	if (crc != super->crc) {
		return -EILSEQ;
	}

	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless of bstype\n");
	} else {
		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		return -ENXIO;
	}

	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
		return -EILSEQ;
	}

	return 0;
}

static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx	*ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
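
/*
 * Persist requests are serialized per blob: persists that arrive while one is
 * in flight sit on pending_persists. When the in-flight persist finishes,
 * everything on pending_persists is swapped onto persists_to_complete, and the
 * whole batch is covered by a single new persist of the now-current metadata.
 */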

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx	*next_persist, *tmp;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
}

static void
blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	spdk_spin_lock(&bs->used_lock);

	/* Release all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to release if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			bs_release_md_page(bs, blob->active.extent_pages[i]);
		}
	}

	spdk_spin_unlock(&bs->used_lock);

	if (blob->active.num_extent_pages == 0) {
		free(blob->active.extent_pages);
		blob->active.extent_pages = NULL;
		blob->active.extent_pages_array_size = 0;
	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
		assert(tmp != NULL);
		blob->active.extent_pages = tmp;
#endif
		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
	}

	blob_persist_complete(seq, ctx, bserrno);
}

static void
blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;
	uint64_t lba;
	uint64_t lba_count;
	spdk_bs_batch_t *batch;

	batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx);
	lba_count = bs_byte_to_lba(bs, bs->md_page_size);

	/* Clear all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to clear if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]);
			bs_batch_write_zeroes_dev(batch, lba, lba_count);
		}
	}

	bs_batch_close(batch);
}
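
/*
 * Shrink/delete cleanup runs as a chain of batches: zero the dropped metadata
 * pages, then clear and release the truncated clusters, then zero and release
 * the truncated extent pages, and finally complete the persist. Each _cpl
 * callback kicks off the next stage; note the functions appear below in
 * reverse call order.
 */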
spdk_blob_store *bs = blob->bs; 1959 size_t i; 1960 uint64_t lba; 1961 uint64_t lba_count; 1962 spdk_bs_batch_t *batch; 1963 1964 batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx); 1965 lba_count = bs_byte_to_lba(bs, bs->md_page_size); 1966 1967 /* Clear all extent_pages that were truncated */ 1968 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1969 /* Nothing to clear if it was not allocated */ 1970 if (blob->active.extent_pages[i] != 0) { 1971 lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]); 1972 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1973 } 1974 } 1975 1976 bs_batch_close(batch); 1977 } 1978 1979 static void 1980 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1981 { 1982 struct spdk_blob_persist_ctx *ctx = cb_arg; 1983 struct spdk_blob *blob = ctx->blob; 1984 struct spdk_blob_store *bs = blob->bs; 1985 size_t i; 1986 1987 if (bserrno != 0) { 1988 blob_persist_complete(seq, ctx, bserrno); 1989 return; 1990 } 1991 1992 spdk_spin_lock(&bs->used_lock); 1993 /* Release all clusters that were truncated */ 1994 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1995 uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]); 1996 1997 /* Nothing to release if it was not allocated */ 1998 if (blob->active.clusters[i] != 0) { 1999 bs_release_cluster(bs, cluster_num); 2000 } 2001 } 2002 spdk_spin_unlock(&bs->used_lock); 2003 2004 if (blob->active.num_clusters == 0) { 2005 free(blob->active.clusters); 2006 blob->active.clusters = NULL; 2007 blob->active.cluster_array_size = 0; 2008 } else if (blob->active.num_clusters != blob->active.cluster_array_size) { 2009 #ifndef __clang_analyzer__ 2010 void *tmp; 2011 2012 /* scan-build really can't figure reallocs, workaround it */ 2013 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters); 2014 assert(tmp != NULL); 2015 blob->active.clusters = tmp; 2016 2017 #endif 2018 blob->active.cluster_array_size = blob->active.num_clusters; 2019 } 2020 2021 /* Move on to clearing extent pages */ 2022 blob_persist_clear_extents(seq, ctx); 2023 } 2024 2025 static void 2026 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2027 { 2028 struct spdk_blob *blob = ctx->blob; 2029 struct spdk_blob_store *bs = blob->bs; 2030 spdk_bs_batch_t *batch; 2031 size_t i; 2032 uint64_t lba; 2033 uint64_t lba_count; 2034 2035 /* Clusters don't move around in blobs. The list shrinks or grows 2036 * at the end, but no changes ever occur in the middle of the list. 2037 */ 2038 2039 batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx); 2040 2041 /* Clear all clusters that were truncated */ 2042 lba = 0; 2043 lba_count = 0; 2044 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 2045 uint64_t next_lba = blob->active.clusters[i]; 2046 uint64_t next_lba_count = bs_cluster_to_lba(bs, 1); 2047 2048 if (next_lba > 0 && (lba + lba_count) == next_lba) { 2049 /* This cluster is contiguous with the previous one. */ 2050 lba_count += next_lba_count; 2051 continue; 2052 } else if (next_lba == 0) { 2053 continue; 2054 } 2055 2056 /* This cluster is not contiguous with the previous one. 
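 * As a hedged sketch of this run-merging (the numbers are hypothetical):
 * if bs_cluster_to_lba(bs, 1) == 256 and the truncated clusters sit at LBAs
 * 1024, 1280 and 2048, the first two coalesce into a single clear of lba 1024
 * with lba_count 512, while 2048 starts a fresh run, so the batch carries two
 * clear operations instead of three.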
*/ 2057 2058 /* If a run of LBAs previously existed, clear it now */ 2059 if (lba_count > 0) { 2060 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2061 } 2062 2063 /* Start building the next batch */ 2064 lba = next_lba; 2065 if (next_lba > 0) { 2066 lba_count = next_lba_count; 2067 } else { 2068 lba_count = 0; 2069 } 2070 } 2071 2072 /* If we ended with a contiguous set of LBAs, clear them now */ 2073 if (lba_count > 0) { 2074 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2075 } 2076 2077 bs_batch_close(batch); 2078 } 2079 2080 static void 2081 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2082 { 2083 struct spdk_blob_persist_ctx *ctx = cb_arg; 2084 struct spdk_blob *blob = ctx->blob; 2085 struct spdk_blob_store *bs = blob->bs; 2086 size_t i; 2087 2088 if (bserrno != 0) { 2089 blob_persist_complete(seq, ctx, bserrno); 2090 return; 2091 } 2092 2093 spdk_spin_lock(&bs->used_lock); 2094 2095 /* This loop starts at 1 because the first page is special and handled 2096 * below. The pages (except the first) are never written in place, 2097 * so any pages in the clean list must be released back to the pool. 2098 */ 2099 for (i = 1; i < blob->clean.num_pages; i++) { 2100 bs_release_md_page(bs, blob->clean.pages[i]); 2101 } 2102 2103 if (blob->active.num_pages == 0) { 2104 uint32_t page_num; 2105 2106 page_num = bs_blobid_to_page(blob->id); 2107 bs_release_md_page(bs, page_num); 2108 } 2109 2110 spdk_spin_unlock(&bs->used_lock); 2111 2112 /* Move on to clearing clusters */ 2113 blob_persist_clear_clusters(seq, ctx); 2114 } 2115 2116 static void 2117 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2118 { 2119 struct spdk_blob_persist_ctx *ctx = cb_arg; 2120 struct spdk_blob *blob = ctx->blob; 2121 struct spdk_blob_store *bs = blob->bs; 2122 uint64_t lba; 2123 uint64_t lba_count; 2124 spdk_bs_batch_t *batch; 2125 size_t i; 2126 2127 if (bserrno != 0) { 2128 blob_persist_complete(seq, ctx, bserrno); 2129 return; 2130 } 2131 2132 batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx); 2133 2134 lba_count = bs_byte_to_lba(bs, bs->md_page_size); 2135 2136 /* This loop starts at 1 because the first page is special and handled 2137 * below. The pages (except the first) are never written in place, 2138 * so any pages in the clean list must be zeroed. 2139 */ 2140 for (i = 1; i < blob->clean.num_pages; i++) { 2141 lba = bs_md_page_to_lba(bs, blob->clean.pages[i]); 2142 2143 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2144 } 2145 2146 /* The first page will only be zeroed if this is a delete. 
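 * (blob->active.num_pages == 0 is the deletion signal checked in
 * blob_persist_start; zeroing the md page at bs_blobid_to_page(blob->id) is
 * what makes the delete durable, since that fixed location is how the blob is
 * found on the next load.)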
*/ 2147 if (blob->active.num_pages == 0) { 2148 uint32_t page_num; 2149 2150 /* The first page in the metadata goes where the blobid indicates */ 2151 page_num = bs_blobid_to_page(blob->id); 2152 lba = bs_md_page_to_lba(bs, page_num); 2153 2154 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2155 } 2156 2157 bs_batch_close(batch); 2158 } 2159 2160 static void 2161 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2162 { 2163 struct spdk_blob_persist_ctx *ctx = cb_arg; 2164 struct spdk_blob *blob = ctx->blob; 2165 struct spdk_blob_store *bs = blob->bs; 2166 uint64_t lba; 2167 uint32_t lba_count; 2168 struct spdk_blob_md_page *page; 2169 2170 if (bserrno != 0) { 2171 blob_persist_complete(seq, ctx, bserrno); 2172 return; 2173 } 2174 2175 if (blob->active.num_pages == 0) { 2176 /* Move on to the next step */ 2177 blob_persist_zero_pages(seq, ctx, 0); 2178 return; 2179 } 2180 2181 lba_count = bs_byte_to_lba(bs, bs->md_page_size); 2182 2183 page = &ctx->pages[0]; 2184 /* The first page in the metadata goes where the blobid indicates */ 2185 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 2186 2187 bs_sequence_write_dev(seq, page, lba, lba_count, 2188 blob_persist_zero_pages, ctx); 2189 } 2190 2191 static void 2192 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2193 { 2194 struct spdk_blob *blob = ctx->blob; 2195 struct spdk_blob_store *bs = blob->bs; 2196 uint64_t lba; 2197 uint32_t lba_count; 2198 struct spdk_blob_md_page *page; 2199 spdk_bs_batch_t *batch; 2200 size_t i; 2201 2202 /* Clusters don't move around in blobs. The list shrinks or grows 2203 * at the end, but no changes ever occur in the middle of the list. 2204 */ 2205 2206 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2207 2208 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 2209 2210 /* This starts at 1. The root page is not written until 2211 * all of the others are finished. 2212 */ 2213 for (i = 1; i < blob->active.num_pages; i++) { 2214 page = &ctx->pages[i]; 2215 assert(page->sequence_num == i); 2216 2217 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 2218 2219 bs_batch_write_dev(batch, page, lba, lba_count); 2220 } 2221 2222 bs_batch_close(batch); 2223 } 2224 2225 static int 2226 blob_resize(struct spdk_blob *blob, uint64_t sz) 2227 { 2228 uint64_t i; 2229 uint64_t *tmp; 2230 uint64_t cluster; 2231 uint32_t lfmd; /* lowest free md page */ 2232 uint64_t num_clusters; 2233 uint32_t *ep_tmp; 2234 uint64_t new_num_ep = 0, current_num_ep = 0; 2235 struct spdk_blob_store *bs; 2236 int rc; 2237 2238 bs = blob->bs; 2239 2240 blob_verify_md_op(blob); 2241 2242 if (blob->active.num_clusters == sz) { 2243 return 0; 2244 } 2245 2246 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2247 /* If this blob was resized to be larger, then smaller, then 2248 * larger without syncing, then the cluster array already 2249 * contains spare assigned clusters we can use. 2250 */ 2251 num_clusters = spdk_min(blob->active.cluster_array_size, 2252 sz); 2253 } else { 2254 num_clusters = blob->active.num_clusters; 2255 } 2256 2257 if (blob->use_extent_table) { 2258 /* Round up, since every cluster beyond the current Extent Table size 2259 * requires a new extent page. 
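 * As an illustration, treating SPDK_EXTENTS_PER_EP as 512 purely for the
 * arithmetic: growing from 520 clusters to 1030 moves current_num_ep from
 * ceil(520/512) == 2 to new_num_ep == ceil(1030/512) == 3, so exactly one
 * additional extent page must be reserved below.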
*/ 2260 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2261 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2262 } 2263 2264 assert(!spdk_spin_held(&bs->used_lock)); 2265 2266 /* Check first that we have enough clusters and md pages before we start claiming them. 2267 * bs->used_lock is held to ensure that clusters we think are free are still free when we go 2268 * to claim them later in this function. 2269 */ 2270 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2271 spdk_spin_lock(&bs->used_lock); 2272 if ((sz - num_clusters) > bs->num_free_clusters) { 2273 rc = -ENOSPC; 2274 goto out; 2275 } 2276 lfmd = 0; 2277 for (i = current_num_ep; i < new_num_ep ; i++) { 2278 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2279 if (lfmd == UINT32_MAX) { 2280 /* No more free md pages. Cannot satisfy the request */ 2281 rc = -ENOSPC; 2282 goto out; 2283 } 2284 } 2285 } 2286 2287 if (sz > num_clusters) { 2288 /* Expand the cluster array if necessary. 2289 * We only shrink the array when persisting. 2290 */ 2291 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2292 if (sz > 0 && tmp == NULL) { 2293 rc = -ENOMEM; 2294 goto out; 2295 } 2296 memset(tmp + blob->active.cluster_array_size, 0, 2297 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2298 blob->active.clusters = tmp; 2299 blob->active.cluster_array_size = sz; 2300 2301 /* Expand the extents table, only if enough clusters were added */ 2302 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2303 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2304 if (new_num_ep > 0 && ep_tmp == NULL) { 2305 rc = -ENOMEM; 2306 goto out; 2307 } 2308 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2309 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2310 blob->active.extent_pages = ep_tmp; 2311 blob->active.extent_pages_array_size = new_num_ep; 2312 } 2313 } 2314 2315 blob->state = SPDK_BLOB_STATE_DIRTY; 2316 2317 if (spdk_blob_is_thin_provisioned(blob) == false) { 2318 cluster = 0; 2319 lfmd = 0; 2320 for (i = num_clusters; i < sz; i++) { 2321 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2322 /* Do not increment lfmd here. lfmd will get updated 2323 * to the md_page allocated (if any) when a new extent 2324 * page is needed. Just pass that value again, 2325 * bs_allocate_cluster will just start at that index 2326 * to find the next free md_page when needed. 
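 * Put differently, lfmd is only a forward-moving search hint into
 * used_md_pages: pages below it were already in use the last time it was
 * updated, so restarting each spdk_bit_array_find_first_clear() scan there
 * avoids rescanning the front of the bit array.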
2327 */ 2328 } 2329 } 2330 2331 /* If we are shrinking the blob, we must adjust num_allocated_clusters */ 2332 for (i = sz; i < num_clusters; i++) { 2333 if (blob->active.clusters[i] != 0) { 2334 blob->active.num_allocated_clusters--; 2335 } 2336 } 2337 2338 blob->active.num_clusters = sz; 2339 blob->active.num_extent_pages = new_num_ep; 2340 2341 rc = 0; 2342 out: 2343 if (spdk_spin_held(&bs->used_lock)) { 2344 spdk_spin_unlock(&bs->used_lock); 2345 } 2346 2347 return rc; 2348 } 2349 2350 static void 2351 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2352 { 2353 spdk_bs_sequence_t *seq = ctx->seq; 2354 struct spdk_blob *blob = ctx->blob; 2355 struct spdk_blob_store *bs = blob->bs; 2356 uint64_t i; 2357 uint32_t page_num; 2358 void *tmp; 2359 int rc; 2360 2361 /* Generate the new metadata */ 2362 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2363 if (rc < 0) { 2364 blob_persist_complete(seq, ctx, rc); 2365 return; 2366 } 2367 2368 assert(blob->active.num_pages >= 1); 2369 2370 /* Resize the cache of page indices */ 2371 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2372 if (!tmp) { 2373 blob_persist_complete(seq, ctx, -ENOMEM); 2374 return; 2375 } 2376 blob->active.pages = tmp; 2377 2378 /* Assign this metadata to pages. This requires two passes - one to verify that there are 2379 * enough pages and a second to actually claim them. The used_lock is held across 2380 * both passes to ensure things don't change in the middle. 2381 */ 2382 spdk_spin_lock(&bs->used_lock); 2383 page_num = 0; 2384 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 2385 for (i = 1; i < blob->active.num_pages; i++) { 2386 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2387 if (page_num == UINT32_MAX) { 2388 spdk_spin_unlock(&bs->used_lock); 2389 blob_persist_complete(seq, ctx, -ENOMEM); 2390 return; 2391 } 2392 page_num++; 2393 } 2394 2395 page_num = 0; 2396 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2397 for (i = 1; i < blob->active.num_pages; i++) { 2398 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2399 ctx->pages[i - 1].next = page_num; 2400 /* Now that previous metadata page is complete, calculate the crc for it. */ 2401 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2402 blob->active.pages[i] = page_num; 2403 bs_claim_md_page(bs, page_num); 2404 SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num, 2405 blob->id); 2406 page_num++; 2407 } 2408 spdk_spin_unlock(&bs->used_lock); 2409 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2410 /* Start writing the metadata from last page to first */ 2411 blob->state = SPDK_BLOB_STATE_CLEAN; 2412 blob_persist_write_page_chain(seq, ctx); 2413 } 2414 2415 static void 2416 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2417 { 2418 struct spdk_blob_persist_ctx *ctx = cb_arg; 2419 struct spdk_blob *blob = ctx->blob; 2420 size_t i; 2421 uint32_t extent_page_id; 2422 uint32_t page_count = 0; 2423 int rc; 2424 2425 if (ctx->extent_page != NULL) { 2426 spdk_free(ctx->extent_page); 2427 ctx->extent_page = NULL; 2428 } 2429 2430 if (bserrno != 0) { 2431 blob_persist_complete(seq, ctx, bserrno); 2432 return; 2433 } 2434 2435 /* Only write out Extent Pages when blob was resized. 
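 * Note that this function doubles as its own completion callback: each pass
 * serializes and writes at most one extent page, records the resume index in
 * ctx->next_extent_page, and returns; bs_sequence_write_dev() re-enters it
 * until the loop runs dry and control falls through to
 * blob_persist_generate_new_md().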
*/ 2436 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2437 extent_page_id = blob->active.extent_pages[i]; 2438 if (extent_page_id == 0) { 2439 /* No Extent Page to persist */ 2440 assert(spdk_blob_is_thin_provisioned(blob)); 2441 continue; 2442 } 2443 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2444 ctx->next_extent_page = i + 1; 2445 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2446 if (rc < 0) { 2447 blob_persist_complete(seq, ctx, rc); 2448 return; 2449 } 2450 2451 blob->state = SPDK_BLOB_STATE_DIRTY; 2452 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2453 2454 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2455 2456 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2457 bs_byte_to_lba(blob->bs, blob->bs->md_page_size), 2458 blob_persist_write_extent_pages, ctx); 2459 return; 2460 } 2461 2462 blob_persist_generate_new_md(ctx); 2463 } 2464 2465 static void 2466 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2467 { 2468 struct spdk_blob_persist_ctx *ctx = cb_arg; 2469 struct spdk_blob *blob = ctx->blob; 2470 2471 if (bserrno != 0) { 2472 blob_persist_complete(seq, ctx, bserrno); 2473 return; 2474 } 2475 2476 if (blob->active.num_pages == 0) { 2477 /* This is the signal that the blob should be deleted. 2478 * Immediately jump to the clean up routine. */ 2479 assert(blob->clean.num_pages > 0); 2480 blob->state = SPDK_BLOB_STATE_CLEAN; 2481 blob_persist_zero_pages(seq, ctx, 0); 2482 return; 2483 2484 } 2485 2486 if (blob->clean.num_clusters < blob->active.num_clusters) { 2487 /* Blob was resized up */ 2488 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2489 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2490 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2491 /* Blob was resized down */ 2492 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2493 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2494 } else { 2495 /* No change in size occurred */ 2496 blob_persist_generate_new_md(ctx); 2497 return; 2498 } 2499 2500 blob_persist_write_extent_pages(seq, ctx, 0); 2501 } 2502 2503 struct spdk_bs_mark_dirty { 2504 struct spdk_blob_store *bs; 2505 struct spdk_bs_super_block *super; 2506 spdk_bs_sequence_cpl cb_fn; 2507 void *cb_arg; 2508 }; 2509 2510 static void 2511 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2512 { 2513 struct spdk_bs_mark_dirty *ctx = cb_arg; 2514 2515 if (bserrno == 0) { 2516 ctx->bs->clean = 0; 2517 } 2518 2519 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 2520 2521 spdk_free(ctx->super); 2522 free(ctx); 2523 } 2524 2525 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2526 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2527 2528 2529 static void 2530 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2531 { 2532 struct spdk_bs_mark_dirty *ctx = cb_arg; 2533 int rc; 2534 2535 if (bserrno != 0) { 2536 bs_mark_dirty_write_cpl(seq, ctx, bserrno); 2537 return; 2538 } 2539 2540 rc = bs_super_validate(ctx->super, ctx->bs); 2541 if (rc != 0) { 2542 bs_mark_dirty_write_cpl(seq, ctx, rc); 2543 return; 2544 } 2545 2546 ctx->super->clean = 0; 2547 if (ctx->super->size == 0) { 2548 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 
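/* A size field of 0 can appear in super blocks written before the field was
 * introduced; the assignment above assumes the current device capacity is the
 * authoritative size in that case. */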
2549 } 2550 2551 bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx); 2552 } 2553 2554 static void 2555 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2556 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2557 { 2558 struct spdk_bs_mark_dirty *ctx; 2559 2560 /* Blobstore is already marked dirty */ 2561 if (bs->clean == 0) { 2562 cb_fn(seq, cb_arg, 0); 2563 return; 2564 } 2565 2566 ctx = calloc(1, sizeof(*ctx)); 2567 if (!ctx) { 2568 cb_fn(seq, cb_arg, -ENOMEM); 2569 return; 2570 } 2571 ctx->bs = bs; 2572 ctx->cb_fn = cb_fn; 2573 ctx->cb_arg = cb_arg; 2574 2575 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2576 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2577 if (!ctx->super) { 2578 free(ctx); 2579 cb_fn(seq, cb_arg, -ENOMEM); 2580 return; 2581 } 2582 2583 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 2584 bs_byte_to_lba(bs, sizeof(*ctx->super)), 2585 bs_mark_dirty_write, ctx); 2586 } 2587 2588 /* Write a blob to disk */ 2589 static void 2590 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2591 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2592 { 2593 struct spdk_blob_persist_ctx *ctx; 2594 2595 blob_verify_md_op(blob); 2596 2597 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2598 cb_fn(seq, cb_arg, 0); 2599 return; 2600 } 2601 2602 ctx = calloc(1, sizeof(*ctx)); 2603 if (!ctx) { 2604 cb_fn(seq, cb_arg, -ENOMEM); 2605 return; 2606 } 2607 ctx->blob = blob; 2608 ctx->seq = seq; 2609 ctx->cb_fn = cb_fn; 2610 ctx->cb_arg = cb_arg; 2611 2612 /* Multiple blob persists can affect one another, via blob->state or 2613 * blob mutable data changes. To prevent it, queue up the persists. */ 2614 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2615 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2616 return; 2617 } 2618 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2619 2620 bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx); 2621 } 2622 2623 struct spdk_blob_copy_cluster_ctx { 2624 struct spdk_blob *blob; 2625 uint8_t *buf; 2626 uint64_t io_unit; 2627 uint64_t new_cluster; 2628 uint32_t new_extent_page; 2629 spdk_bs_sequence_t *seq; 2630 struct spdk_blob_md_page *new_cluster_page; 2631 }; 2632 2633 struct spdk_blob_free_cluster_ctx { 2634 struct spdk_blob *blob; 2635 uint64_t page; 2636 struct spdk_blob_md_page *md_page; 2637 uint64_t cluster_num; 2638 uint32_t extent_page; 2639 spdk_bs_sequence_t *seq; 2640 }; 2641 2642 static void 2643 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2644 { 2645 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2646 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2647 TAILQ_HEAD(, spdk_bs_request_set) requests; 2648 spdk_bs_user_op_t *op; 2649 2650 TAILQ_INIT(&requests); 2651 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2652 2653 while (!TAILQ_EMPTY(&requests)) { 2654 op = TAILQ_FIRST(&requests); 2655 TAILQ_REMOVE(&requests, op, link); 2656 if (bserrno == 0) { 2657 bs_user_op_execute(op); 2658 } else { 2659 bs_user_op_abort(op, bserrno); 2660 } 2661 } 2662 2663 spdk_free(ctx->buf); 2664 free(ctx); 2665 } 2666 2667 static void 2668 blob_free_cluster_cpl(void *cb_arg, int bserrno) 2669 { 2670 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 2671 spdk_bs_sequence_t *seq = ctx->seq; 2672 2673 bs_sequence_finish(seq, bserrno); 2674 2675 free(ctx); 2676 } 2677 2678 static void 2679 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx) 2680 { 2681 
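/* Roll back the optimistic allocation made in bs_allocate_and_copy_cluster():
 * hand the data cluster, and the extent md page if one was claimed, back to
 * the free pools under bs->used_lock. */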
spdk_spin_lock(&ctx->blob->bs->used_lock); 2682 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2683 if (ctx->new_extent_page != 0) { 2684 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2685 } 2686 spdk_spin_unlock(&ctx->blob->bs->used_lock); 2687 } 2688 2689 static void 2690 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno) 2691 { 2692 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2693 2694 if (bserrno) { 2695 SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno); 2696 } 2697 2698 blob_insert_cluster_revert(ctx); 2699 bs_sequence_finish(ctx->seq, bserrno); 2700 } 2701 2702 static void 2703 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx) 2704 { 2705 struct spdk_bs_cpl cpl; 2706 spdk_bs_batch_t *batch; 2707 struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel); 2708 2709 /* 2710 * We allocated a cluster and we copied data to it. But now, we realized that we don't need 2711 * this cluster and we want to release it. We must ensure that we clear the data on this 2712 * cluster. 2713 * The cluster may later be re-allocated by a thick-provisioned blob for example. When 2714 * reading from this thick-provisioned blob before writing data, we should read zeroes. 2715 */ 2716 2717 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2718 cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl; 2719 cpl.u.blob_basic.cb_arg = ctx; 2720 2721 batch = bs_batch_open(ch, &cpl, ctx->blob); 2722 if (!batch) { 2723 blob_insert_cluster_clear_cpl(ctx, -ENOMEM); 2724 return; 2725 } 2726 2727 bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2728 bs_cluster_to_lba(ctx->blob->bs, 1)); 2729 bs_batch_close(batch); 2730 } 2731 2732 static void 2733 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2734 { 2735 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2736 2737 if (bserrno) { 2738 if (bserrno == -EEXIST) { 2739 /* The metadata insert failed because another thread 2740 * allocated the cluster first. Clear and free our cluster 2741 * but continue without error. 
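 * A typical way to get here: two I/O channels issue first writes to the same
 * unallocated cluster, both allocate and copy in parallel, the md thread
 * inserts one winner into the cluster map, and the loser observes -EEXIST and
 * simply clears and returns its now-redundant cluster.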
*/ 2742 blob_insert_cluster_clear(ctx); 2743 return; 2744 } 2745 2746 blob_insert_cluster_revert(ctx); 2747 } 2748 2749 bs_sequence_finish(ctx->seq, bserrno); 2750 } 2751 2752 static void 2753 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2754 { 2755 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2756 uint32_t cluster_number; 2757 2758 if (bserrno) { 2759 /* The write failed, so jump to the final completion handler */ 2760 bs_sequence_finish(seq, bserrno); 2761 return; 2762 } 2763 2764 cluster_number = bs_io_unit_to_cluster(ctx->blob->bs, ctx->io_unit); 2765 2766 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2767 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2768 } 2769 2770 static void 2771 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2772 { 2773 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2774 2775 if (bserrno != 0) { 2776 /* The read failed, so jump to the final completion handler */ 2777 bs_sequence_finish(seq, bserrno); 2778 return; 2779 } 2780 2781 /* Write whole cluster */ 2782 bs_sequence_write_dev(seq, ctx->buf, 2783 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2784 bs_cluster_to_lba(ctx->blob->bs, 1), 2785 blob_write_copy_cpl, ctx); 2786 } 2787 2788 static bool 2789 blob_can_copy(struct spdk_blob *blob, uint64_t cluster_start_io_unit, uint64_t *base_lba) 2790 { 2791 uint64_t lba = bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit); 2792 2793 return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) && 2794 blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba); 2795 } 2796 2797 static void 2798 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba) 2799 { 2800 struct spdk_blob *blob = ctx->blob; 2801 uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz); 2802 2803 bs_sequence_copy_dev(ctx->seq, 2804 bs_cluster_to_lba(blob->bs, ctx->new_cluster), 2805 src_lba, 2806 lba_count, 2807 blob_write_copy_cpl, ctx); 2808 } 2809 2810 static void 2811 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2812 struct spdk_io_channel *_ch, 2813 uint64_t io_unit, spdk_bs_user_op_t *op) 2814 { 2815 struct spdk_bs_cpl cpl; 2816 struct spdk_bs_channel *ch; 2817 struct spdk_blob_copy_cluster_ctx *ctx; 2818 uint64_t cluster_start_io_unit; 2819 uint32_t cluster_number; 2820 bool is_zeroes; 2821 bool can_copy; 2822 bool is_valid_range; 2823 uint64_t copy_src_lba; 2824 int rc; 2825 2826 ch = spdk_io_channel_get_ctx(_ch); 2827 2828 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2829 /* There are already operations pending. Queue this user op 2830 * and return because it will be re-executed when the outstanding 2831 * cluster allocation completes. */ 2832 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2833 return; 2834 } 2835 2836 /* Round the io_unit offset down to the first io_unit in the cluster */ 2837 cluster_start_io_unit = bs_io_unit_to_cluster_start(blob, io_unit); 2838 2839 /* Calculate which index in the metadata cluster array the corresponding 2840 * cluster is supposed to be at. 
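 * A quick sketch with illustrative sizes: a 1 MiB cluster and 4 KiB io units
 * give 256 io units per cluster, so io_unit 700 rounds down to
 * cluster_start_io_unit 512 and lands in cluster_number 2.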
*/ 2841 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2842 2843 ctx = calloc(1, sizeof(*ctx)); 2844 if (!ctx) { 2845 bs_user_op_abort(op, -ENOMEM); 2846 return; 2847 } 2848 2849 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2850 2851 ctx->blob = blob; 2852 ctx->io_unit = cluster_start_io_unit; 2853 ctx->new_cluster_page = ch->new_cluster_page; 2854 memset(ctx->new_cluster_page, 0, blob->bs->md_page_size); 2855 2856 /* Check if the cluster that we intend to do CoW for is valid for 2857 * the backing dev. For a zeroes backing dev, it'll always be valid. 2858 * For other backing devs, e.g. a snapshot, it could be invalid if 2859 * the blob has been resized after the snapshot was taken. */ 2860 is_valid_range = blob->back_bs_dev->is_range_valid(blob->back_bs_dev, 2861 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2862 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2863 2864 can_copy = is_valid_range && blob_can_copy(blob, cluster_start_io_unit, &copy_src_lba); 2865 2866 is_zeroes = is_valid_range && blob->back_bs_dev->is_zeroes(blob->back_bs_dev, 2867 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2868 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2869 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) { 2870 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2871 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2872 if (!ctx->buf) { 2873 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2874 blob->bs->cluster_sz); 2875 free(ctx); 2876 bs_user_op_abort(op, -ENOMEM); 2877 return; 2878 } 2879 } 2880 2881 spdk_spin_lock(&blob->bs->used_lock); 2882 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2883 false); 2884 spdk_spin_unlock(&blob->bs->used_lock); 2885 if (rc != 0) { 2886 spdk_free(ctx->buf); 2887 free(ctx); 2888 bs_user_op_abort(op, rc); 2889 return; 2890 } 2891 2892 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2893 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2894 cpl.u.blob_basic.cb_arg = ctx; 2895 2896 ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob); 2897 if (!ctx->seq) { 2898 spdk_spin_lock(&blob->bs->used_lock); 2899 bs_release_cluster(blob->bs, ctx->new_cluster); 2900 spdk_spin_unlock(&blob->bs->used_lock); 2901 spdk_free(ctx->buf); 2902 free(ctx); 2903 bs_user_op_abort(op, -ENOMEM); 2904 return; 2905 } 2906 2907 /* Queue the user op to block other incoming operations */ 2908 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2909 2910 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) { 2911 if (can_copy) { 2912 blob_copy(ctx, op, copy_src_lba); 2913 } else { 2914 /* Read cluster from backing device */ 2915 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2916 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2917 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2918 blob_write_copy, ctx); 2919 } 2920 2921 } else { 2922 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2923 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2924 } 2925 } 2926 2927 static inline bool 2928 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2929 uint64_t *lba, uint64_t *lba_count) 2930 { 2931 *lba_count = length; 2932 2933 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2934 assert(blob->back_bs_dev != NULL); 2935 *lba = 
bs_io_unit_to_back_dev_lba(blob, io_unit); 2936 *lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count); 2937 return false; 2938 } else { 2939 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2940 return true; 2941 } 2942 } 2943 2944 struct op_split_ctx { 2945 struct spdk_blob *blob; 2946 struct spdk_io_channel *channel; 2947 uint64_t io_unit_offset; 2948 uint64_t io_units_remaining; 2949 void *curr_payload; 2950 enum spdk_blob_op_type op_type; 2951 spdk_bs_sequence_t *seq; 2952 bool in_submit_ctx; 2953 bool completed_in_submit_ctx; 2954 bool done; 2955 }; 2956 2957 static void 2958 blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2959 { 2960 struct op_split_ctx *ctx = cb_arg; 2961 struct spdk_blob *blob = ctx->blob; 2962 struct spdk_io_channel *ch = ctx->channel; 2963 enum spdk_blob_op_type op_type = ctx->op_type; 2964 uint8_t *buf; 2965 uint64_t offset; 2966 uint64_t length; 2967 uint64_t op_length; 2968 2969 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2970 bs_sequence_finish(ctx->seq, bserrno); 2971 if (ctx->in_submit_ctx) { 2972 /* Defer freeing of the ctx object, since it will be 2973 * accessed when this unwinds back to the submission 2974 * context. 2975 */ 2976 ctx->done = true; 2977 } else { 2978 free(ctx); 2979 } 2980 return; 2981 } 2982 2983 if (ctx->in_submit_ctx) { 2984 /* If this split operation completed in the context 2985 * of its submission, mark the flag and return immediately 2986 * to avoid recursion. 2987 */ 2988 ctx->completed_in_submit_ctx = true; 2989 return; 2990 } 2991 2992 while (true) { 2993 ctx->completed_in_submit_ctx = false; 2994 2995 offset = ctx->io_unit_offset; 2996 length = ctx->io_units_remaining; 2997 buf = ctx->curr_payload; 2998 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob, 2999 offset)); 3000 3001 /* Update length and payload for next operation */ 3002 ctx->io_units_remaining -= op_length; 3003 ctx->io_unit_offset += op_length; 3004 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 3005 ctx->curr_payload += op_length * blob->bs->io_unit_size; 3006 } 3007 3008 assert(!ctx->in_submit_ctx); 3009 ctx->in_submit_ctx = true; 3010 3011 switch (op_type) { 3012 case SPDK_BLOB_READ: 3013 spdk_blob_io_read(blob, ch, buf, offset, op_length, 3014 blob_request_submit_op_split_next, ctx); 3015 break; 3016 case SPDK_BLOB_WRITE: 3017 spdk_blob_io_write(blob, ch, buf, offset, op_length, 3018 blob_request_submit_op_split_next, ctx); 3019 break; 3020 case SPDK_BLOB_UNMAP: 3021 spdk_blob_io_unmap(blob, ch, offset, op_length, 3022 blob_request_submit_op_split_next, ctx); 3023 break; 3024 case SPDK_BLOB_WRITE_ZEROES: 3025 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 3026 blob_request_submit_op_split_next, ctx); 3027 break; 3028 case SPDK_BLOB_READV: 3029 case SPDK_BLOB_WRITEV: 3030 SPDK_ERRLOG("readv/write not valid\n"); 3031 bs_sequence_finish(ctx->seq, -EINVAL); 3032 free(ctx); 3033 return; 3034 } 3035 3036 #ifndef __clang_analyzer__ 3037 /* scan-build reports a false positive around accessing the ctx here. It 3038 * forms a path that recursively calls this function, but then says 3039 * "assuming ctx->in_submit_ctx is false", when that isn't possible. 3040 * This path does free(ctx), returns to here, and reports a use-after-free 3041 * bug. Wrapping this bit of code so that scan-build doesn't see it 3042 * works around the scan-build bug. 
3043 */ 3044 assert(ctx->in_submit_ctx); 3045 ctx->in_submit_ctx = false; 3046 3047 /* If the operation completed immediately, loop back and submit the 3048 * next operation. Otherwise we can return and the next split 3049 * operation will get submitted when this current operation is 3050 * later completed asynchronously. 3051 */ 3052 if (ctx->completed_in_submit_ctx) { 3053 continue; 3054 } else if (ctx->done) { 3055 free(ctx); 3056 } 3057 #endif 3058 break; 3059 } 3060 } 3061 3062 static void 3063 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 3064 void *payload, uint64_t offset, uint64_t length, 3065 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3066 { 3067 struct op_split_ctx *ctx; 3068 spdk_bs_sequence_t *seq; 3069 struct spdk_bs_cpl cpl; 3070 3071 assert(blob != NULL); 3072 3073 ctx = calloc(1, sizeof(struct op_split_ctx)); 3074 if (ctx == NULL) { 3075 cb_fn(cb_arg, -ENOMEM); 3076 return; 3077 } 3078 3079 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3080 cpl.u.blob_basic.cb_fn = cb_fn; 3081 cpl.u.blob_basic.cb_arg = cb_arg; 3082 3083 seq = bs_sequence_start_blob(ch, &cpl, blob); 3084 if (!seq) { 3085 free(ctx); 3086 cb_fn(cb_arg, -ENOMEM); 3087 return; 3088 } 3089 3090 ctx->blob = blob; 3091 ctx->channel = ch; 3092 ctx->curr_payload = payload; 3093 ctx->io_unit_offset = offset; 3094 ctx->io_units_remaining = length; 3095 ctx->op_type = op_type; 3096 ctx->seq = seq; 3097 3098 blob_request_submit_op_split_next(ctx, 0); 3099 } 3100 3101 static void 3102 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno) 3103 { 3104 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 3105 3106 if (bserrno) { 3107 bs_sequence_finish(ctx->seq, bserrno); 3108 free(ctx); 3109 return; 3110 } 3111 3112 blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num, 3113 ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx); 3114 } 3115 3116 static void 3117 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 3118 void *payload, uint64_t offset, uint64_t length, 3119 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3120 { 3121 struct spdk_bs_cpl cpl; 3122 uint64_t lba; 3123 uint64_t lba_count; 3124 bool is_allocated; 3125 3126 assert(blob != NULL); 3127 3128 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3129 cpl.u.blob_basic.cb_fn = cb_fn; 3130 cpl.u.blob_basic.cb_arg = cb_arg; 3131 3132 if (blob->frozen_refcnt) { 3133 /* This blob I/O is frozen */ 3134 spdk_bs_user_op_t *op; 3135 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3136 3137 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3138 if (!op) { 3139 cb_fn(cb_arg, -ENOMEM); 3140 return; 3141 } 3142 3143 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3144 3145 return; 3146 } 3147 3148 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3149 3150 switch (op_type) { 3151 case SPDK_BLOB_READ: { 3152 spdk_bs_batch_t *batch; 3153 3154 batch = bs_batch_open(_ch, &cpl, blob); 3155 if (!batch) { 3156 cb_fn(cb_arg, -ENOMEM); 3157 return; 3158 } 3159 3160 if (is_allocated) { 3161 /* Read from the blob */ 3162 bs_batch_read_dev(batch, payload, lba, lba_count); 3163 } else { 3164 /* Read from the backing block device */ 3165 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 3166 } 3167 3168 bs_batch_close(batch); 3169 break; 3170 } 3171 case SPDK_BLOB_WRITE: 3172 case SPDK_BLOB_WRITE_ZEROES: { 3173 if (is_allocated) { 3174 /* Write to the 
blob */ 3175 spdk_bs_batch_t *batch; 3176 3177 if (lba_count == 0) { 3178 cb_fn(cb_arg, 0); 3179 return; 3180 } 3181 3182 batch = bs_batch_open(_ch, &cpl, blob); 3183 if (!batch) { 3184 cb_fn(cb_arg, -ENOMEM); 3185 return; 3186 } 3187 3188 if (op_type == SPDK_BLOB_WRITE) { 3189 bs_batch_write_dev(batch, payload, lba, lba_count); 3190 } else { 3191 bs_batch_write_zeroes_dev(batch, lba, lba_count); 3192 } 3193 3194 bs_batch_close(batch); 3195 } else { 3196 /* Queue this operation and allocate the cluster */ 3197 spdk_bs_user_op_t *op; 3198 3199 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3200 if (!op) { 3201 cb_fn(cb_arg, -ENOMEM); 3202 return; 3203 } 3204 3205 bs_allocate_and_copy_cluster(blob, _ch, offset, op); 3206 } 3207 break; 3208 } 3209 case SPDK_BLOB_UNMAP: { 3210 struct spdk_blob_free_cluster_ctx *ctx = NULL; 3211 spdk_bs_batch_t *batch; 3212 3213 /* if aligned with cluster release cluster */ 3214 if (spdk_blob_is_thin_provisioned(blob) && is_allocated && 3215 blob_backed_with_zeroes_dev(blob) && 3216 bs_io_units_per_cluster(blob) == length) { 3217 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3218 uint64_t cluster_start_page; 3219 uint32_t cluster_number; 3220 3221 assert(offset % bs_io_units_per_cluster(blob) == 0); 3222 3223 /* Round the io_unit offset down to the first page in the cluster */ 3224 cluster_start_page = bs_io_unit_to_cluster_start(blob, offset); 3225 3226 /* Calculate which index in the metadata cluster array the corresponding 3227 * cluster is supposed to be at. */ 3228 cluster_number = bs_io_unit_to_cluster_number(blob, offset); 3229 3230 ctx = calloc(1, sizeof(*ctx)); 3231 if (!ctx) { 3232 cb_fn(cb_arg, -ENOMEM); 3233 return; 3234 } 3235 /* When freeing a cluster the flow should be (in order): 3236 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak 3237 * old data) 3238 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the 3239 * cluster), update and sync metadata freeing the cluster 3240 * 3. 
Once metadata update is done, complete the user unmap request 3241 */ 3242 ctx->blob = blob; 3243 ctx->page = cluster_start_page; 3244 ctx->cluster_num = cluster_number; 3245 ctx->md_page = bs_channel->new_cluster_page; 3246 ctx->seq = bs_sequence_start_bs(_ch, &cpl); 3247 if (!ctx->seq) { 3248 free(ctx); 3249 cb_fn(cb_arg, -ENOMEM); 3250 return; 3251 } 3252 3253 if (blob->use_extent_table) { 3254 ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number); 3255 } 3256 3257 cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete; 3258 cpl.u.blob_basic.cb_arg = ctx; 3259 } 3260 3261 batch = bs_batch_open(_ch, &cpl, blob); 3262 if (!batch) { 3263 free(ctx); 3264 cb_fn(cb_arg, -ENOMEM); 3265 return; 3266 } 3267 3268 if (is_allocated) { 3269 bs_batch_unmap_dev(batch, lba, lba_count); 3270 } 3271 3272 bs_batch_close(batch); 3273 break; 3274 } 3275 case SPDK_BLOB_READV: 3276 case SPDK_BLOB_WRITEV: 3277 SPDK_ERRLOG("readv/write not valid\n"); 3278 cb_fn(cb_arg, -EINVAL); 3279 break; 3280 } 3281 } 3282 3283 static void 3284 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3285 void *payload, uint64_t offset, uint64_t length, 3286 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3287 { 3288 assert(blob != NULL); 3289 3290 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 3291 cb_fn(cb_arg, -EPERM); 3292 return; 3293 } 3294 3295 if (length == 0) { 3296 cb_fn(cb_arg, 0); 3297 return; 3298 } 3299 3300 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3301 cb_fn(cb_arg, -EINVAL); 3302 return; 3303 } 3304 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) { 3305 blob_request_submit_op_single(_channel, blob, payload, offset, length, 3306 cb_fn, cb_arg, op_type); 3307 } else { 3308 blob_request_submit_op_split(_channel, blob, payload, offset, length, 3309 cb_fn, cb_arg, op_type); 3310 } 3311 } 3312 3313 struct rw_iov_ctx { 3314 struct spdk_blob *blob; 3315 struct spdk_io_channel *channel; 3316 spdk_blob_op_complete cb_fn; 3317 void *cb_arg; 3318 bool read; 3319 int iovcnt; 3320 struct iovec *orig_iov; 3321 uint64_t io_unit_offset; 3322 uint64_t io_units_remaining; 3323 uint64_t io_units_done; 3324 struct spdk_blob_ext_io_opts *ext_io_opts; 3325 struct iovec iov[0]; 3326 }; 3327 3328 static void 3329 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3330 { 3331 assert(cb_arg == NULL); 3332 bs_sequence_finish(seq, bserrno); 3333 } 3334 3335 static void 3336 rw_iov_split_next(void *cb_arg, int bserrno) 3337 { 3338 struct rw_iov_ctx *ctx = cb_arg; 3339 struct spdk_blob *blob = ctx->blob; 3340 struct iovec *iov, *orig_iov; 3341 int iovcnt; 3342 size_t orig_iovoff; 3343 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 3344 uint64_t byte_count; 3345 3346 if (bserrno != 0 || ctx->io_units_remaining == 0) { 3347 ctx->cb_fn(ctx->cb_arg, bserrno); 3348 free(ctx); 3349 return; 3350 } 3351 3352 io_unit_offset = ctx->io_unit_offset; 3353 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 3354 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 3355 /* 3356 * Get index and offset into the original iov array for our current position in the I/O sequence. 3357 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 3358 * point to the current position in the I/O sequence. 
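 * A worked example under assumed values: io_unit_size == 4096 and
 * io_units_done == 3 give byte_count == 12288; if orig_iov[0].iov_len is 8192,
 * the loop consumes that whole iov (byte_count drops to 4096, orig_iov
 * advances), then stops inside the next iov with orig_iovoff == 4096.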
3359 */ 3360 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 3361 orig_iov = &ctx->orig_iov[0]; 3362 orig_iovoff = 0; 3363 while (byte_count > 0) { 3364 if (byte_count >= orig_iov->iov_len) { 3365 byte_count -= orig_iov->iov_len; 3366 orig_iov++; 3367 } else { 3368 orig_iovoff = byte_count; 3369 byte_count = 0; 3370 } 3371 } 3372 3373 /* 3374 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 3375 * bytes of this next I/O remain to be accounted for in the new iov array. 3376 */ 3377 byte_count = io_units_count * blob->bs->io_unit_size; 3378 iov = &ctx->iov[0]; 3379 iovcnt = 0; 3380 while (byte_count > 0) { 3381 assert(iovcnt < ctx->iovcnt); 3382 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 3383 iov->iov_base = orig_iov->iov_base + orig_iovoff; 3384 byte_count -= iov->iov_len; 3385 orig_iovoff = 0; 3386 orig_iov++; 3387 iov++; 3388 iovcnt++; 3389 } 3390 3391 ctx->io_unit_offset += io_units_count; 3392 ctx->io_units_remaining -= io_units_count; 3393 ctx->io_units_done += io_units_count; 3394 iov = &ctx->iov[0]; 3395 3396 if (ctx->read) { 3397 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3398 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3399 } else { 3400 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3401 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3402 } 3403 } 3404 3405 static void 3406 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3407 struct iovec *iov, int iovcnt, 3408 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 3409 struct spdk_blob_ext_io_opts *ext_io_opts) 3410 { 3411 struct spdk_bs_cpl cpl; 3412 3413 assert(blob != NULL); 3414 3415 if (!read && blob->data_ro) { 3416 cb_fn(cb_arg, -EPERM); 3417 return; 3418 } 3419 3420 if (length == 0) { 3421 cb_fn(cb_arg, 0); 3422 return; 3423 } 3424 3425 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3426 cb_fn(cb_arg, -EINVAL); 3427 return; 3428 } 3429 3430 /* 3431 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 3432 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 3433 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 3434 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 3435 * to allocate a separate iov array and split the I/O such that none of the resulting 3436 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 3437 * but since this case happens very infrequently, any performance impact will be negligible. 3438 * 3439 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 3440 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 3441 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 3442 * when the batch was completed, to allow for freeing the memory for the iov arrays. 
3443 */ 3444 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 3445 uint64_t lba_count; 3446 uint64_t lba; 3447 bool is_allocated; 3448 3449 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3450 cpl.u.blob_basic.cb_fn = cb_fn; 3451 cpl.u.blob_basic.cb_arg = cb_arg; 3452 3453 if (blob->frozen_refcnt) { 3454 /* This blob I/O is frozen */ 3455 enum spdk_blob_op_type op_type; 3456 spdk_bs_user_op_t *op; 3457 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 3458 3459 op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 3460 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 3461 if (!op) { 3462 cb_fn(cb_arg, -ENOMEM); 3463 return; 3464 } 3465 3466 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3467 3468 return; 3469 } 3470 3471 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3472 3473 if (read) { 3474 spdk_bs_sequence_t *seq; 3475 3476 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3477 if (!seq) { 3478 cb_fn(cb_arg, -ENOMEM); 3479 return; 3480 } 3481 3482 seq->ext_io_opts = ext_io_opts; 3483 3484 if (is_allocated) { 3485 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3486 } else { 3487 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3488 rw_iov_done, NULL); 3489 } 3490 } else { 3491 if (is_allocated) { 3492 spdk_bs_sequence_t *seq; 3493 3494 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3495 if (!seq) { 3496 cb_fn(cb_arg, -ENOMEM); 3497 return; 3498 } 3499 3500 seq->ext_io_opts = ext_io_opts; 3501 3502 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3503 } else { 3504 /* Queue this operation and allocate the cluster */ 3505 spdk_bs_user_op_t *op; 3506 3507 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3508 length); 3509 if (!op) { 3510 cb_fn(cb_arg, -ENOMEM); 3511 return; 3512 } 3513 3514 op->ext_io_opts = ext_io_opts; 3515 3516 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3517 } 3518 } 3519 } else { 3520 struct rw_iov_ctx *ctx; 3521 3522 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3523 if (ctx == NULL) { 3524 cb_fn(cb_arg, -ENOMEM); 3525 return; 3526 } 3527 3528 ctx->blob = blob; 3529 ctx->channel = _channel; 3530 ctx->cb_fn = cb_fn; 3531 ctx->cb_arg = cb_arg; 3532 ctx->read = read; 3533 ctx->orig_iov = iov; 3534 ctx->iovcnt = iovcnt; 3535 ctx->io_unit_offset = offset; 3536 ctx->io_units_remaining = length; 3537 ctx->io_units_done = 0; 3538 ctx->ext_io_opts = ext_io_opts; 3539 3540 rw_iov_split_next(ctx, 0); 3541 } 3542 } 3543 3544 static struct spdk_blob * 3545 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3546 { 3547 struct spdk_blob find; 3548 3549 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3550 return NULL; 3551 } 3552 3553 find.id = blobid; 3554 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3555 } 3556 3557 static void 3558 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3559 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3560 { 3561 assert(blob != NULL); 3562 *snapshot_entry = NULL; 3563 *clone_entry = NULL; 3564 3565 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3566 return; 3567 } 3568 3569 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3570 if ((*snapshot_entry)->id == blob->parent_id) { 3571 break; 3572 } 3573 } 3574 3575 if (*snapshot_entry != NULL) { 3576 TAILQ_FOREACH(*clone_entry, 
&(*snapshot_entry)->clones, link) { 3577 if ((*clone_entry)->id == blob->id) { 3578 break; 3579 } 3580 } 3581 3582 assert(*clone_entry != NULL); 3583 } 3584 } 3585 3586 static int 3587 bs_channel_create(void *io_device, void *ctx_buf) 3588 { 3589 struct spdk_blob_store *bs = io_device; 3590 struct spdk_bs_channel *channel = ctx_buf; 3591 struct spdk_bs_dev *dev; 3592 uint32_t max_ops = bs->max_channel_ops; 3593 uint32_t i; 3594 3595 dev = bs->dev; 3596 3597 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3598 if (!channel->req_mem) { 3599 return -1; 3600 } 3601 3602 TAILQ_INIT(&channel->reqs); 3603 3604 for (i = 0; i < max_ops; i++) { 3605 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3606 } 3607 3608 channel->bs = bs; 3609 channel->dev = dev; 3610 channel->dev_channel = dev->create_channel(dev); 3611 3612 if (!channel->dev_channel) { 3613 SPDK_ERRLOG("Failed to create device channel.\n"); 3614 free(channel->req_mem); 3615 return -1; 3616 } 3617 3618 channel->new_cluster_page = spdk_zmalloc(bs->md_page_size, 0, NULL, SPDK_ENV_NUMA_ID_ANY, 3619 SPDK_MALLOC_DMA); 3620 if (!channel->new_cluster_page) { 3621 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3622 free(channel->req_mem); 3623 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3624 return -1; 3625 } 3626 3627 TAILQ_INIT(&channel->need_cluster_alloc); 3628 TAILQ_INIT(&channel->queued_io); 3629 RB_INIT(&channel->esnap_channels); 3630 3631 return 0; 3632 } 3633 3634 static void 3635 bs_channel_destroy(void *io_device, void *ctx_buf) 3636 { 3637 struct spdk_bs_channel *channel = ctx_buf; 3638 spdk_bs_user_op_t *op; 3639 3640 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3641 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3642 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3643 bs_user_op_abort(op, -EIO); 3644 } 3645 3646 while (!TAILQ_EMPTY(&channel->queued_io)) { 3647 op = TAILQ_FIRST(&channel->queued_io); 3648 TAILQ_REMOVE(&channel->queued_io, op, link); 3649 bs_user_op_abort(op, -EIO); 3650 } 3651 3652 blob_esnap_destroy_bs_channel(channel); 3653 3654 free(channel->req_mem); 3655 spdk_free(channel->new_cluster_page); 3656 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3657 } 3658 3659 static void 3660 bs_dev_destroy(void *io_device) 3661 { 3662 struct spdk_blob_store *bs = io_device; 3663 struct spdk_blob *blob, *blob_tmp; 3664 3665 bs->dev->destroy(bs->dev); 3666 3667 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3668 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3669 spdk_bit_array_clear(bs->open_blobids, blob->id); 3670 blob_free(blob); 3671 } 3672 3673 spdk_spin_destroy(&bs->used_lock); 3674 3675 spdk_bit_array_free(&bs->open_blobids); 3676 spdk_bit_array_free(&bs->used_blobids); 3677 spdk_bit_array_free(&bs->used_md_pages); 3678 spdk_bit_pool_free(&bs->used_clusters); 3679 /* 3680 * If this function is called for any reason except a successful unload, 3681 * the unload_cpl type will be NONE and this will be a nop. 
3682 */ 3683 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3684 3685 free(bs); 3686 } 3687 3688 static int 3689 bs_blob_list_add(struct spdk_blob *blob) 3690 { 3691 spdk_blob_id snapshot_id; 3692 struct spdk_blob_list *snapshot_entry = NULL; 3693 struct spdk_blob_list *clone_entry = NULL; 3694 3695 assert(blob != NULL); 3696 3697 snapshot_id = blob->parent_id; 3698 if (snapshot_id == SPDK_BLOBID_INVALID || 3699 snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 3700 return 0; 3701 } 3702 3703 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3704 if (snapshot_entry == NULL) { 3705 /* Snapshot not found */ 3706 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3707 if (snapshot_entry == NULL) { 3708 return -ENOMEM; 3709 } 3710 snapshot_entry->id = snapshot_id; 3711 TAILQ_INIT(&snapshot_entry->clones); 3712 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3713 } else { 3714 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3715 if (clone_entry->id == blob->id) { 3716 break; 3717 } 3718 } 3719 } 3720 3721 if (clone_entry == NULL) { 3722 /* Clone not found */ 3723 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3724 if (clone_entry == NULL) { 3725 return -ENOMEM; 3726 } 3727 clone_entry->id = blob->id; 3728 TAILQ_INIT(&clone_entry->clones); 3729 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3730 snapshot_entry->clone_count++; 3731 } 3732 3733 return 0; 3734 } 3735 3736 static void 3737 bs_blob_list_remove(struct spdk_blob *blob) 3738 { 3739 struct spdk_blob_list *snapshot_entry = NULL; 3740 struct spdk_blob_list *clone_entry = NULL; 3741 3742 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3743 3744 if (snapshot_entry == NULL) { 3745 return; 3746 } 3747 3748 blob->parent_id = SPDK_BLOBID_INVALID; 3749 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3750 free(clone_entry); 3751 3752 snapshot_entry->clone_count--; 3753 } 3754 3755 static int 3756 bs_blob_list_free(struct spdk_blob_store *bs) 3757 { 3758 struct spdk_blob_list *snapshot_entry; 3759 struct spdk_blob_list *snapshot_entry_tmp; 3760 struct spdk_blob_list *clone_entry; 3761 struct spdk_blob_list *clone_entry_tmp; 3762 3763 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3764 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3765 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3766 free(clone_entry); 3767 } 3768 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3769 free(snapshot_entry); 3770 } 3771 3772 return 0; 3773 } 3774 3775 static void 3776 bs_free(struct spdk_blob_store *bs) 3777 { 3778 bs_blob_list_free(bs); 3779 3780 bs_unregister_md_thread(bs); 3781 spdk_io_device_unregister(bs, bs_dev_destroy); 3782 } 3783 3784 void 3785 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3786 { 3787 3788 if (!opts) { 3789 SPDK_ERRLOG("opts should not be NULL\n"); 3790 return; 3791 } 3792 3793 if (!opts_size) { 3794 SPDK_ERRLOG("opts_size should not be zero value\n"); 3795 return; 3796 } 3797 3798 memset(opts, 0, opts_size); 3799 opts->opts_size = opts_size; 3800 3801 #define FIELD_OK(field) \ 3802 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3803 3804 #define SET_FIELD(field, value) \ 3805 if (FIELD_OK(field)) { \ 3806 opts->field = value; \ 3807 } \ 3808 3809 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3810 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3811 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3812 
SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3813 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3814 3815 if (FIELD_OK(bstype)) { 3816 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3817 } 3818 3819 SET_FIELD(iter_cb_fn, NULL); 3820 SET_FIELD(iter_cb_arg, NULL); 3821 SET_FIELD(force_recover, false); 3822 SET_FIELD(esnap_bs_dev_create, NULL); 3823 SET_FIELD(esnap_ctx, NULL); 3824 3825 #undef FIELD_OK 3826 #undef SET_FIELD 3827 } 3828 3829 static int 3830 bs_opts_verify(struct spdk_bs_opts *opts) 3831 { 3832 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3833 opts->max_channel_ops == 0) { 3834 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3835 return -1; 3836 } 3837 3838 if ((opts->cluster_sz % SPDK_BS_PAGE_SIZE) != 0) { 3839 SPDK_ERRLOG("Cluster size %" PRIu32 " is not an integral multiple of blocklen %" PRIu32"\n", 3840 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3841 return -1; 3842 } 3843 3844 return 0; 3845 } 3846 3847 /* START spdk_bs_load */ 3848 3849 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3850 3851 struct spdk_bs_load_ctx { 3852 struct spdk_blob_store *bs; 3853 struct spdk_bs_super_block *super; 3854 3855 struct spdk_bs_md_mask *mask; 3856 bool in_page_chain; 3857 uint32_t page_index; 3858 uint32_t cur_page; 3859 struct spdk_blob_md_page *page; 3860 3861 uint64_t num_extent_pages; 3862 uint32_t *extent_page_num; 3863 struct spdk_blob_md_page *extent_pages; 3864 struct spdk_bit_array *used_clusters; 3865 3866 spdk_bs_sequence_t *seq; 3867 spdk_blob_op_with_handle_complete iter_cb_fn; 3868 void *iter_cb_arg; 3869 struct spdk_blob *blob; 3870 spdk_blob_id blobid; 3871 3872 bool force_recover; 3873 3874 /* These fields are used in the spdk_bs_dump path. */ 3875 bool dumping; 3876 FILE *fp; 3877 spdk_bs_dump_print_xattr print_xattr_fn; 3878 char xattr_name[4096]; 3879 }; 3880 3881 static void 3882 bs_init_per_cluster_fields(struct spdk_blob_store *bs) 3883 { 3884 bs->pages_per_cluster = bs->cluster_sz / bs->md_page_size; 3885 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3886 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3887 } 3888 bs->io_units_per_cluster = bs->cluster_sz / bs->io_unit_size; 3889 if (spdk_u32_is_pow2(bs->io_units_per_cluster)) { 3890 bs->io_units_per_cluster_shift = spdk_u32log2(bs->io_units_per_cluster); 3891 } 3892 } 3893 3894 static int 3895 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3896 struct spdk_bs_load_ctx **_ctx) 3897 { 3898 struct spdk_blob_store *bs; 3899 struct spdk_bs_load_ctx *ctx; 3900 uint64_t dev_size; 3901 uint32_t md_page_size; 3902 int rc; 3903 3904 dev_size = dev->blocklen * dev->blockcnt; 3905 if (dev_size < opts->cluster_sz) { 3906 /* Device size cannot be smaller than cluster size of blobstore */ 3907 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3908 dev_size, opts->cluster_sz); 3909 return -ENOSPC; 3910 } 3911 3912 md_page_size = spdk_max(spdk_max(dev->phys_blocklen, SPDK_BS_PAGE_SIZE), 3913 opts->md_page_size); 3914 if (opts->cluster_sz < md_page_size) { 3915 /* Cluster size cannot be smaller than page size */ 3916 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3917 opts->cluster_sz, md_page_size); 3918 return -EINVAL; 3919 } 3920 bs = calloc(1, sizeof(struct spdk_blob_store)); 3921 if (!bs) { 3922 return -ENOMEM; 3923 } 3924 3925 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3926 if (!ctx) { 3927 free(bs); 3928 
return -ENOMEM; 3929 } 3930 3931 ctx->bs = bs; 3932 ctx->iter_cb_fn = opts->iter_cb_fn; 3933 ctx->iter_cb_arg = opts->iter_cb_arg; 3934 ctx->force_recover = opts->force_recover; 3935 3936 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3937 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 3938 if (!ctx->super) { 3939 free(ctx); 3940 free(bs); 3941 return -ENOMEM; 3942 } 3943 3944 RB_INIT(&bs->open_blobs); 3945 TAILQ_INIT(&bs->snapshots); 3946 bs->dev = dev; 3947 bs->md_page_size = md_page_size; 3948 bs->md_thread = spdk_get_thread(); 3949 assert(bs->md_thread != NULL); 3950 3951 /* 3952 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3953 * even multiple of the cluster size. 3954 */ 3955 bs->cluster_sz = opts->cluster_sz; 3956 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3957 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3958 if (!ctx->used_clusters) { 3959 spdk_free(ctx->super); 3960 free(ctx); 3961 free(bs); 3962 return -ENOMEM; 3963 } 3964 3965 bs->num_free_clusters = bs->total_clusters; 3966 bs->io_unit_size = dev->blocklen; 3967 bs_init_per_cluster_fields(bs); 3968 3969 bs->max_channel_ops = opts->max_channel_ops; 3970 bs->super_blob = SPDK_BLOBID_INVALID; 3971 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3972 bs->esnap_bs_dev_create = opts->esnap_bs_dev_create; 3973 bs->esnap_ctx = opts->esnap_ctx; 3974 3975 /* The metadata is assumed to be at least 1 page */ 3976 bs->used_md_pages = spdk_bit_array_create(1); 3977 bs->used_blobids = spdk_bit_array_create(0); 3978 bs->open_blobids = spdk_bit_array_create(0); 3979 3980 spdk_spin_init(&bs->used_lock); 3981 3982 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3983 sizeof(struct spdk_bs_channel), "blobstore"); 3984 rc = bs_register_md_thread(bs); 3985 if (rc == -1) { 3986 spdk_io_device_unregister(bs, NULL); 3987 spdk_spin_destroy(&bs->used_lock); 3988 spdk_bit_array_free(&bs->open_blobids); 3989 spdk_bit_array_free(&bs->used_blobids); 3990 spdk_bit_array_free(&bs->used_md_pages); 3991 spdk_bit_array_free(&ctx->used_clusters); 3992 spdk_free(ctx->super); 3993 free(ctx); 3994 free(bs); 3995 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3996 return -ENOMEM; 3997 } 3998 3999 *_ctx = ctx; 4000 *_bs = bs; 4001 return 0; 4002 } 4003 4004 static void 4005 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 4006 { 4007 assert(bserrno != 0); 4008 4009 spdk_free(ctx->mask); 4010 spdk_free(ctx->super); 4011 bs_sequence_finish(ctx->seq, bserrno); 4012 bs_free(ctx->bs); 4013 spdk_bit_array_free(&ctx->used_clusters); 4014 free(ctx); 4015 } 4016 4017 static void 4018 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 4019 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 4020 { 4021 /* Update the values in the super block */ 4022 super->super_blob = bs->super_blob; 4023 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 4024 super->crc = blob_md_page_calc_crc(super); 4025 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 4026 bs_byte_to_lba(bs, sizeof(*super)), 4027 cb_fn, cb_arg); 4028 } 4029 4030 static void 4031 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4032 { 4033 struct spdk_bs_load_ctx *ctx = arg; 4034 uint64_t mask_size, lba, lba_count; 4035 4036 /* Write out the used clusters mask */ 4037 mask_size = ctx->super->used_cluster_mask_len * ctx->bs->md_page_size; 4038 ctx->mask = spdk_zmalloc(mask_size, 0x1000, 
NULL, 4039 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4040 if (!ctx->mask) { 4041 bs_load_ctx_fail(ctx, -ENOMEM); 4042 return; 4043 } 4044 4045 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 4046 ctx->mask->length = ctx->bs->total_clusters; 4047 /* We could get here through the normal unload path, or through dirty 4048 * shutdown recovery. For the normal unload path, we use the mask from 4049 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 4050 * only the bit array from the load ctx. 4051 */ 4052 if (ctx->bs->used_clusters) { 4053 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 4054 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 4055 } else { 4056 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 4057 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 4058 } 4059 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4060 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4061 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4062 } 4063 4064 static void 4065 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4066 { 4067 struct spdk_bs_load_ctx *ctx = arg; 4068 uint64_t mask_size, lba, lba_count; 4069 4070 mask_size = ctx->super->used_page_mask_len * ctx->bs->md_page_size; 4071 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4072 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4073 if (!ctx->mask) { 4074 bs_load_ctx_fail(ctx, -ENOMEM); 4075 return; 4076 } 4077 4078 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 4079 ctx->mask->length = ctx->super->md_len; 4080 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 4081 4082 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4083 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4084 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4085 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4086 } 4087 4088 static void 4089 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4090 { 4091 struct spdk_bs_load_ctx *ctx = arg; 4092 uint64_t mask_size, lba, lba_count; 4093 4094 if (ctx->super->used_blobid_mask_len == 0) { 4095 /* 4096 * This is a pre-v3 on-disk format where the blobid mask does not get 4097 * written to disk. 
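 * Such blobstores remain loadable: used_blobid_mask_len == 0 makes
 * bs_load_super_cpl() take the bs_recover() path, which rebuilds the
 * used_blobids bit array by replaying every valid metadata page instead
 * of reading a persisted mask.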
4098 */
4099 cb_fn(seq, arg, 0);
4100 return;
4101 }
4102
4103 mask_size = ctx->super->used_blobid_mask_len * ctx->bs->md_page_size;
4104 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY,
4105 SPDK_MALLOC_DMA);
4106 if (!ctx->mask) {
4107 bs_load_ctx_fail(ctx, -ENOMEM);
4108 return;
4109 }
4110
4111 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
4112 ctx->mask->length = ctx->super->md_len;
4113 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
4114
4115 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask);
4116 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4117 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4118 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
4119 }
4120
4121 static void
4122 blob_set_thin_provision(struct spdk_blob *blob)
4123 {
4124 blob_verify_md_op(blob);
4125 blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
4126 blob->state = SPDK_BLOB_STATE_DIRTY;
4127 }
4128
4129 static void
4130 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method)
4131 {
4132 blob_verify_md_op(blob);
4133 blob->clear_method = clear_method;
4134 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT);
4135 blob->state = SPDK_BLOB_STATE_DIRTY;
4136 }
4137
4138 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);
4139
4140 static void
4141 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
4142 {
4143 struct spdk_bs_load_ctx *ctx = cb_arg;
4144 spdk_blob_id id;
4145 int64_t page_num;
4146
4147 /* Iterate to the next blob (we can't use the spdk_bs_iter_next() function
4148 * since our last blob has been removed) */
4149 page_num = bs_blobid_to_page(ctx->blobid);
4150 page_num++;
4151 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
4152 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
4153 bs_load_iter(ctx, NULL, -ENOENT);
4154 return;
4155 }
4156
4157 id = bs_page_to_blobid(page_num);
4158
4159 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx);
4160 }
4161
4162 static void
4163 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
4164 {
4165 struct spdk_bs_load_ctx *ctx = cb_arg;
4166
4167 if (bserrno != 0) {
4168 SPDK_ERRLOG("Failed to close corrupted blob\n");
4169 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4170 return;
4171 }
4172
4173 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx);
4174 }
4175
4176 static void
4177 bs_delete_corrupted_blob(void *cb_arg, int bserrno)
4178 {
4179 struct spdk_bs_load_ctx *ctx = cb_arg;
4180 uint64_t i;
4181
4182 if (bserrno != 0) {
4183 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
4184 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4185 return;
4186 }
4187
4188 /* The snapshot and the clone have the same copy of the cluster map and
4189 * extent pages at this point. Clear both for the snapshot now,
4190 * so that they won't be cleared for the clone later when we remove the snapshot.
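 * For example (hypothetical ids): if snapshot S was being deleted when
 * power was lost and its clone C still references cluster 42, zeroing
 * S's copy of the map here ensures that deleting S below does not
 * release cluster 42 out from under C.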
4191 * Also set thin provision to pass data corruption check */ 4192 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 4193 ctx->blob->active.clusters[i] = 0; 4194 } 4195 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 4196 ctx->blob->active.extent_pages[i] = 0; 4197 } 4198 4199 ctx->blob->active.num_allocated_clusters = 0; 4200 4201 ctx->blob->md_ro = false; 4202 4203 blob_set_thin_provision(ctx->blob); 4204 4205 ctx->blobid = ctx->blob->id; 4206 4207 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 4208 } 4209 4210 static void 4211 bs_update_corrupted_blob(void *cb_arg, int bserrno) 4212 { 4213 struct spdk_bs_load_ctx *ctx = cb_arg; 4214 4215 if (bserrno != 0) { 4216 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4217 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4218 return; 4219 } 4220 4221 ctx->blob->md_ro = false; 4222 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 4223 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 4224 spdk_blob_set_read_only(ctx->blob); 4225 4226 if (ctx->iter_cb_fn) { 4227 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 4228 } 4229 bs_blob_list_add(ctx->blob); 4230 4231 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4232 } 4233 4234 static void 4235 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 4236 { 4237 struct spdk_bs_load_ctx *ctx = cb_arg; 4238 4239 if (bserrno != 0) { 4240 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 4241 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4242 return; 4243 } 4244 4245 if (blob->parent_id == ctx->blob->id) { 4246 /* Power failure occurred before updating clone (snapshot delete case) 4247 * or after updating clone (creating snapshot case) - keep snapshot */ 4248 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 4249 } else { 4250 /* Power failure occurred after updating clone (snapshot delete case) 4251 * or before updating clone (creating snapshot case) - remove snapshot */ 4252 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 4253 } 4254 } 4255 4256 static void 4257 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 4258 { 4259 struct spdk_bs_load_ctx *ctx = arg; 4260 const void *value; 4261 size_t len; 4262 int rc = 0; 4263 4264 if (bserrno == 0) { 4265 /* Examine blob if it is corrupted after power failure. Fix 4266 * the ones that can be fixed and remove any other corrupted 4267 * ones. If it is not corrupted just process it */ 4268 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 4269 if (rc != 0) { 4270 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 4271 if (rc != 0) { 4272 /* Not corrupted - process it and continue with iterating through blobs */ 4273 if (ctx->iter_cb_fn) { 4274 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 4275 } 4276 bs_blob_list_add(blob); 4277 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 4278 return; 4279 } 4280 4281 } 4282 4283 assert(len == sizeof(spdk_blob_id)); 4284 4285 ctx->blob = blob; 4286 4287 /* Open clone to check if we are able to fix this blob or should we remove it */ 4288 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 4289 return; 4290 } else if (bserrno == -ENOENT) { 4291 bserrno = 0; 4292 } else { 4293 /* 4294 * This case needs to be looked at further. Same problem 4295 * exists with applications that rely on explicit blob 4296 * iteration. We should just skip the blob that failed 4297 * to load and continue on to the next one. 
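 * A sketch of such a skip (hypothetical, not what the code below does;
 * failed_id stands for the id of the blob that failed to load):
 *
 *	page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids,
 *			bs_blobid_to_page(failed_id) + 1);
 *	spdk_bs_open_blob(ctx->bs, bs_page_to_blobid(page_num), bs_load_iter, ctx);
 *
 * i.e. the same advance that bs_delete_corrupted_blob_cpl() performs above.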
4298 */
4299 SPDK_ERRLOG("Error in iterating blobs\n");
4300 }
4301
4302 ctx->iter_cb_fn = NULL;
4303
4304 spdk_free(ctx->super);
4305 bs_sequence_finish(ctx->seq, bserrno);
4306 free(ctx);
4307 }
4308
4309 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
4310
4311 static void
4312 bs_load_complete(struct spdk_bs_load_ctx *ctx)
4313 {
4314 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
4315 if (ctx->dumping) {
4316 bs_dump_read_md_page(ctx->seq, ctx);
4317 return;
4318 }
4319 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx);
4320 }
4321
4322 static void
4323 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4324 {
4325 struct spdk_bs_load_ctx *ctx = cb_arg;
4326 int rc;
4327
4328 /* The type must be correct */
4329 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
4330
4331 /* The length of the mask (in bits) must not be greater than
4332 * the length of the buffer (converted to bits) */
4333 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * ctx->super->md_page_size * 8));
4334
4335 /* The length of the mask must be exactly equal to the size
4336 * (in pages) of the metadata region */
4337 assert(ctx->mask->length == ctx->super->md_len);
4338
4339 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
4340 if (rc < 0) {
4341 bs_load_ctx_fail(ctx, rc);
4342 return;
4343 }
4344
4345 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask);
4346 spdk_free(ctx->mask);
4347
4348 bs_load_complete(ctx);
4349 }
4350
4351 static void
4352 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4353 {
4354 struct spdk_bs_load_ctx *ctx = cb_arg;
4355 uint64_t lba, lba_count, mask_size;
4356 int rc;
4357
4358 if (bserrno != 0) {
4359 bs_load_ctx_fail(ctx, bserrno);
4360 return;
4361 }
4362
4363 /* The type must be correct */
4364 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
4365 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4366 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
4367 struct spdk_blob_md_page) * 8));
4368 /*
4369 * The length of the mask must be equal to or larger than the total number of clusters. It may be
4370 * larger than the total number of clusters due to a failed spdk_bs_grow.
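 * For example (illustrative numbers): growing a 100-cluster blobstore to
 * 200 clusters may persist the 200-bit mask before the super block update
 * lands; if power is lost in between, the next load sees
 * total_clusters == 100 with mask->length == 200, and the mask is simply
 * shrunk back below.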
4371 */
4372 assert(ctx->mask->length >= ctx->bs->total_clusters);
4373 if (ctx->mask->length > ctx->bs->total_clusters) {
4374 SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n");
4375 ctx->mask->length = ctx->bs->total_clusters;
4376 }
4377
4378 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length);
4379 if (rc < 0) {
4380 spdk_free(ctx->mask);
4381 bs_load_ctx_fail(ctx, rc);
4382 return;
4383 }
4384
4385 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask);
4386 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters);
4387 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
4388
4389 spdk_free(ctx->mask);
4390
4391 /* Read the used blobids mask */
4392 mask_size = ctx->super->used_blobid_mask_len * ctx->super->md_page_size;
4393 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY,
4394 SPDK_MALLOC_DMA);
4395 if (!ctx->mask) {
4396 bs_load_ctx_fail(ctx, -ENOMEM);
4397 return;
4398 }
4399 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4400 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4401 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4402 bs_load_used_blobids_cpl, ctx);
4403 }
4404
4405 static void
4406 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4407 {
4408 struct spdk_bs_load_ctx *ctx = cb_arg;
4409 uint64_t lba, lba_count, mask_size;
4410 int rc;
4411
4412 if (bserrno != 0) {
4413 bs_load_ctx_fail(ctx, bserrno);
4414 return;
4415 }
4416
4417 /* The type must be correct */
4418 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
4419 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4420 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * ctx->super->md_page_size *
4421 8));
4422 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
4423 if (ctx->mask->length != ctx->super->md_len) {
4424 SPDK_ERRLOG("mismatched md_len in used_pages mask: "
4425 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n",
4426 ctx->mask->length, ctx->super->md_len);
4427 assert(false);
4428 }
4429
4430 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
4431 if (rc < 0) {
4432 bs_load_ctx_fail(ctx, rc);
4433 return;
4434 }
4435
4436 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask);
4437 spdk_free(ctx->mask);
4438
4439 /* Read the used clusters mask */
4440 mask_size = ctx->super->used_cluster_mask_len * ctx->super->md_page_size;
4441 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY,
4442 SPDK_MALLOC_DMA);
4443 if (!ctx->mask) {
4444 bs_load_ctx_fail(ctx, -ENOMEM);
4445 return;
4446 }
4447 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
4448 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
4449 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4450 bs_load_used_clusters_cpl, ctx);
4451 }
4452
4453 static void
4454 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
4455 {
4456 uint64_t lba, lba_count, mask_size;
4457
4458 /* Read the used pages mask */
4459 mask_size = ctx->super->used_page_mask_len * ctx->super->md_page_size;
4460 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
4461 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
4462 if (!ctx->mask) {
4463 bs_load_ctx_fail(ctx, -ENOMEM);
4464 return;
4465 }
4466
4467 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
4468 lba_count = bs_page_to_lba(ctx->bs,
ctx->super->used_page_mask_len); 4469 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 4470 bs_load_used_pages_cpl, ctx); 4471 } 4472 4473 static int 4474 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 4475 { 4476 struct spdk_blob_store *bs = ctx->bs; 4477 struct spdk_blob_md_descriptor *desc; 4478 size_t cur_desc = 0; 4479 4480 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4481 while (cur_desc < sizeof(page->descriptors)) { 4482 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4483 if (desc->length == 0) { 4484 /* If padding and length are 0, this terminates the page */ 4485 break; 4486 } 4487 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4488 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4489 unsigned int i, j; 4490 unsigned int cluster_count = 0; 4491 uint32_t cluster_idx; 4492 4493 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4494 4495 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4496 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 4497 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 4498 /* 4499 * cluster_idx = 0 means an unallocated cluster - don't mark that 4500 * in the used cluster map. 4501 */ 4502 if (cluster_idx != 0) { 4503 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 4504 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 4505 if (bs->num_free_clusters == 0) { 4506 return -ENOSPC; 4507 } 4508 bs->num_free_clusters--; 4509 } 4510 cluster_count++; 4511 } 4512 } 4513 if (cluster_count == 0) { 4514 return -EINVAL; 4515 } 4516 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4517 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4518 uint32_t i; 4519 uint32_t cluster_count = 0; 4520 uint32_t cluster_idx; 4521 size_t cluster_idx_length; 4522 4523 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4524 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4525 4526 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4527 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4528 return -EINVAL; 4529 } 4530 4531 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4532 cluster_idx = desc_extent->cluster_idx[i]; 4533 /* 4534 * cluster_idx = 0 means an unallocated cluster - don't mark that 4535 * in the used cluster map. 
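 * For illustration (made-up values): an extent page with
 * start_cluster_idx = 64 and cluster_idx[] = { 17, 0, 93 } backs the
 * blob's logical clusters 64..66; physical clusters 17 and 93 get marked
 * used below, while logical cluster 65 stays thin-provisioned.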
4536 */ 4537 if (cluster_idx != 0) { 4538 if (cluster_idx < desc_extent->start_cluster_idx && 4539 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4540 return -EINVAL; 4541 } 4542 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4543 if (bs->num_free_clusters == 0) { 4544 return -ENOSPC; 4545 } 4546 bs->num_free_clusters--; 4547 } 4548 cluster_count++; 4549 } 4550 4551 if (cluster_count == 0) { 4552 return -EINVAL; 4553 } 4554 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4555 /* Skip this item */ 4556 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4557 /* Skip this item */ 4558 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4559 /* Skip this item */ 4560 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4561 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4562 uint32_t num_extent_pages = ctx->num_extent_pages; 4563 uint32_t i; 4564 size_t extent_pages_length; 4565 void *tmp; 4566 4567 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4568 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4569 4570 if (desc_extent_table->length == 0 || 4571 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4572 return -EINVAL; 4573 } 4574 4575 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4576 if (desc_extent_table->extent_page[i].page_idx != 0) { 4577 if (desc_extent_table->extent_page[i].num_pages != 1) { 4578 return -EINVAL; 4579 } 4580 num_extent_pages += 1; 4581 } 4582 } 4583 4584 if (num_extent_pages > 0) { 4585 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4586 if (tmp == NULL) { 4587 return -ENOMEM; 4588 } 4589 ctx->extent_page_num = tmp; 4590 4591 /* Extent table entries contain md page numbers for extent pages. 4592 * Zeroes represent unallocated extent pages, those are run-length-encoded. 4593 */ 4594 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4595 if (desc_extent_table->extent_page[i].page_idx != 0) { 4596 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4597 ctx->num_extent_pages += 1; 4598 } 4599 } 4600 } 4601 } else { 4602 /* Error */ 4603 return -EINVAL; 4604 } 4605 /* Advance to the next descriptor */ 4606 cur_desc += sizeof(*desc) + desc->length; 4607 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4608 break; 4609 } 4610 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4611 } 4612 return 0; 4613 } 4614 4615 static bool 4616 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4617 { 4618 uint32_t crc; 4619 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4620 size_t desc_len; 4621 4622 crc = blob_md_page_calc_crc(page); 4623 if (crc != page->crc) { 4624 return false; 4625 } 4626 4627 /* Extent page should always be of sequence num 0. */ 4628 if (page->sequence_num != 0) { 4629 return false; 4630 } 4631 4632 /* Descriptor type must be EXTENT_PAGE. */ 4633 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4634 return false; 4635 } 4636 4637 /* Descriptor length cannot exceed the page. */ 4638 desc_len = sizeof(*desc) + desc->length; 4639 if (desc_len > sizeof(page->descriptors)) { 4640 return false; 4641 } 4642 4643 /* It has to be the only descriptor in the page. 
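 * The check below peeks at the next descriptor slot, when one still fits,
 * and requires a zero length there (i.e. padding), which is how a
 * properly terminated descriptor list looks.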
*/ 4644 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 4645 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 4646 if (desc->length != 0) { 4647 return false; 4648 } 4649 } 4650 4651 return true; 4652 } 4653 4654 static bool 4655 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 4656 { 4657 uint32_t crc; 4658 struct spdk_blob_md_page *page = ctx->page; 4659 4660 crc = blob_md_page_calc_crc(page); 4661 if (crc != page->crc) { 4662 return false; 4663 } 4664 4665 /* First page of a sequence should match the blobid. */ 4666 if (page->sequence_num == 0 && 4667 bs_page_to_blobid(ctx->cur_page) != page->id) { 4668 return false; 4669 } 4670 assert(bs_load_cur_extent_page_valid(page) == false); 4671 4672 return true; 4673 } 4674 4675 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx); 4676 4677 static void 4678 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4679 { 4680 struct spdk_bs_load_ctx *ctx = cb_arg; 4681 4682 spdk_free(ctx->mask); 4683 ctx->mask = NULL; 4684 4685 if (bserrno != 0) { 4686 bs_load_ctx_fail(ctx, bserrno); 4687 return; 4688 } 4689 4690 bs_load_complete(ctx); 4691 } 4692 4693 static void 4694 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4695 { 4696 struct spdk_bs_load_ctx *ctx = cb_arg; 4697 4698 spdk_free(ctx->mask); 4699 ctx->mask = NULL; 4700 4701 if (bserrno != 0) { 4702 bs_load_ctx_fail(ctx, bserrno); 4703 return; 4704 } 4705 4706 bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl); 4707 } 4708 4709 static void 4710 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4711 { 4712 struct spdk_bs_load_ctx *ctx = cb_arg; 4713 4714 spdk_free(ctx->mask); 4715 ctx->mask = NULL; 4716 4717 if (bserrno != 0) { 4718 bs_load_ctx_fail(ctx, bserrno); 4719 return; 4720 } 4721 4722 bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl); 4723 } 4724 4725 static void 4726 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx) 4727 { 4728 bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl); 4729 } 4730 4731 static void 4732 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx) 4733 { 4734 uint64_t num_md_clusters; 4735 uint64_t i; 4736 4737 ctx->in_page_chain = false; 4738 4739 do { 4740 ctx->page_index++; 4741 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 4742 4743 if (ctx->page_index < ctx->super->md_len) { 4744 ctx->cur_page = ctx->page_index; 4745 bs_load_replay_cur_md_page(ctx); 4746 } else { 4747 /* Claim all of the clusters used by the metadata */ 4748 num_md_clusters = spdk_divide_round_up( 4749 ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster); 4750 for (i = 0; i < num_md_clusters; i++) { 4751 spdk_bit_array_set(ctx->used_clusters, i); 4752 } 4753 ctx->bs->num_free_clusters -= num_md_clusters; 4754 spdk_free(ctx->page); 4755 bs_load_write_used_md(ctx); 4756 } 4757 } 4758 4759 static void 4760 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4761 { 4762 struct spdk_bs_load_ctx *ctx = cb_arg; 4763 uint32_t page_num; 4764 uint64_t i; 4765 4766 if (bserrno != 0) { 4767 spdk_free(ctx->extent_pages); 4768 bs_load_ctx_fail(ctx, bserrno); 4769 return; 4770 } 4771 4772 for (i = 0; i < ctx->num_extent_pages; i++) { 4773 /* Extent pages are only read when present within in chain md. 4774 * Integrity of md is not right if that page was not a valid extent page. 
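 * (Regular in-chain md pages were already validated with
 * bs_load_cur_md_page_valid(); extent pages are reached only through
 * extent table descriptors, so they get this dedicated check instead.)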
*/ 4775 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4776 spdk_free(ctx->extent_pages); 4777 bs_load_ctx_fail(ctx, -EILSEQ); 4778 return; 4779 } 4780 4781 page_num = ctx->extent_page_num[i]; 4782 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4783 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4784 spdk_free(ctx->extent_pages); 4785 bs_load_ctx_fail(ctx, -EILSEQ); 4786 return; 4787 } 4788 } 4789 4790 spdk_free(ctx->extent_pages); 4791 free(ctx->extent_page_num); 4792 ctx->extent_page_num = NULL; 4793 ctx->num_extent_pages = 0; 4794 4795 bs_load_replay_md_chain_cpl(ctx); 4796 } 4797 4798 static void 4799 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4800 { 4801 spdk_bs_batch_t *batch; 4802 uint32_t page; 4803 uint64_t lba; 4804 uint64_t i; 4805 4806 ctx->extent_pages = spdk_zmalloc(ctx->super->md_page_size * ctx->num_extent_pages, 0, 4807 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4808 if (!ctx->extent_pages) { 4809 bs_load_ctx_fail(ctx, -ENOMEM); 4810 return; 4811 } 4812 4813 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4814 4815 for (i = 0; i < ctx->num_extent_pages; i++) { 4816 page = ctx->extent_page_num[i]; 4817 assert(page < ctx->super->md_len); 4818 lba = bs_md_page_to_lba(ctx->bs, page); 4819 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4820 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size)); 4821 } 4822 4823 bs_batch_close(batch); 4824 } 4825 4826 static void 4827 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4828 { 4829 struct spdk_bs_load_ctx *ctx = cb_arg; 4830 uint32_t page_num; 4831 struct spdk_blob_md_page *page; 4832 4833 if (bserrno != 0) { 4834 bs_load_ctx_fail(ctx, bserrno); 4835 return; 4836 } 4837 4838 page_num = ctx->cur_page; 4839 page = ctx->page; 4840 if (bs_load_cur_md_page_valid(ctx) == true) { 4841 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4842 spdk_spin_lock(&ctx->bs->used_lock); 4843 bs_claim_md_page(ctx->bs, page_num); 4844 spdk_spin_unlock(&ctx->bs->used_lock); 4845 if (page->sequence_num == 0) { 4846 SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num); 4847 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4848 } 4849 if (bs_load_replay_md_parse_page(ctx, page)) { 4850 bs_load_ctx_fail(ctx, -EILSEQ); 4851 return; 4852 } 4853 if (page->next != SPDK_INVALID_MD_PAGE) { 4854 ctx->in_page_chain = true; 4855 ctx->cur_page = page->next; 4856 bs_load_replay_cur_md_page(ctx); 4857 return; 4858 } 4859 if (ctx->num_extent_pages != 0) { 4860 bs_load_replay_extent_pages(ctx); 4861 return; 4862 } 4863 } 4864 } 4865 bs_load_replay_md_chain_cpl(ctx); 4866 } 4867 4868 static void 4869 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4870 { 4871 uint64_t lba; 4872 4873 assert(ctx->cur_page < ctx->super->md_len); 4874 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4875 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4876 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size), 4877 bs_load_replay_md_cpl, ctx); 4878 } 4879 4880 static void 4881 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4882 { 4883 ctx->page_index = 0; 4884 ctx->cur_page = 0; 4885 ctx->page = spdk_zmalloc(ctx->bs->md_page_size, 0, 4886 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4887 if (!ctx->page) { 4888 bs_load_ctx_fail(ctx, -ENOMEM); 4889 return; 4890 } 4891 bs_load_replay_cur_md_page(ctx); 4892 } 4893 4894 static void 4895 bs_recover(struct spdk_bs_load_ctx *ctx) 4896 { 4897 int rc; 4898 4899 SPDK_NOTICELOG("Performing recovery on 
blobstore\n"); 4900 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4901 if (rc < 0) { 4902 bs_load_ctx_fail(ctx, -ENOMEM); 4903 return; 4904 } 4905 4906 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4907 if (rc < 0) { 4908 bs_load_ctx_fail(ctx, -ENOMEM); 4909 return; 4910 } 4911 4912 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4913 if (rc < 0) { 4914 bs_load_ctx_fail(ctx, -ENOMEM); 4915 return; 4916 } 4917 4918 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4919 if (rc < 0) { 4920 bs_load_ctx_fail(ctx, -ENOMEM); 4921 return; 4922 } 4923 4924 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4925 bs_load_replay_md(ctx); 4926 } 4927 4928 static int 4929 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4930 { 4931 int rc; 4932 4933 if (ctx->super->size == 0) { 4934 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4935 } 4936 4937 if (ctx->super->io_unit_size == 0) { 4938 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4939 } 4940 if (ctx->super->md_page_size == 0) { 4941 ctx->super->md_page_size = SPDK_BS_PAGE_SIZE; 4942 } 4943 4944 ctx->bs->clean = 1; 4945 ctx->bs->cluster_sz = ctx->super->cluster_size; 4946 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4947 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4948 ctx->bs->md_page_size = ctx->super->md_page_size; 4949 bs_init_per_cluster_fields(ctx->bs); 4950 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4951 if (rc < 0) { 4952 return -ENOMEM; 4953 } 4954 ctx->bs->md_start = ctx->super->md_start; 4955 ctx->bs->md_len = ctx->super->md_len; 4956 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4957 if (rc < 0) { 4958 return -ENOMEM; 4959 } 4960 4961 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4962 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4963 ctx->bs->super_blob = ctx->super->super_blob; 4964 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4965 4966 return 0; 4967 } 4968 4969 static void 4970 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4971 { 4972 struct spdk_bs_load_ctx *ctx = cb_arg; 4973 int rc; 4974 4975 rc = bs_super_validate(ctx->super, ctx->bs); 4976 if (rc != 0) { 4977 bs_load_ctx_fail(ctx, rc); 4978 return; 4979 } 4980 4981 rc = bs_parse_super(ctx); 4982 if (rc < 0) { 4983 bs_load_ctx_fail(ctx, rc); 4984 return; 4985 } 4986 4987 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4988 bs_recover(ctx); 4989 } else { 4990 bs_load_read_used_pages(ctx); 4991 } 4992 } 4993 4994 static inline int 4995 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4996 { 4997 4998 if (!src->opts_size) { 4999 SPDK_ERRLOG("opts_size should not be zero value\n"); 5000 return -1; 5001 } 5002 5003 #define FIELD_OK(field) \ 5004 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 5005 5006 #define SET_FIELD(field) \ 5007 if (FIELD_OK(field)) { \ 5008 dst->field = src->field; \ 5009 } \ 5010 5011 SET_FIELD(cluster_sz); 5012 SET_FIELD(num_md_pages); 5013 SET_FIELD(max_md_ops); 5014 SET_FIELD(max_channel_ops); 5015 SET_FIELD(clear_method); 5016 5017 if (FIELD_OK(bstype)) { 5018 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 5019 } 5020 SET_FIELD(md_page_size); 5021 SET_FIELD(iter_cb_fn); 5022 SET_FIELD(iter_cb_arg); 5023 SET_FIELD(force_recover); 5024 
SET_FIELD(esnap_bs_dev_create); 5025 SET_FIELD(esnap_ctx); 5026 5027 dst->opts_size = src->opts_size; 5028 5029 /* You should not remove this statement, but need to update the assert statement 5030 * if you add a new field, and also add a corresponding SET_FIELD statement */ 5031 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size"); 5032 5033 #undef FIELD_OK 5034 #undef SET_FIELD 5035 5036 return 0; 5037 } 5038 5039 void 5040 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5041 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5042 { 5043 struct spdk_blob_store *bs; 5044 struct spdk_bs_cpl cpl; 5045 struct spdk_bs_load_ctx *ctx; 5046 struct spdk_bs_opts opts = {}; 5047 int err; 5048 5049 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 5050 5051 if ((dev->phys_blocklen % dev->blocklen) != 0) { 5052 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 5053 dev->destroy(dev); 5054 cb_fn(cb_arg, NULL, -EINVAL); 5055 return; 5056 } 5057 5058 spdk_bs_opts_init(&opts, sizeof(opts)); 5059 if (o) { 5060 if (bs_opts_copy(o, &opts)) { 5061 dev->destroy(dev); 5062 cb_fn(cb_arg, NULL, -EINVAL); 5063 return; 5064 } 5065 } 5066 5067 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 5068 dev->destroy(dev); 5069 cb_fn(cb_arg, NULL, -EINVAL); 5070 return; 5071 } 5072 5073 err = bs_alloc(dev, &opts, &bs, &ctx); 5074 if (err) { 5075 dev->destroy(dev); 5076 cb_fn(cb_arg, NULL, err); 5077 return; 5078 } 5079 5080 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5081 cpl.u.bs_handle.cb_fn = cb_fn; 5082 cpl.u.bs_handle.cb_arg = cb_arg; 5083 cpl.u.bs_handle.bs = bs; 5084 5085 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5086 if (!ctx->seq) { 5087 spdk_free(ctx->super); 5088 free(ctx); 5089 bs_free(bs); 5090 cb_fn(cb_arg, NULL, -ENOMEM); 5091 return; 5092 } 5093 5094 /* Read the super block */ 5095 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5096 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5097 bs_load_super_cpl, ctx); 5098 } 5099 5100 /* END spdk_bs_load */ 5101 5102 /* START spdk_bs_dump */ 5103 5104 static void 5105 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 5106 { 5107 spdk_free(ctx->super); 5108 5109 /* 5110 * We need to defer calling bs_call_cpl() until after 5111 * dev destruction, so tuck these away for later use. 
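 * bs_free() below unregisters the io_device with bs_dev_destroy as the
 * unregister callback, and the deferred completion fires from that
 * teardown via bs_call_cpl(&bs->unload_cpl, bs->unload_err) above.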
5112 */
5113 ctx->bs->unload_err = bserrno;
5114 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5115 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5116
5117 bs_sequence_finish(seq, 0);
5118 bs_free(ctx->bs);
5119 free(ctx);
5120 }
5121
5122 static void
5123 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5124 {
5125 struct spdk_blob_md_descriptor_xattr *desc_xattr;
5126 uint32_t i;
5127 const char *type;
5128
5129 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;
5130
5131 if (desc_xattr->length !=
5132 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
5133 desc_xattr->name_length + desc_xattr->value_length) {
5134 fprintf(ctx->fp, "XATTR: descriptor length mismatch -- skipping\n"); return;
5135 }
5136 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
5137 ctx->xattr_name[desc_xattr->name_length] = '\0';
5138 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
5139 type = "XATTR";
5140 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
5141 type = "XATTR_INTERNAL";
5142 } else {
5143 assert(false);
5144 type = "XATTR_?";
5145 }
5146 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name);
5147 fprintf(ctx->fp, " value = \"");
5148 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
5149 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
5150 desc_xattr->value_length);
5151 fprintf(ctx->fp, "\"\n");
5152 for (i = 0; i < desc_xattr->value_length; i++) {
5153 if (i % 16 == 0) {
5154 fprintf(ctx->fp, " ");
5155 }
5156 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
5157 if ((i + 1) % 16 == 0) {
5158 fprintf(ctx->fp, "\n");
5159 }
5160 }
5161 if (i % 16 != 0) {
5162 fprintf(ctx->fp, "\n");
5163 }
5164 }
5165
5166 struct type_flag_desc {
5167 uint64_t mask;
5168 uint64_t val;
5169 const char *name;
5170 };
5171
5172 static void
5173 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags,
5174 struct type_flag_desc *desc, size_t numflags)
5175 {
5176 uint64_t covered = 0;
5177 size_t i;
5178
5179 for (i = 0; i < numflags; i++) {
5180 if ((desc[i].mask & flags) != desc[i].val) {
5181 continue;
5182 }
5183 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name);
5184 if (desc[i].mask != desc[i].val) {
5185 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")",
5186 desc[i].mask, desc[i].val);
5187 }
5188 fprintf(ctx->fp, "\n");
5189 covered |= desc[i].mask;
5190 }
5191 if ((flags & ~covered) != 0) {
5192 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered);
5193 }
5194 }
5195
5196 static void
5197 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5198 {
5199 struct spdk_blob_md_descriptor_flags *type_desc;
5200 #define ADD_FLAG(f) { f, f, #f }
5201 #define ADD_MASK_VAL(m, v) { m, v, #v }
5202 static struct type_flag_desc invalid[] = {
5203 ADD_FLAG(SPDK_BLOB_THIN_PROV),
5204 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR),
5205 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE),
5206 };
5207 static struct type_flag_desc data_ro[] = {
5208 ADD_FLAG(SPDK_BLOB_READ_ONLY),
5209 };
5210 static struct type_flag_desc md_ro[] = {
5211 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT),
5212 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE),
5213 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP),
5214 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES),
5215 };
5216 #undef ADD_FLAG
5217 #undef ADD_MASK_VAL
5218
5219 type_desc = (struct
spdk_blob_md_descriptor_flags *)desc; 5220 fprintf(ctx->fp, "Flags:\n"); 5221 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 5222 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 5223 SPDK_COUNTOF(invalid)); 5224 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 5225 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 5226 SPDK_COUNTOF(data_ro)); 5227 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 5228 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 5229 SPDK_COUNTOF(md_ro)); 5230 } 5231 5232 static void 5233 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5234 { 5235 struct spdk_blob_md_descriptor_extent_table *et_desc; 5236 uint64_t num_extent_pages; 5237 uint32_t et_idx; 5238 5239 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 5240 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 5241 sizeof(et_desc->extent_page[0]); 5242 5243 fprintf(ctx->fp, "Extent table:\n"); 5244 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 5245 if (et_desc->extent_page[et_idx].page_idx == 0) { 5246 /* Zeroes represent unallocated extent pages. */ 5247 continue; 5248 } 5249 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 5250 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 5251 et_desc->extent_page[et_idx].num_pages, 5252 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 5253 } 5254 } 5255 5256 static void 5257 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 5258 { 5259 uint32_t page_idx = ctx->cur_page; 5260 struct spdk_blob_md_page *page = ctx->page; 5261 struct spdk_blob_md_descriptor *desc; 5262 size_t cur_desc = 0; 5263 uint32_t crc; 5264 5265 fprintf(ctx->fp, "=========\n"); 5266 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 5267 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 5268 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 5269 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 5270 if (page->next == SPDK_INVALID_MD_PAGE) { 5271 fprintf(ctx->fp, "Next: None\n"); 5272 } else { 5273 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 5274 } 5275 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 5276 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 5277 fprintf(ctx->fp, " md"); 5278 } 5279 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 5280 fprintf(ctx->fp, " blob"); 5281 } 5282 fprintf(ctx->fp, "\n"); 5283 5284 crc = blob_md_page_calc_crc(page); 5285 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 5286 5287 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 5288 while (cur_desc < sizeof(page->descriptors)) { 5289 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 5290 if (desc->length == 0) { 5291 /* If padding and length are 0, this terminates the page */ 5292 break; 5293 } 5294 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 5295 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 5296 unsigned int i; 5297 5298 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 5299 5300 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 5301 if (desc_extent_rle->extents[i].cluster_idx != 0) { 5302 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5303 desc_extent_rle->extents[i].cluster_idx); 5304 } else { 5305 fprintf(ctx->fp, "Unallocated Extent - "); 5306 } 5307 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 5308 fprintf(ctx->fp, "\n"); 5309 } 5310 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 5311 struct spdk_blob_md_descriptor_extent_page *desc_extent; 5312 unsigned int i; 5313 5314 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 5315 5316 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 5317 if (desc_extent->cluster_idx[i] != 0) { 5318 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5319 desc_extent->cluster_idx[i]); 5320 } else { 5321 fprintf(ctx->fp, "Unallocated Extent"); 5322 } 5323 fprintf(ctx->fp, "\n"); 5324 } 5325 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5326 bs_dump_print_xattr(ctx, desc); 5327 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5328 bs_dump_print_xattr(ctx, desc); 5329 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 5330 bs_dump_print_type_flags(ctx, desc); 5331 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 5332 bs_dump_print_extent_table(ctx, desc); 5333 } else { 5334 /* Error */ 5335 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 5336 } 5337 /* Advance to the next descriptor */ 5338 cur_desc += sizeof(*desc) + desc->length; 5339 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 5340 break; 5341 } 5342 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 5343 } 5344 } 5345 5346 static void 5347 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5348 { 5349 struct spdk_bs_load_ctx *ctx = cb_arg; 5350 5351 if (bserrno != 0) { 5352 bs_dump_finish(seq, ctx, bserrno); 5353 return; 5354 } 5355 5356 if (ctx->page->id != 0) { 5357 bs_dump_print_md_page(ctx); 5358 } 5359 5360 ctx->cur_page++; 5361 5362 if (ctx->cur_page < ctx->super->md_len) { 5363 bs_dump_read_md_page(seq, ctx); 5364 } else { 5365 spdk_free(ctx->page); 5366 bs_dump_finish(seq, ctx, 0); 5367 } 5368 } 5369 5370 static void 5371 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 5372 { 5373 struct spdk_bs_load_ctx *ctx = cb_arg; 5374 uint64_t lba; 5375 5376 assert(ctx->cur_page < ctx->super->md_len); 5377 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 5378 bs_sequence_read_dev(seq, ctx->page, lba, 5379 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size), 5380 bs_dump_read_md_page_cpl, ctx); 5381 } 5382 5383 static void 5384 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5385 { 5386 struct spdk_bs_load_ctx *ctx = cb_arg; 5387 int rc; 5388 5389 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 5390 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5391 sizeof(ctx->super->signature)) != 0) { 5392 fprintf(ctx->fp, "(Mismatch)\n"); 5393 bs_dump_finish(seq, ctx, bserrno); 5394 return; 5395 } else { 5396 fprintf(ctx->fp, "(OK)\n"); 5397 } 5398 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 5399 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 5400 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 5401 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 5402 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 5403 fprintf(ctx->fp, "Super Blob ID: "); 5404 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 5405 fprintf(ctx->fp, "(None)\n"); 5406 } else { 5407 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 5408 } 5409 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 5410 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 5411 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 5412 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 5413 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 5414 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 5415 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 5416 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 5417 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 5418 5419 ctx->cur_page = 0; 5420 ctx->page = spdk_zmalloc(ctx->super->md_page_size, 0, 5421 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5422 if (!ctx->page) { 5423 bs_dump_finish(seq, ctx, -ENOMEM); 5424 return; 5425 } 5426 5427 rc = bs_parse_super(ctx); 5428 if (rc < 0) { 5429 bs_load_ctx_fail(ctx, rc); 5430 return; 5431 } 5432 5433 bs_load_read_used_pages(ctx); 5434 } 5435 5436 void 5437 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 5438 spdk_bs_op_complete cb_fn, void *cb_arg) 5439 { 5440 struct spdk_blob_store *bs; 5441 struct spdk_bs_cpl cpl; 5442 struct spdk_bs_load_ctx *ctx; 5443 struct spdk_bs_opts opts = {}; 5444 int err; 5445 5446 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 5447 5448 spdk_bs_opts_init(&opts, sizeof(opts)); 5449 5450 err = bs_alloc(dev, &opts, &bs, &ctx); 5451 if (err) { 5452 dev->destroy(dev); 5453 cb_fn(cb_arg, err); 5454 return; 5455 } 5456 5457 ctx->dumping = true; 5458 ctx->fp = fp; 5459 ctx->print_xattr_fn = print_xattr_fn; 5460 5461 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5462 cpl.u.bs_basic.cb_fn = cb_fn; 5463 cpl.u.bs_basic.cb_arg = cb_arg; 5464 5465 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5466 if (!ctx->seq) { 5467 spdk_free(ctx->super); 5468 free(ctx); 5469 bs_free(bs); 5470 cb_fn(cb_arg, -ENOMEM); 5471 return; 5472 } 5473 5474 /* Read the super block */ 5475 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5476 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5477 bs_dump_super_cpl, ctx); 5478 } 5479 5480 /* END spdk_bs_dump */ 5481 5482 /* START spdk_bs_init */ 5483 5484 static void 5485 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5486 { 5487 struct spdk_bs_load_ctx *ctx = cb_arg; 5488 5489 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 5490 spdk_free(ctx->super); 5491 free(ctx); 5492 5493 bs_sequence_finish(seq, bserrno); 5494 } 5495 5496 static void 5497 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5498 { 5499 struct spdk_bs_load_ctx *ctx = cb_arg; 5500 5501 /* Write super block */ 5502 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5503 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5504 bs_init_persist_super_cpl, ctx); 5505 } 5506 5507 void 5508 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5509 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5510 { 5511 struct spdk_bs_load_ctx *ctx; 5512 struct spdk_blob_store *bs; 5513 struct spdk_bs_cpl cpl; 5514 spdk_bs_sequence_t *seq; 5515 spdk_bs_batch_t *batch; 5516 uint64_t num_md_lba; 5517 uint64_t num_md_pages; 5518 uint64_t num_md_clusters; 5519 uint64_t max_used_cluster_mask_len; 5520 uint32_t i; 5521 struct spdk_bs_opts opts = {}; 5522 int rc; 5523 uint64_t lba, lba_count; 5524 5525 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5526 if ((dev->phys_blocklen % dev->blocklen) != 0) { 5527 SPDK_ERRLOG("unsupported dev block length of %d\n", 5528 dev->blocklen); 5529 dev->destroy(dev); 5530 cb_fn(cb_arg, NULL, -EINVAL); 5531 return; 5532 } 5533 5534 spdk_bs_opts_init(&opts, sizeof(opts)); 5535 if (o) { 5536 if (bs_opts_copy(o, &opts)) { 5537 dev->destroy(dev); 5538 cb_fn(cb_arg, NULL, -EINVAL); 5539 return; 5540 } 5541 } 5542 5543 if (bs_opts_verify(&opts) != 0) { 5544 dev->destroy(dev); 5545 cb_fn(cb_arg, NULL, -EINVAL); 5546 return; 5547 } 5548 5549 rc = bs_alloc(dev, &opts, &bs, &ctx); 5550 if (rc) { 5551 dev->destroy(dev); 5552 cb_fn(cb_arg, NULL, rc); 5553 return; 5554 } 5555 5556 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5557 /* By default, allocate 1 page per cluster. 5558 * Technically, this over-allocates metadata 5559 * because more metadata will reduce the number 5560 * of usable clusters. This can be addressed with 5561 * more complex math in the future. 5562 */ 5563 bs->md_len = bs->total_clusters; 5564 } else { 5565 bs->md_len = opts.num_md_pages; 5566 } 5567 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5568 if (rc < 0) { 5569 spdk_free(ctx->super); 5570 free(ctx); 5571 bs_free(bs); 5572 cb_fn(cb_arg, NULL, -ENOMEM); 5573 return; 5574 } 5575 5576 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5577 if (rc < 0) { 5578 spdk_free(ctx->super); 5579 free(ctx); 5580 bs_free(bs); 5581 cb_fn(cb_arg, NULL, -ENOMEM); 5582 return; 5583 } 5584 5585 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5586 if (rc < 0) { 5587 spdk_free(ctx->super); 5588 free(ctx); 5589 bs_free(bs); 5590 cb_fn(cb_arg, NULL, -ENOMEM); 5591 return; 5592 } 5593 5594 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5595 sizeof(ctx->super->signature)); 5596 ctx->super->version = SPDK_BS_VERSION; 5597 ctx->super->length = sizeof(*ctx->super); 5598 ctx->super->super_blob = bs->super_blob; 5599 ctx->super->clean = 0; 5600 ctx->super->cluster_size = bs->cluster_sz; 5601 ctx->super->io_unit_size = bs->io_unit_size; 5602 ctx->super->md_page_size = bs->md_page_size; 5603 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5604 5605 /* Calculate how many pages the metadata consumes at the front 5606 * of the disk. 5607 */ 5608 5609 /* The super block uses 1 page */ 5610 num_md_pages = 1; 5611 5612 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5613 * up to the nearest page, plus a header. 
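 * Worked example (assuming the 4 KiB default md page and 1 MiB default
 * cluster on a 1 GiB device): md_len defaults to total_clusters = 1024,
 * so the mask needs 1024 bits = 128 bytes plus
 * sizeof(struct spdk_bs_md_mask), which still rounds up to a single
 * 4 KiB page, i.e. used_page_mask_len = 1.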
5614 */ 5615 ctx->super->used_page_mask_start = num_md_pages; 5616 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5617 spdk_divide_round_up(bs->md_len, 8), 5618 ctx->super->md_page_size); 5619 num_md_pages += ctx->super->used_page_mask_len; 5620 5621 /* The used_clusters mask requires 1 bit per cluster, rounded 5622 * up to the nearest page, plus a header. 5623 */ 5624 ctx->super->used_cluster_mask_start = num_md_pages; 5625 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5626 spdk_divide_round_up(bs->total_clusters, 8), 5627 ctx->super->md_page_size); 5628 /* The blobstore might be extended, then the used_cluster bitmap will need more space. 5629 * Here we calculate the max clusters we can support according to the 5630 * num_md_pages (bs->md_len). 5631 */ 5632 max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5633 spdk_divide_round_up(bs->md_len, 8), 5634 ctx->super->md_page_size); 5635 max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len, 5636 ctx->super->used_cluster_mask_len); 5637 num_md_pages += max_used_cluster_mask_len; 5638 5639 /* The used_blobids mask requires 1 bit per metadata page, rounded 5640 * up to the nearest page, plus a header. 5641 */ 5642 ctx->super->used_blobid_mask_start = num_md_pages; 5643 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5644 spdk_divide_round_up(bs->md_len, 8), 5645 ctx->super->md_page_size); 5646 num_md_pages += ctx->super->used_blobid_mask_len; 5647 5648 /* The metadata region size was chosen above */ 5649 ctx->super->md_start = bs->md_start = num_md_pages; 5650 ctx->super->md_len = bs->md_len; 5651 num_md_pages += bs->md_len; 5652 5653 num_md_lba = bs_page_to_lba(bs, num_md_pages); 5654 5655 ctx->super->size = dev->blockcnt * dev->blocklen; 5656 5657 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 5658 5659 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 5660 if (num_md_clusters > bs->total_clusters) { 5661 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 5662 "please decrease number of pages reserved for metadata " 5663 "or increase cluster size.\n"); 5664 spdk_free(ctx->super); 5665 spdk_bit_array_free(&ctx->used_clusters); 5666 free(ctx); 5667 bs_free(bs); 5668 cb_fn(cb_arg, NULL, -ENOMEM); 5669 return; 5670 } 5671 /* Claim all of the clusters used by the metadata */ 5672 for (i = 0; i < num_md_clusters; i++) { 5673 spdk_bit_array_set(ctx->used_clusters, i); 5674 } 5675 5676 bs->num_free_clusters -= num_md_clusters; 5677 bs->total_data_clusters = bs->num_free_clusters; 5678 5679 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5680 cpl.u.bs_handle.cb_fn = cb_fn; 5681 cpl.u.bs_handle.cb_arg = cb_arg; 5682 cpl.u.bs_handle.bs = bs; 5683 5684 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5685 if (!seq) { 5686 spdk_free(ctx->super); 5687 free(ctx); 5688 bs_free(bs); 5689 cb_fn(cb_arg, NULL, -ENOMEM); 5690 return; 5691 } 5692 5693 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx); 5694 5695 /* Clear metadata space */ 5696 bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 5697 5698 lba = num_md_lba; 5699 lba_count = ctx->bs->dev->blockcnt - lba; 5700 switch (opts.clear_method) { 5701 case BS_CLEAR_WITH_UNMAP: 5702 /* Trim data clusters */ 5703 bs_batch_unmap_dev(batch, lba, lba_count); 5704 break; 5705 case BS_CLEAR_WITH_WRITE_ZEROES: 5706 /* Write_zeroes to data clusters */ 5707 bs_batch_write_zeroes_dev(batch, lba, 
lba_count); 5708 break; 5709 case BS_CLEAR_WITH_NONE: 5710 default: 5711 break; 5712 } 5713 5714 bs_batch_close(batch); 5715 } 5716 5717 /* END spdk_bs_init */ 5718 5719 /* START spdk_bs_destroy */ 5720 5721 static void 5722 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5723 { 5724 struct spdk_bs_load_ctx *ctx = cb_arg; 5725 struct spdk_blob_store *bs = ctx->bs; 5726 5727 /* 5728 * We need to defer calling bs_call_cpl() until after 5729 * dev destruction, so tuck these away for later use. 5730 */ 5731 bs->unload_err = bserrno; 5732 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5733 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5734 5735 bs_sequence_finish(seq, bserrno); 5736 5737 bs_free(bs); 5738 free(ctx); 5739 } 5740 5741 void 5742 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5743 void *cb_arg) 5744 { 5745 struct spdk_bs_cpl cpl; 5746 spdk_bs_sequence_t *seq; 5747 struct spdk_bs_load_ctx *ctx; 5748 5749 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5750 5751 if (!RB_EMPTY(&bs->open_blobs)) { 5752 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5753 cb_fn(cb_arg, -EBUSY); 5754 return; 5755 } 5756 5757 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5758 cpl.u.bs_basic.cb_fn = cb_fn; 5759 cpl.u.bs_basic.cb_arg = cb_arg; 5760 5761 ctx = calloc(1, sizeof(*ctx)); 5762 if (!ctx) { 5763 cb_fn(cb_arg, -ENOMEM); 5764 return; 5765 } 5766 5767 ctx->bs = bs; 5768 5769 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5770 if (!seq) { 5771 free(ctx); 5772 cb_fn(cb_arg, -ENOMEM); 5773 return; 5774 } 5775 5776 /* Write zeroes to the super block */ 5777 bs_sequence_write_zeroes_dev(seq, 5778 bs_page_to_lba(bs, 0), 5779 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5780 bs_destroy_trim_cpl, ctx); 5781 } 5782 5783 /* END spdk_bs_destroy */ 5784 5785 /* START spdk_bs_unload */ 5786 5787 static void 5788 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5789 { 5790 spdk_bs_sequence_t *seq = ctx->seq; 5791 5792 spdk_free(ctx->super); 5793 5794 /* 5795 * We need to defer calling bs_call_cpl() until after 5796 * dev destruction, so tuck these away for later use. 
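 * (bs_sequence_finish() below will therefore not run the caller's
 * completion; bs_free() destroys the dev and is expected to fire the
 * saved unload_cpl with unload_err once teardown finishes.)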
5797 */ 5798 ctx->bs->unload_err = bserrno; 5799 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5800 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5801 5802 bs_sequence_finish(seq, bserrno); 5803 5804 bs_free(ctx->bs); 5805 free(ctx); 5806 } 5807 5808 static void 5809 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5810 { 5811 struct spdk_bs_load_ctx *ctx = cb_arg; 5812 5813 bs_unload_finish(ctx, bserrno); 5814 } 5815 5816 static void 5817 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5818 { 5819 struct spdk_bs_load_ctx *ctx = cb_arg; 5820 5821 spdk_free(ctx->mask); 5822 5823 if (bserrno != 0) { 5824 bs_unload_finish(ctx, bserrno); 5825 return; 5826 } 5827 5828 ctx->super->clean = 1; 5829 5830 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5831 } 5832 5833 static void 5834 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5835 { 5836 struct spdk_bs_load_ctx *ctx = cb_arg; 5837 5838 spdk_free(ctx->mask); 5839 ctx->mask = NULL; 5840 5841 if (bserrno != 0) { 5842 bs_unload_finish(ctx, bserrno); 5843 return; 5844 } 5845 5846 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5847 } 5848 5849 static void 5850 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5851 { 5852 struct spdk_bs_load_ctx *ctx = cb_arg; 5853 5854 spdk_free(ctx->mask); 5855 ctx->mask = NULL; 5856 5857 if (bserrno != 0) { 5858 bs_unload_finish(ctx, bserrno); 5859 return; 5860 } 5861 5862 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5863 } 5864 5865 static void 5866 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5867 { 5868 struct spdk_bs_load_ctx *ctx = cb_arg; 5869 int rc; 5870 5871 if (bserrno != 0) { 5872 bs_unload_finish(ctx, bserrno); 5873 return; 5874 } 5875 5876 rc = bs_super_validate(ctx->super, ctx->bs); 5877 if (rc != 0) { 5878 bs_unload_finish(ctx, rc); 5879 return; 5880 } 5881 5882 bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5883 } 5884 5885 void 5886 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5887 { 5888 struct spdk_bs_cpl cpl; 5889 struct spdk_bs_load_ctx *ctx; 5890 5891 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5892 5893 /* 5894 * If external snapshot channels are being destroyed while the blobstore is unloaded, the 5895 * unload is deferred until after the channel destruction completes. 
5896 */ 5897 if (bs->esnap_channels_unloading != 0) { 5898 if (bs->esnap_unload_cb_fn != NULL) { 5899 SPDK_ERRLOG("Blobstore unload in progress\n"); 5900 cb_fn(cb_arg, -EBUSY); 5901 return; 5902 } 5903 SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32 5904 " esnap clones are unloading\n", bs->esnap_channels_unloading); 5905 bs->esnap_unload_cb_fn = cb_fn; 5906 bs->esnap_unload_cb_arg = cb_arg; 5907 return; 5908 } 5909 if (bs->esnap_unload_cb_fn != NULL) { 5910 SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n"); 5911 assert(bs->esnap_unload_cb_fn == cb_fn); 5912 assert(bs->esnap_unload_cb_arg == cb_arg); 5913 bs->esnap_unload_cb_fn = NULL; 5914 bs->esnap_unload_cb_arg = NULL; 5915 } 5916 5917 if (!RB_EMPTY(&bs->open_blobs)) { 5918 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5919 cb_fn(cb_arg, -EBUSY); 5920 return; 5921 } 5922 5923 ctx = calloc(1, sizeof(*ctx)); 5924 if (!ctx) { 5925 cb_fn(cb_arg, -ENOMEM); 5926 return; 5927 } 5928 5929 ctx->bs = bs; 5930 5931 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5932 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5933 if (!ctx->super) { 5934 free(ctx); 5935 cb_fn(cb_arg, -ENOMEM); 5936 return; 5937 } 5938 5939 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5940 cpl.u.bs_basic.cb_fn = cb_fn; 5941 cpl.u.bs_basic.cb_arg = cb_arg; 5942 5943 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5944 if (!ctx->seq) { 5945 spdk_free(ctx->super); 5946 free(ctx); 5947 cb_fn(cb_arg, -ENOMEM); 5948 return; 5949 } 5950 5951 /* Read super block */ 5952 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5953 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5954 bs_unload_read_super_cpl, ctx); 5955 } 5956 5957 /* END spdk_bs_unload */ 5958 5959 /* START spdk_bs_set_super */ 5960 5961 struct spdk_bs_set_super_ctx { 5962 struct spdk_blob_store *bs; 5963 struct spdk_bs_super_block *super; 5964 }; 5965 5966 static void 5967 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5968 { 5969 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5970 5971 if (bserrno != 0) { 5972 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5973 } 5974 5975 spdk_free(ctx->super); 5976 5977 bs_sequence_finish(seq, bserrno); 5978 5979 free(ctx); 5980 } 5981 5982 static void 5983 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5984 { 5985 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5986 int rc; 5987 5988 if (bserrno != 0) { 5989 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5990 spdk_free(ctx->super); 5991 bs_sequence_finish(seq, bserrno); 5992 free(ctx); 5993 return; 5994 } 5995 5996 rc = bs_super_validate(ctx->super, ctx->bs); 5997 if (rc != 0) { 5998 SPDK_ERRLOG("Not a valid super block\n"); 5999 spdk_free(ctx->super); 6000 bs_sequence_finish(seq, rc); 6001 free(ctx); 6002 return; 6003 } 6004 6005 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 6006 } 6007 6008 void 6009 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 6010 spdk_bs_op_complete cb_fn, void *cb_arg) 6011 { 6012 struct spdk_bs_cpl cpl; 6013 spdk_bs_sequence_t *seq; 6014 struct spdk_bs_set_super_ctx *ctx; 6015 6016 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 6017 6018 ctx = calloc(1, sizeof(*ctx)); 6019 if (!ctx) { 6020 cb_fn(cb_arg, -ENOMEM); 6021 return; 6022 } 6023 6024 ctx->bs = bs; 6025 6026 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 6027 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 6028 if (!ctx->super) { 6029 free(ctx); 6030 cb_fn(cb_arg, 
-ENOMEM); 6031 return; 6032 } 6033 6034 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 6035 cpl.u.bs_basic.cb_fn = cb_fn; 6036 cpl.u.bs_basic.cb_arg = cb_arg; 6037 6038 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 6039 if (!seq) { 6040 spdk_free(ctx->super); 6041 free(ctx); 6042 cb_fn(cb_arg, -ENOMEM); 6043 return; 6044 } 6045 6046 bs->super_blob = blobid; 6047 6048 /* Read super block */ 6049 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 6050 bs_byte_to_lba(bs, sizeof(*ctx->super)), 6051 bs_set_super_read_cpl, ctx); 6052 } 6053 6054 /* END spdk_bs_set_super */ 6055 6056 void 6057 spdk_bs_get_super(struct spdk_blob_store *bs, 6058 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6059 { 6060 if (bs->super_blob == SPDK_BLOBID_INVALID) { 6061 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 6062 } else { 6063 cb_fn(cb_arg, bs->super_blob, 0); 6064 } 6065 } 6066 6067 uint64_t 6068 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 6069 { 6070 return bs->cluster_sz; 6071 } 6072 6073 uint64_t 6074 spdk_bs_get_page_size(struct spdk_blob_store *bs) 6075 { 6076 return bs->md_page_size; 6077 } 6078 6079 uint64_t 6080 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 6081 { 6082 return bs->io_unit_size; 6083 } 6084 6085 uint64_t 6086 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 6087 { 6088 return bs->num_free_clusters; 6089 } 6090 6091 uint64_t 6092 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 6093 { 6094 return bs->total_data_clusters; 6095 } 6096 6097 static int 6098 bs_register_md_thread(struct spdk_blob_store *bs) 6099 { 6100 bs->md_channel = spdk_get_io_channel(bs); 6101 if (!bs->md_channel) { 6102 SPDK_ERRLOG("Failed to get IO channel.\n"); 6103 return -1; 6104 } 6105 6106 return 0; 6107 } 6108 6109 static int 6110 bs_unregister_md_thread(struct spdk_blob_store *bs) 6111 { 6112 spdk_put_io_channel(bs->md_channel); 6113 6114 return 0; 6115 } 6116 6117 spdk_blob_id 6118 spdk_blob_get_id(struct spdk_blob *blob) 6119 { 6120 assert(blob != NULL); 6121 6122 return blob->id; 6123 } 6124 6125 uint64_t 6126 spdk_blob_get_num_io_units(struct spdk_blob *blob) 6127 { 6128 assert(blob != NULL); 6129 6130 return bs_cluster_to_io_unit(blob->bs, blob->active.num_clusters); 6131 } 6132 6133 uint64_t 6134 spdk_blob_get_num_clusters(struct spdk_blob *blob) 6135 { 6136 assert(blob != NULL); 6137 6138 return blob->active.num_clusters; 6139 } 6140 6141 uint64_t 6142 spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob) 6143 { 6144 assert(blob != NULL); 6145 6146 return blob->active.num_allocated_clusters; 6147 } 6148 6149 static uint64_t 6150 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated) 6151 { 6152 uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob); 6153 6154 while (offset < blob_io_unit_num) { 6155 if (bs_io_unit_is_allocated(blob, offset) == is_allocated) { 6156 return offset; 6157 } 6158 6159 offset += bs_num_io_units_to_cluster_boundary(blob, offset); 6160 } 6161 6162 return UINT64_MAX; 6163 } 6164 6165 uint64_t 6166 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6167 { 6168 return blob_find_io_unit(blob, offset, true); 6169 } 6170 6171 uint64_t 6172 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6173 { 6174 return blob_find_io_unit(blob, offset, false); 6175 } 6176 6177 /* START spdk_bs_create_blob */ 6178 6179 static void 6180 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6181 { 6182 struct spdk_blob *blob = cb_arg; 6183 uint32_t 
page_idx = bs_blobid_to_page(blob->id); 6184 6185 if (bserrno != 0) { 6186 spdk_spin_lock(&blob->bs->used_lock); 6187 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 6188 bs_release_md_page(blob->bs, page_idx); 6189 spdk_spin_unlock(&blob->bs->used_lock); 6190 } 6191 6192 blob_free(blob); 6193 6194 bs_sequence_finish(seq, bserrno); 6195 } 6196 6197 static int 6198 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 6199 bool internal) 6200 { 6201 uint64_t i; 6202 size_t value_len = 0; 6203 int rc; 6204 const void *value = NULL; 6205 if (xattrs->count > 0 && xattrs->get_value == NULL) { 6206 return -EINVAL; 6207 } 6208 for (i = 0; i < xattrs->count; i++) { 6209 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 6210 if (value == NULL || value_len == 0) { 6211 return -EINVAL; 6212 } 6213 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 6214 if (rc < 0) { 6215 return rc; 6216 } 6217 } 6218 return 0; 6219 } 6220 6221 static void 6222 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 6223 { 6224 #define FIELD_OK(field) \ 6225 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 6226 6227 #define SET_FIELD(field) \ 6228 if (FIELD_OK(field)) { \ 6229 dst->field = src->field; \ 6230 } \ 6231 6232 SET_FIELD(num_clusters); 6233 SET_FIELD(thin_provision); 6234 SET_FIELD(clear_method); 6235 6236 if (FIELD_OK(xattrs)) { 6237 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 6238 } 6239 6240 SET_FIELD(use_extent_table); 6241 SET_FIELD(esnap_id); 6242 SET_FIELD(esnap_id_len); 6243 6244 dst->opts_size = src->opts_size; 6245 6246 /* You should not remove this statement, but need to update the assert statement 6247 * if you add a new field, and also add a corresponding SET_FIELD statement */ 6248 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size"); 6249 6250 #undef FIELD_OK 6251 #undef SET_FIELD 6252 } 6253 6254 static void 6255 bs_create_blob(struct spdk_blob_store *bs, 6256 const struct spdk_blob_opts *opts, 6257 const struct spdk_blob_xattr_opts *internal_xattrs, 6258 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6259 { 6260 struct spdk_blob *blob; 6261 uint32_t page_idx; 6262 struct spdk_bs_cpl cpl; 6263 struct spdk_blob_opts opts_local; 6264 struct spdk_blob_xattr_opts internal_xattrs_default; 6265 spdk_bs_sequence_t *seq; 6266 spdk_blob_id id; 6267 int rc; 6268 6269 assert(spdk_get_thread() == bs->md_thread); 6270 6271 spdk_spin_lock(&bs->used_lock); 6272 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 6273 if (page_idx == UINT32_MAX) { 6274 spdk_spin_unlock(&bs->used_lock); 6275 cb_fn(cb_arg, 0, -ENOMEM); 6276 return; 6277 } 6278 spdk_bit_array_set(bs->used_blobids, page_idx); 6279 bs_claim_md_page(bs, page_idx); 6280 spdk_spin_unlock(&bs->used_lock); 6281 6282 id = bs_page_to_blobid(page_idx); 6283 6284 SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx); 6285 6286 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 6287 if (opts) { 6288 blob_opts_copy(opts, &opts_local); 6289 } 6290 6291 blob = blob_alloc(bs, id); 6292 if (!blob) { 6293 rc = -ENOMEM; 6294 goto error; 6295 } 6296 6297 blob->use_extent_table = opts_local.use_extent_table; 6298 if (blob->use_extent_table) { 6299 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 6300 } 6301 6302 if (!internal_xattrs) { 6303 blob_xattrs_init(&internal_xattrs_default); 6304 internal_xattrs = &internal_xattrs_default; 6305 } 6306 6307 rc = 
blob_set_xattrs(blob, &opts_local.xattrs, false);
6308 if (rc < 0) {
6309 goto error;
6310 }
6311
6312 rc = blob_set_xattrs(blob, internal_xattrs, true);
6313 if (rc < 0) {
6314 goto error;
6315 }
6316
6317 if (opts_local.thin_provision) {
6318 blob_set_thin_provision(blob);
6319 }
6320
6321 blob_set_clear_method(blob, opts_local.clear_method);
6322
6323 if (opts_local.esnap_id != NULL) {
6324 if (opts_local.esnap_id_len > UINT16_MAX) {
6325 SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n",
6326 opts_local.esnap_id_len);
6327 rc = -EINVAL;
6328 goto error;
6329
6330 }
6331 blob_set_thin_provision(blob);
6332 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6333 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID,
6334 opts_local.esnap_id, opts_local.esnap_id_len, true);
6335 if (rc != 0) {
6336 goto error;
6337 }
6338 }
6339
6340 rc = blob_resize(blob, opts_local.num_clusters);
6341 if (rc < 0) {
6342 goto error;
6343 }
6344 cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6345 cpl.u.blobid.cb_fn = cb_fn;
6346 cpl.u.blobid.cb_arg = cb_arg;
6347 cpl.u.blobid.blobid = blob->id;
6348
6349 seq = bs_sequence_start_bs(bs->md_channel, &cpl);
6350 if (!seq) {
6351 rc = -ENOMEM;
6352 goto error;
6353 }
6354
6355 blob_persist(seq, blob, bs_create_blob_cpl, blob);
6356 return;
6357
6358 error:
6359 SPDK_ERRLOG("Failed to create blob: %s, size %" PRIu64 " clusters\n",
6360 spdk_strerror(rc), opts_local.num_clusters);
6361 if (blob != NULL) {
6362 blob_free(blob);
6363 }
6364 spdk_spin_lock(&bs->used_lock);
6365 spdk_bit_array_clear(bs->used_blobids, page_idx);
6366 bs_release_md_page(bs, page_idx);
6367 spdk_spin_unlock(&bs->used_lock);
6368 cb_fn(cb_arg, 0, rc);
6369 }
6370
6371 void
6372 spdk_bs_create_blob(struct spdk_blob_store *bs,
6373 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6374 {
6375 bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
6376 }
6377
6378 void
6379 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
6380 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6381 {
6382 bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
6383 }
6384
6385 /* END spdk_bs_create_blob */
6386
6387 /* START blob_cleanup */
6388
6389 struct spdk_clone_snapshot_ctx {
6390 struct spdk_bs_cpl cpl;
6391 int bserrno;
6392 bool frozen;
6393
6394 struct spdk_io_channel *channel;
6395
6396 /* Current cluster for inflate operation */
6397 uint64_t cluster;
6398
6399 /* For inflation, force allocation of all unallocated clusters and remove
6400 * thin-provisioning. Otherwise, only decouple the parent and keep the clone thin. */
6401 bool allocate_all;
6402
6403 struct {
6404 spdk_blob_id id;
6405 struct spdk_blob *blob;
6406 bool md_ro;
6407 } original;
6408 struct {
6409 spdk_blob_id id;
6410 struct spdk_blob *blob;
6411 } new;
6412
6413 /* xattrs specified for snapshot/clones only. They have no impact on
6414 * the original blob's xattrs.
*/ 6415 const struct spdk_blob_xattr_opts *xattrs; 6416 }; 6417 6418 static void 6419 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 6420 { 6421 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 6422 struct spdk_bs_cpl *cpl = &ctx->cpl; 6423 6424 if (bserrno != 0) { 6425 if (ctx->bserrno != 0) { 6426 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6427 } else { 6428 ctx->bserrno = bserrno; 6429 } 6430 } 6431 6432 switch (cpl->type) { 6433 case SPDK_BS_CPL_TYPE_BLOBID: 6434 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 6435 break; 6436 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 6437 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 6438 break; 6439 default: 6440 SPDK_UNREACHABLE(); 6441 break; 6442 } 6443 6444 free(ctx); 6445 } 6446 6447 static void 6448 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6449 { 6450 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6451 struct spdk_blob *origblob = ctx->original.blob; 6452 6453 if (bserrno != 0) { 6454 if (ctx->bserrno != 0) { 6455 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 6456 } else { 6457 ctx->bserrno = bserrno; 6458 } 6459 } 6460 6461 ctx->original.id = origblob->id; 6462 origblob->locked_operation_in_progress = false; 6463 6464 /* Revert md_ro to original state */ 6465 origblob->md_ro = ctx->original.md_ro; 6466 6467 spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx); 6468 } 6469 6470 static void 6471 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 6472 { 6473 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6474 struct spdk_blob *origblob = ctx->original.blob; 6475 6476 if (bserrno != 0) { 6477 if (ctx->bserrno != 0) { 6478 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6479 } else { 6480 ctx->bserrno = bserrno; 6481 } 6482 } 6483 6484 if (ctx->frozen) { 6485 /* Unfreeze any outstanding I/O */ 6486 blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx); 6487 } else { 6488 bs_snapshot_unfreeze_cpl(ctx, 0); 6489 } 6490 6491 } 6492 6493 static void 6494 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno) 6495 { 6496 struct spdk_blob *newblob = ctx->new.blob; 6497 6498 if (bserrno != 0) { 6499 if (ctx->bserrno != 0) { 6500 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6501 } else { 6502 ctx->bserrno = bserrno; 6503 } 6504 } 6505 6506 ctx->new.id = newblob->id; 6507 spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6508 } 6509 6510 /* END blob_cleanup */ 6511 6512 /* START spdk_bs_create_snapshot */ 6513 6514 static void 6515 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 6516 { 6517 uint64_t *cluster_temp; 6518 uint64_t num_allocated_clusters_temp; 6519 uint32_t *extent_page_temp; 6520 6521 cluster_temp = blob1->active.clusters; 6522 blob1->active.clusters = blob2->active.clusters; 6523 blob2->active.clusters = cluster_temp; 6524 6525 num_allocated_clusters_temp = blob1->active.num_allocated_clusters; 6526 blob1->active.num_allocated_clusters = blob2->active.num_allocated_clusters; 6527 blob2->active.num_allocated_clusters = num_allocated_clusters_temp; 6528 6529 extent_page_temp = blob1->active.extent_pages; 6530 blob1->active.extent_pages = blob2->active.extent_pages; 6531 blob2->active.extent_pages = extent_page_temp; 6532 } 6533 6534 /* Copies an internal xattr */ 6535 static int 6536 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name) 6537 { 6538 const void *val = NULL; 6539 
size_t len;
6540 int bserrno;
6541
6542 bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true);
6543 if (bserrno != 0) {
6544 SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name);
6545 return bserrno;
6546 }
6547
6548 bserrno = blob_set_xattr(toblob, name, val, len, true);
6549 if (bserrno != 0) {
6550 SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n",
6551 name, toblob->id);
6552 return bserrno;
6553 }
6554 return 0;
6555 }
6556
6557 static void
6558 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
6559 {
6560 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6561 struct spdk_blob *origblob = ctx->original.blob;
6562 struct spdk_blob *newblob = ctx->new.blob;
6563
6564 if (bserrno != 0) {
6565 bs_snapshot_swap_cluster_maps(newblob, origblob);
6566 if (blob_is_esnap_clone(newblob)) {
6567 bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6568 origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6569 }
6570 bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6571 return;
6572 }
6573
6574 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
6575 bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
6576 if (bserrno != 0) {
6577 bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6578 return;
6579 }
6580
6581 bs_blob_list_add(ctx->original.blob);
6582
6583 spdk_blob_set_read_only(newblob);
6584
6585 /* sync snapshot metadata */
6586 spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6587 }
6588
6589 static void
6590 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
6591 {
6592 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6593 struct spdk_blob *origblob = ctx->original.blob;
6594 struct spdk_blob *newblob = ctx->new.blob;
6595
6596 if (bserrno != 0) {
6597 /* return cluster map back to original */
6598 bs_snapshot_swap_cluster_maps(newblob, origblob);
6599
6600 /* Newblob md sync failed. Valid clusters are only present in origblob.
6601 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred.
6602 * Newblob needs to be reverted to the thin-provisioned state it had at creation in order to close properly.
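 * The swap above is the inverse of the one done in bs_snapshot_freeze_cpl(),
 * so ownership of the allocated clusters has already returned to origblob
 * by the time newblob is closed.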
*/ 6603 blob_set_thin_provision(newblob); 6604 assert(spdk_mem_all_zero(newblob->active.clusters, 6605 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6606 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6607 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6608 6609 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6610 return; 6611 } 6612 6613 /* Set internal xattr for snapshot id */ 6614 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 6615 if (bserrno != 0) { 6616 /* return cluster map back to original */ 6617 bs_snapshot_swap_cluster_maps(newblob, origblob); 6618 blob_set_thin_provision(newblob); 6619 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6620 return; 6621 } 6622 6623 /* Create new back_bs_dev for snapshot */ 6624 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 6625 if (origblob->back_bs_dev == NULL) { 6626 /* return cluster map back to original */ 6627 bs_snapshot_swap_cluster_maps(newblob, origblob); 6628 blob_set_thin_provision(newblob); 6629 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 6630 return; 6631 } 6632 6633 /* Remove the xattr that references an external snapshot */ 6634 if (blob_is_esnap_clone(origblob)) { 6635 origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6636 bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6637 if (bserrno != 0) { 6638 if (bserrno == -ENOENT) { 6639 SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID 6640 " xattr to remove\n", origblob->id); 6641 assert(false); 6642 } else { 6643 /* return cluster map back to original */ 6644 bs_snapshot_swap_cluster_maps(newblob, origblob); 6645 blob_set_thin_provision(newblob); 6646 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6647 return; 6648 } 6649 } 6650 } 6651 6652 bs_blob_list_remove(origblob); 6653 origblob->parent_id = newblob->id; 6654 /* set clone blob as thin provisioned */ 6655 blob_set_thin_provision(origblob); 6656 6657 bs_blob_list_add(newblob); 6658 6659 /* sync clone metadata */ 6660 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 6661 } 6662 6663 static void 6664 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 6665 { 6666 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6667 struct spdk_blob *origblob = ctx->original.blob; 6668 struct spdk_blob *newblob = ctx->new.blob; 6669 int bserrno; 6670 6671 if (rc != 0) { 6672 bs_clone_snapshot_newblob_cleanup(ctx, rc); 6673 return; 6674 } 6675 6676 ctx->frozen = true; 6677 6678 if (blob_is_esnap_clone(origblob)) { 6679 /* Clean up any channels associated with the original blob id because future IO will 6680 * perform IO using the snapshot blob_id. 
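 * (The per-thread esnap channel tree is keyed by blob id, so channels
 * created under the original id could never be looked up again once the
 * external snapshot is reachable only through the new snapshot blob.)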
6681 */ 6682 blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL); 6683 } 6684 if (newblob->back_bs_dev) { 6685 blob_back_bs_destroy(newblob); 6686 } 6687 /* set new back_bs_dev for snapshot */ 6688 newblob->back_bs_dev = origblob->back_bs_dev; 6689 /* Set invalid flags from origblob */ 6690 newblob->invalid_flags = origblob->invalid_flags; 6691 6692 /* inherit parent from original blob if set */ 6693 newblob->parent_id = origblob->parent_id; 6694 switch (origblob->parent_id) { 6695 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 6696 bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6697 if (bserrno != 0) { 6698 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6699 return; 6700 } 6701 break; 6702 case SPDK_BLOBID_INVALID: 6703 break; 6704 default: 6705 /* Set internal xattr for snapshot id */ 6706 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6707 &origblob->parent_id, sizeof(spdk_blob_id), true); 6708 if (bserrno != 0) { 6709 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6710 return; 6711 } 6712 } 6713 6714 /* swap cluster maps */ 6715 bs_snapshot_swap_cluster_maps(newblob, origblob); 6716 6717 /* Set the clear method on the new blob to match the original. */ 6718 blob_set_clear_method(newblob, origblob->clear_method); 6719 6720 /* sync snapshot metadata */ 6721 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6722 } 6723 6724 static void 6725 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6726 { 6727 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6728 struct spdk_blob *origblob = ctx->original.blob; 6729 struct spdk_blob *newblob = _blob; 6730 6731 if (bserrno != 0) { 6732 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6733 return; 6734 } 6735 6736 ctx->new.blob = newblob; 6737 assert(spdk_blob_is_thin_provisioned(newblob)); 6738 assert(spdk_mem_all_zero(newblob->active.clusters, 6739 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6740 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6741 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6742 6743 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6744 } 6745 6746 static void 6747 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6748 { 6749 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6750 struct spdk_blob *origblob = ctx->original.blob; 6751 6752 if (bserrno != 0) { 6753 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6754 return; 6755 } 6756 6757 ctx->new.id = blobid; 6758 ctx->cpl.u.blobid.blobid = blobid; 6759 6760 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6761 } 6762 6763 6764 static void 6765 bs_xattr_snapshot(void *arg, const char *name, 6766 const void **value, size_t *value_len) 6767 { 6768 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6769 6770 struct spdk_blob *blob = (struct spdk_blob *)arg; 6771 *value = &blob->id; 6772 *value_len = sizeof(blob->id); 6773 } 6774 6775 static void 6776 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6777 { 6778 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6779 struct spdk_blob_opts opts; 6780 struct spdk_blob_xattr_opts internal_xattrs; 6781 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6782 6783 if (bserrno != 0) { 6784 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6785 return; 6786 } 6787 6788 
ctx->original.blob = _blob; 6789 6790 if (_blob->data_ro || _blob->md_ro) { 6791 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id 0x%" 6792 PRIx64 "\n", _blob->id); 6793 ctx->bserrno = -EINVAL; 6794 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6795 return; 6796 } 6797 6798 if (_blob->locked_operation_in_progress) { 6799 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6800 ctx->bserrno = -EBUSY; 6801 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6802 return; 6803 } 6804 6805 _blob->locked_operation_in_progress = true; 6806 6807 spdk_blob_opts_init(&opts, sizeof(opts)); 6808 blob_xattrs_init(&internal_xattrs); 6809 6810 /* Change the size of new blob to the same as in original blob, 6811 * but do not allocate clusters */ 6812 opts.thin_provision = true; 6813 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6814 opts.use_extent_table = _blob->use_extent_table; 6815 6816 /* If there are any xattrs specified for snapshot, set them now */ 6817 if (ctx->xattrs) { 6818 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6819 } 6820 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6821 internal_xattrs.count = 1; 6822 internal_xattrs.ctx = _blob; 6823 internal_xattrs.names = xattrs_names; 6824 internal_xattrs.get_value = bs_xattr_snapshot; 6825 6826 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6827 bs_snapshot_newblob_create_cpl, ctx); 6828 } 6829 6830 void 6831 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 6832 const struct spdk_blob_xattr_opts *snapshot_xattrs, 6833 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6834 { 6835 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6836 6837 if (!ctx) { 6838 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6839 return; 6840 } 6841 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6842 ctx->cpl.u.blobid.cb_fn = cb_fn; 6843 ctx->cpl.u.blobid.cb_arg = cb_arg; 6844 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6845 ctx->bserrno = 0; 6846 ctx->frozen = false; 6847 ctx->original.id = blobid; 6848 ctx->xattrs = snapshot_xattrs; 6849 6850 spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx); 6851 } 6852 /* END spdk_bs_create_snapshot */ 6853 6854 /* START spdk_bs_create_clone */ 6855 6856 static void 6857 bs_xattr_clone(void *arg, const char *name, 6858 const void **value, size_t *value_len) 6859 { 6860 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 6861 6862 struct spdk_blob *blob = (struct spdk_blob *)arg; 6863 *value = &blob->id; 6864 *value_len = sizeof(blob->id); 6865 } 6866 6867 static void 6868 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6869 { 6870 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6871 struct spdk_blob *clone = _blob; 6872 6873 ctx->new.blob = clone; 6874 bs_blob_list_add(clone); 6875 6876 spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx); 6877 } 6878 6879 static void 6880 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6881 { 6882 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6883 6884 ctx->cpl.u.blobid.blobid = blobid; 6885 spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx); 6886 } 6887 6888 static void 6889 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6890 { 6891 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6892 struct 
spdk_blob_opts opts; 6893 struct spdk_blob_xattr_opts internal_xattrs; 6894 char *xattr_names[] = { BLOB_SNAPSHOT }; 6895 6896 if (bserrno != 0) { 6897 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6898 return; 6899 } 6900 6901 ctx->original.blob = _blob; 6902 ctx->original.md_ro = _blob->md_ro; 6903 6904 if (!_blob->data_ro || !_blob->md_ro) { 6905 SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n"); 6906 ctx->bserrno = -EINVAL; 6907 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6908 return; 6909 } 6910 6911 if (_blob->locked_operation_in_progress) { 6912 SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n"); 6913 ctx->bserrno = -EBUSY; 6914 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6915 return; 6916 } 6917 6918 _blob->locked_operation_in_progress = true; 6919 6920 spdk_blob_opts_init(&opts, sizeof(opts)); 6921 blob_xattrs_init(&internal_xattrs); 6922 6923 opts.thin_provision = true; 6924 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6925 opts.use_extent_table = _blob->use_extent_table; 6926 if (ctx->xattrs) { 6927 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6928 } 6929 6930 /* Set internal xattr BLOB_SNAPSHOT */ 6931 internal_xattrs.count = 1; 6932 internal_xattrs.ctx = _blob; 6933 internal_xattrs.names = xattr_names; 6934 internal_xattrs.get_value = bs_xattr_clone; 6935 6936 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6937 bs_clone_newblob_create_cpl, ctx); 6938 } 6939 6940 void 6941 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6942 const struct spdk_blob_xattr_opts *clone_xattrs, 6943 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6944 { 6945 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6946 6947 if (!ctx) { 6948 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6949 return; 6950 } 6951 6952 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6953 ctx->cpl.u.blobid.cb_fn = cb_fn; 6954 ctx->cpl.u.blobid.cb_arg = cb_arg; 6955 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6956 ctx->bserrno = 0; 6957 ctx->xattrs = clone_xattrs; 6958 ctx->original.id = blobid; 6959 6960 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6961 } 6962 6963 /* END spdk_bs_create_clone */ 6964 6965 /* START spdk_bs_inflate_blob */ 6966 6967 static void 6968 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6969 { 6970 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6971 struct spdk_blob *_blob = ctx->original.blob; 6972 6973 if (bserrno != 0) { 6974 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6975 return; 6976 } 6977 6978 /* Temporarily override md_ro flag for MD modification */ 6979 _blob->md_ro = false; 6980 6981 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6982 if (bserrno != 0) { 6983 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6984 return; 6985 } 6986 6987 assert(_parent != NULL); 6988 6989 bs_blob_list_remove(_blob); 6990 _blob->parent_id = _parent->id; 6991 6992 blob_back_bs_destroy(_blob); 6993 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6994 bs_blob_list_add(_blob); 6995 6996 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6997 } 6998 6999 static void 7000 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 7001 { 7002 struct spdk_blob *_blob = ctx->original.blob; 7003 struct spdk_blob *_parent; 7004 7005 if (ctx->allocate_all) { 7006 /* remove thin provisioning */ 7007 bs_blob_list_remove(_blob); 
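/*
 * All clusters were allocated by the preceding touch pass, so this blob
 * no longer reads through a parent: drop whichever parent reference it
 * had (external snapshot id or regular snapshot xattr) and clear the
 * thin-provisioning flag below.
 */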
7008 if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
7009 blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
7010 _blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
7011 } else {
7012 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
7013 }
7014 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
7015 blob_back_bs_destroy(_blob);
7016 _blob->parent_id = SPDK_BLOBID_INVALID;
7017 } else {
7018 /* For now, esnap clones always have allocate_all set. */
7019 assert(!blob_is_esnap_clone(_blob));
7020
7021 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
7022 if (_parent->parent_id != SPDK_BLOBID_INVALID) {
7023 /* We must change the parent of the inflated blob */
7024 spdk_bs_open_blob(_blob->bs, _parent->parent_id,
7025 bs_inflate_blob_set_parent_cpl, ctx);
7026 return;
7027 }
7028
7029 bs_blob_list_remove(_blob);
7030 _blob->parent_id = SPDK_BLOBID_INVALID;
7031 blob_back_bs_destroy(_blob);
7032 _blob->back_bs_dev = bs_create_zeroes_dev();
7033 }
7034
7035 /* Temporarily override md_ro flag for MD modification */
7036 _blob->md_ro = false;
7037 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
7038 _blob->state = SPDK_BLOB_STATE_DIRTY;
7039
7040 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
7041 }
7042
7043 /* Check if cluster needs allocation */
7044 static inline bool
7045 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
7046 {
7047 struct spdk_blob_bs_dev *b;
7048
7049 assert(blob != NULL);
7050
7051 if (blob->active.clusters[cluster] != 0) {
7052 /* Cluster is already allocated */
7053 return false;
7054 }
7055
7056 if (blob->parent_id == SPDK_BLOBID_INVALID) {
7057 /* Blob has no parent blob */
7058 return allocate_all;
7059 }
7060
7061 if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
7062 return true;
7063 }
7064
7065 b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
7066 return (allocate_all || b->blob->active.clusters[cluster] != 0);
7067 }
7068
7069 static void
7070 bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
7071 {
7072 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
7073 struct spdk_blob *_blob = ctx->original.blob;
7074 struct spdk_bs_cpl cpl;
7075 spdk_bs_user_op_t *op;
7076 uint64_t offset;
7077
7078 if (bserrno != 0) {
7079 bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
7080 return;
7081 }
7082
7083 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
7084 if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
7085 break;
7086 }
7087 }
7088
7089 if (ctx->cluster < _blob->active.num_clusters) {
7090 offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);
7091
7092 /* We may safely advance the cluster index before the copy starts */
7093 ctx->cluster++;
7094
7095 /* Use a dummy 0B read as a context for cluster copy */
7096 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7097 cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
7098 cpl.u.blob_basic.cb_arg = ctx;
7099
7100 op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
7101 NULL, 0, offset, 0);
7102 if (!op) {
7103 bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
7104 return;
7105 }
7106
7107 bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
7108 } else {
7109 bs_inflate_blob_done(ctx);
7110 }
7111 }
7112
7113 static void
7114 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
7115 {
7116 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
7117 uint64_t clusters_needed;
7118 uint64_t i;
7119
7120 if (bserrno != 0) {
7121 bs_clone_snapshot_cleanup_finish(ctx, bserrno);
7122 return;
7123 }
7124
7125 ctx->original.blob = _blob;
7126 ctx->original.md_ro = _blob->md_ro;
7127
7128 if (_blob->locked_operation_in_progress) {
7129 SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
7130 ctx->bserrno = -EBUSY;
7131 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
7132 return;
7133 }
7134
7135 _blob->locked_operation_in_progress = true;
7136
7137 switch (_blob->parent_id) {
7138 case SPDK_BLOBID_INVALID:
7139 if (!ctx->allocate_all) {
7140 /* This blob has no parent, so we cannot decouple it. */
7141 SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
7142 bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
7143 return;
7144 }
7145 break;
7146 case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
7147 /*
7148 * It would be better to rely on back_bs_dev->is_zeroes() to determine which
7149 * clusters require allocation. Until there is a blobstore consumer that
7150 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes() it is not
7151 * worth the effort.
7152 */
7153 ctx->allocate_all = true;
7154 break;
7155 default:
7156 break;
7157 }
7158
7159 if (spdk_blob_is_thin_provisioned(_blob) == false) {
7160 /* This blob is not thin provisioned. There is nothing to inflate. */
7161 bs_clone_snapshot_origblob_cleanup(ctx, 0);
7162 return;
7163 }
7164
7165 /* Do two passes - one to verify that we can obtain enough clusters
7166 * and another to actually claim them.
7167 */
7168 clusters_needed = 0;
7169 for (i = 0; i < _blob->active.num_clusters; i++) {
7170 if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
7171 clusters_needed++;
7172 }
7173 }
7174
7175 if (clusters_needed > _blob->bs->num_free_clusters) {
7176 /* Not enough free clusters. Cannot satisfy the request.
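 * Note that this pre-check is advisory rather than a reservation: the
 * count is taken without holding used_lock across the whole pass, so a
 * concurrent allocation elsewhere can still make a later per-cluster
 * allocation fail with -ENOSPC.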
*/ 7177 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 7178 return; 7179 } 7180 7181 ctx->cluster = 0; 7182 bs_inflate_blob_touch_next(ctx, 0); 7183 } 7184 7185 static void 7186 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7187 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 7188 { 7189 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 7190 7191 if (!ctx) { 7192 cb_fn(cb_arg, -ENOMEM); 7193 return; 7194 } 7195 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7196 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7197 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7198 ctx->bserrno = 0; 7199 ctx->original.id = blobid; 7200 ctx->channel = channel; 7201 ctx->allocate_all = allocate_all; 7202 7203 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 7204 } 7205 7206 void 7207 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7208 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7209 { 7210 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 7211 } 7212 7213 void 7214 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7215 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7216 { 7217 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 7218 } 7219 /* END spdk_bs_inflate_blob */ 7220 7221 /* START spdk_bs_blob_shallow_copy */ 7222 7223 struct shallow_copy_ctx { 7224 struct spdk_bs_cpl cpl; 7225 int bserrno; 7226 7227 /* Blob source for copy */ 7228 struct spdk_blob_store *bs; 7229 spdk_blob_id blobid; 7230 struct spdk_blob *blob; 7231 struct spdk_io_channel *blob_channel; 7232 7233 /* Destination device for copy */ 7234 struct spdk_bs_dev *ext_dev; 7235 struct spdk_io_channel *ext_channel; 7236 7237 /* Current cluster for copy operation */ 7238 uint64_t cluster; 7239 7240 /* Buffer for blob reading */ 7241 uint8_t *read_buff; 7242 7243 /* Struct for external device writing */ 7244 struct spdk_bs_dev_cb_args ext_args; 7245 7246 /* Actual number of copied clusters */ 7247 uint64_t copied_clusters_count; 7248 7249 /* Status callback for updates about the ongoing operation */ 7250 spdk_blob_shallow_copy_status status_cb; 7251 7252 /* Argument passed to function status_cb */ 7253 void *status_cb_arg; 7254 }; 7255 7256 static void 7257 bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno) 7258 { 7259 struct shallow_copy_ctx *ctx = cb_arg; 7260 struct spdk_bs_cpl *cpl = &ctx->cpl; 7261 7262 if (bserrno != 0) { 7263 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, cleanup error %d\n", ctx->blob->id, bserrno); 7264 ctx->bserrno = bserrno; 7265 } 7266 7267 ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel); 7268 spdk_free(ctx->read_buff); 7269 7270 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 7271 7272 free(ctx); 7273 } 7274 7275 static void 7276 bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, int bserrno) 7277 { 7278 struct shallow_copy_ctx *ctx = cb_arg; 7279 struct spdk_blob *_blob = ctx->blob; 7280 7281 if (bserrno != 0) { 7282 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, ext dev write error %d\n", ctx->blob->id, bserrno); 7283 ctx->bserrno = bserrno; 7284 _blob->locked_operation_in_progress = false; 7285 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7286 return; 7287 } 7288 7289 ctx->cluster++; 7290 if (ctx->status_cb) { 7291 ctx->copied_clusters_count++; 7292 ctx->status_cb(ctx->copied_clusters_count, ctx->status_cb_arg); 7293 } 7294 7295 
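/*
 * Copy loop: bs_shallow_copy_cluster_find_next() scans forward from
 * ctx->cluster to the next allocated cluster, reads it into read_buff
 * over the blob channel, writes it to ext_dev, and the write completion
 * returns here until every allocated cluster has been visited.
 */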
bs_shallow_copy_cluster_find_next(ctx); 7296 } 7297 7298 static void 7299 bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno) 7300 { 7301 struct shallow_copy_ctx *ctx = cb_arg; 7302 struct spdk_bs_dev *ext_dev = ctx->ext_dev; 7303 struct spdk_blob *_blob = ctx->blob; 7304 7305 if (bserrno != 0) { 7306 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob read error %d\n", ctx->blob->id, bserrno); 7307 ctx->bserrno = bserrno; 7308 _blob->locked_operation_in_progress = false; 7309 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7310 return; 7311 } 7312 7313 ctx->ext_args.channel = ctx->ext_channel; 7314 ctx->ext_args.cb_fn = bs_shallow_copy_bdev_write_cpl; 7315 ctx->ext_args.cb_arg = ctx; 7316 7317 ext_dev->write(ext_dev, ctx->ext_channel, ctx->read_buff, 7318 bs_cluster_to_lba(_blob->bs, ctx->cluster), 7319 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz), 7320 &ctx->ext_args); 7321 } 7322 7323 static void 7324 bs_shallow_copy_cluster_find_next(void *cb_arg) 7325 { 7326 struct shallow_copy_ctx *ctx = cb_arg; 7327 struct spdk_blob *_blob = ctx->blob; 7328 7329 while (ctx->cluster < _blob->active.num_clusters) { 7330 if (_blob->active.clusters[ctx->cluster] != 0) { 7331 break; 7332 } 7333 7334 ctx->cluster++; 7335 } 7336 7337 if (ctx->cluster < _blob->active.num_clusters) { 7338 blob_request_submit_op_single(ctx->blob_channel, _blob, ctx->read_buff, 7339 bs_cluster_to_lba(_blob->bs, ctx->cluster), 7340 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz), 7341 bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ); 7342 } else { 7343 _blob->locked_operation_in_progress = false; 7344 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7345 } 7346 } 7347 7348 static void 7349 bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 7350 { 7351 struct shallow_copy_ctx *ctx = cb_arg; 7352 struct spdk_bs_dev *ext_dev = ctx->ext_dev; 7353 uint32_t blob_block_size; 7354 uint64_t blob_total_size; 7355 7356 if (bserrno != 0) { 7357 SPDK_ERRLOG("Shallow copy blob open error %d\n", bserrno); 7358 ctx->bserrno = bserrno; 7359 bs_shallow_copy_cleanup_finish(ctx, 0); 7360 return; 7361 } 7362 7363 if (!spdk_blob_is_read_only(_blob)) { 7364 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob must be read only\n", _blob->id); 7365 ctx->bserrno = -EPERM; 7366 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7367 return; 7368 } 7369 7370 blob_block_size = _blob->bs->dev->blocklen; 7371 blob_total_size = spdk_blob_get_num_clusters(_blob) * spdk_bs_get_cluster_size(_blob->bs); 7372 7373 if (blob_total_size > ext_dev->blockcnt * ext_dev->blocklen) { 7374 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device must have at least blob size\n", 7375 _blob->id); 7376 ctx->bserrno = -EINVAL; 7377 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7378 return; 7379 } 7380 7381 if (blob_block_size % ext_dev->blocklen != 0) { 7382 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device block size is not compatible with \ 7383 blobstore block size\n", _blob->id); 7384 ctx->bserrno = -EINVAL; 7385 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7386 return; 7387 } 7388 7389 ctx->blob = _blob; 7390 7391 if (_blob->locked_operation_in_progress) { 7392 SPDK_DEBUGLOG(blob, "blob 0x%" PRIx64 " shallow copy - another operation in progress\n", _blob->id); 7393 ctx->bserrno = -EBUSY; 7394 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7395 return; 7396 } 7397 7398 _blob->locked_operation_in_progress = true; 7399 
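/*
 * Note: only allocated clusters are copied. Holes in the thin-provisioned
 * source are skipped, so regions backed by a parent or external snapshot
 * are left untouched on ext_dev; a consumer wanting a self-contained image
 * may need to pre-fill ext_dev or layer it over the same backing device.
 */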
7400 ctx->cluster = 0; 7401 bs_shallow_copy_cluster_find_next(ctx); 7402 } 7403 7404 int 7405 spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7406 spdk_blob_id blobid, struct spdk_bs_dev *ext_dev, 7407 spdk_blob_shallow_copy_status status_cb_fn, void *status_cb_arg, 7408 spdk_blob_op_complete cb_fn, void *cb_arg) 7409 { 7410 struct shallow_copy_ctx *ctx; 7411 struct spdk_io_channel *ext_channel; 7412 7413 ctx = calloc(1, sizeof(*ctx)); 7414 if (!ctx) { 7415 return -ENOMEM; 7416 } 7417 7418 ctx->bs = bs; 7419 ctx->blobid = blobid; 7420 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7421 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7422 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7423 ctx->bserrno = 0; 7424 ctx->blob_channel = channel; 7425 ctx->status_cb = status_cb_fn; 7426 ctx->status_cb_arg = status_cb_arg; 7427 ctx->read_buff = spdk_malloc(bs->cluster_sz, bs->dev->blocklen, NULL, 7428 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 7429 if (!ctx->read_buff) { 7430 free(ctx); 7431 return -ENOMEM; 7432 } 7433 7434 ext_channel = ext_dev->create_channel(ext_dev); 7435 if (!ext_channel) { 7436 spdk_free(ctx->read_buff); 7437 free(ctx); 7438 return -ENOMEM; 7439 } 7440 ctx->ext_dev = ext_dev; 7441 ctx->ext_channel = ext_channel; 7442 7443 spdk_bs_open_blob(ctx->bs, ctx->blobid, bs_shallow_copy_blob_open_cpl, ctx); 7444 7445 return 0; 7446 } 7447 /* END spdk_bs_blob_shallow_copy */ 7448 7449 /* START spdk_bs_blob_set_parent */ 7450 7451 struct set_parent_ctx { 7452 struct spdk_blob_store *bs; 7453 int bserrno; 7454 spdk_bs_op_complete cb_fn; 7455 void *cb_arg; 7456 7457 struct spdk_blob *blob; 7458 bool blob_md_ro; 7459 7460 struct blob_parent parent; 7461 }; 7462 7463 static void 7464 bs_set_parent_cleanup_finish(void *cb_arg, int bserrno) 7465 { 7466 struct set_parent_ctx *ctx = cb_arg; 7467 7468 assert(ctx != NULL); 7469 7470 if (bserrno != 0) { 7471 SPDK_ERRLOG("blob set parent finish error %d\n", bserrno); 7472 if (ctx->bserrno == 0) { 7473 ctx->bserrno = bserrno; 7474 } 7475 } 7476 7477 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7478 7479 free(ctx); 7480 } 7481 7482 static void 7483 bs_set_parent_close_snapshot(void *cb_arg, int bserrno) 7484 { 7485 struct set_parent_ctx *ctx = cb_arg; 7486 7487 if (ctx->bserrno != 0) { 7488 spdk_blob_close(ctx->parent.u.snapshot.blob, bs_set_parent_cleanup_finish, ctx); 7489 return; 7490 } 7491 7492 if (bserrno != 0) { 7493 SPDK_ERRLOG("blob close error %d\n", bserrno); 7494 ctx->bserrno = bserrno; 7495 } 7496 7497 bs_set_parent_cleanup_finish(ctx, ctx->bserrno); 7498 } 7499 7500 static void 7501 bs_set_parent_close_blob(void *cb_arg, int bserrno) 7502 { 7503 struct set_parent_ctx *ctx = cb_arg; 7504 struct spdk_blob *blob = ctx->blob; 7505 struct spdk_blob *snapshot = ctx->parent.u.snapshot.blob; 7506 7507 if (bserrno != 0 && ctx->bserrno == 0) { 7508 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7509 ctx->bserrno = bserrno; 7510 } 7511 7512 /* Revert md_ro to original state */ 7513 blob->md_ro = ctx->blob_md_ro; 7514 7515 blob->locked_operation_in_progress = false; 7516 snapshot->locked_operation_in_progress = false; 7517 7518 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7519 } 7520 7521 static void 7522 bs_set_parent_set_back_bs_dev_done(void *cb_arg, int bserrno) 7523 { 7524 struct set_parent_ctx *ctx = cb_arg; 7525 struct spdk_blob *blob = ctx->blob; 7526 7527 if (bserrno != 0) { 7528 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7529 ctx->bserrno = bserrno; 7530 bs_set_parent_close_blob(ctx, bserrno); 7531 
return; 7532 } 7533 7534 spdk_blob_sync_md(blob, bs_set_parent_close_blob, ctx); 7535 } 7536 7537 static int 7538 bs_set_parent_refs(struct spdk_blob *blob, struct blob_parent *parent) 7539 { 7540 int rc; 7541 7542 bs_blob_list_remove(blob); 7543 7544 rc = blob_set_xattr(blob, BLOB_SNAPSHOT, &parent->u.snapshot.id, sizeof(spdk_blob_id), true); 7545 if (rc != 0) { 7546 SPDK_ERRLOG("error %d setting snapshot xattr\n", rc); 7547 return rc; 7548 } 7549 blob->parent_id = parent->u.snapshot.id; 7550 7551 if (blob_is_esnap_clone(blob)) { 7552 /* Remove the xattr that references the external snapshot */ 7553 blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 7554 blob_remove_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 7555 } 7556 7557 bs_blob_list_add(blob); 7558 7559 return 0; 7560 } 7561 7562 static void 7563 bs_set_parent_snapshot_open_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno) 7564 { 7565 struct set_parent_ctx *ctx = cb_arg; 7566 struct spdk_blob *blob = ctx->blob; 7567 struct spdk_bs_dev *back_bs_dev; 7568 7569 if (bserrno != 0) { 7570 SPDK_ERRLOG("snapshot open error %d\n", bserrno); 7571 ctx->bserrno = bserrno; 7572 spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx); 7573 return; 7574 } 7575 7576 ctx->parent.u.snapshot.blob = snapshot; 7577 ctx->parent.u.snapshot.id = snapshot->id; 7578 7579 if (!spdk_blob_is_snapshot(snapshot)) { 7580 SPDK_ERRLOG("parent blob is not a snapshot\n"); 7581 ctx->bserrno = -EINVAL; 7582 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7583 return; 7584 } 7585 7586 if (blob->active.num_clusters != snapshot->active.num_clusters) { 7587 SPDK_ERRLOG("parent blob has a number of clusters different from child's ones\n"); 7588 ctx->bserrno = -EINVAL; 7589 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7590 return; 7591 } 7592 7593 if (blob->locked_operation_in_progress || snapshot->locked_operation_in_progress) { 7594 SPDK_ERRLOG("cannot set parent of blob, another operation in progress\n"); 7595 ctx->bserrno = -EBUSY; 7596 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7597 return; 7598 } 7599 7600 blob->locked_operation_in_progress = true; 7601 snapshot->locked_operation_in_progress = true; 7602 7603 /* Temporarily override md_ro flag for MD modification */ 7604 blob->md_ro = false; 7605 7606 back_bs_dev = bs_create_blob_bs_dev(snapshot); 7607 7608 blob_set_back_bs_dev(blob, back_bs_dev, bs_set_parent_refs, &ctx->parent, 7609 bs_set_parent_set_back_bs_dev_done, 7610 ctx); 7611 } 7612 7613 static void 7614 bs_set_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7615 { 7616 struct set_parent_ctx *ctx = cb_arg; 7617 7618 if (bserrno != 0) { 7619 SPDK_ERRLOG("blob open error %d\n", bserrno); 7620 ctx->bserrno = bserrno; 7621 bs_set_parent_cleanup_finish(ctx, 0); 7622 return; 7623 } 7624 7625 if (!spdk_blob_is_thin_provisioned(blob)) { 7626 SPDK_ERRLOG("blob is not thin-provisioned\n"); 7627 ctx->bserrno = -EINVAL; 7628 spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx); 7629 return; 7630 } 7631 7632 ctx->blob = blob; 7633 ctx->blob_md_ro = blob->md_ro; 7634 7635 spdk_bs_open_blob(ctx->bs, ctx->parent.u.snapshot.id, bs_set_parent_snapshot_open_cpl, ctx); 7636 } 7637 7638 void 7639 spdk_bs_blob_set_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id, 7640 spdk_blob_id snapshot_id, spdk_blob_op_complete cb_fn, void *cb_arg) 7641 { 7642 struct set_parent_ctx *ctx; 7643 7644 if (snapshot_id == SPDK_BLOBID_INVALID) { 7645 SPDK_ERRLOG("snapshot id not valid\n"); 7646 cb_fn(cb_arg, 
-EINVAL);
7647 		return;
7648 	}
7649 
7650 	if (blob_id == snapshot_id) {
7651 		SPDK_ERRLOG("blob id and snapshot id cannot be the same\n");
7652 		cb_fn(cb_arg, -EINVAL);
7653 		return;
7654 	}
7655 
7656 	if (spdk_blob_get_parent_snapshot(bs, blob_id) == snapshot_id) {
7657 		SPDK_NOTICELOG("snapshot is already the parent of blob\n");
7658 		cb_fn(cb_arg, -EEXIST);
7659 		return;
7660 	}
7661 
7662 	ctx = calloc(1, sizeof(*ctx));
7663 	if (!ctx) {
7664 		cb_fn(cb_arg, -ENOMEM);
7665 		return;
7666 	}
7667 
7668 	ctx->bs = bs;
7669 	ctx->parent.u.snapshot.id = snapshot_id;
7670 	ctx->cb_fn = cb_fn;
7671 	ctx->cb_arg = cb_arg;
7672 	ctx->bserrno = 0;
7673 
7674 	spdk_bs_open_blob(bs, blob_id, bs_set_parent_blob_open_cpl, ctx);
7675 }
7676 /* END spdk_bs_blob_set_parent */
7677 
7678 /* START spdk_bs_blob_set_external_parent */
7679 
7680 static void
7681 bs_set_external_parent_cleanup_finish(void *cb_arg, int bserrno)
7682 {
7683 	struct set_parent_ctx *ctx = cb_arg;
7684 
7685 	if (bserrno != 0) {
7686 		SPDK_ERRLOG("blob set external parent finish error %d\n", bserrno);
7687 		if (ctx->bserrno == 0) {
7688 			ctx->bserrno = bserrno;
7689 		}
7690 	}
7691 
7692 	ctx->cb_fn(ctx->cb_arg, ctx->bserrno);
7693 
7694 	free(ctx->parent.u.esnap.id);
7695 	free(ctx);
7696 }
7697 
7698 static void
7699 bs_set_external_parent_close_blob(void *cb_arg, int bserrno)
7700 {
7701 	struct set_parent_ctx *ctx = cb_arg;
7702 	struct spdk_blob *blob = ctx->blob;
7703 
7704 	if (bserrno != 0 && ctx->bserrno == 0) {
7705 		SPDK_ERRLOG("error %d in metadata sync\n", bserrno);
7706 		ctx->bserrno = bserrno;
7707 	}
7708 
7709 	/* Revert md_ro to original state */
7710 	blob->md_ro = ctx->blob_md_ro;
7711 
7712 	blob->locked_operation_in_progress = false;
7713 
7714 	spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx);
7715 }
7716 
7717 static void
7718 bs_set_external_parent_unfrozen(void *cb_arg, int bserrno)
7719 {
7720 	struct set_parent_ctx *ctx = cb_arg;
7721 	struct spdk_blob *blob = ctx->blob;
7722 
7723 	if (bserrno != 0) {
7724 		SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno);
7725 		ctx->bserrno = bserrno;
7726 		bs_set_external_parent_close_blob(ctx, bserrno);
7727 		return;
7728 	}
7729 
7730 	spdk_blob_sync_md(blob, bs_set_external_parent_close_blob, ctx);
7731 }
7732 
7733 static int
7734 bs_set_external_parent_refs(struct spdk_blob *blob, struct blob_parent *parent)
7735 {
7736 	int rc;
7737 
7738 	bs_blob_list_remove(blob);
7739 
7740 	if (spdk_blob_is_clone(blob)) {
7741 		/* Remove the xattr that references the snapshot */
7742 		blob->parent_id = SPDK_BLOBID_INVALID;
7743 		blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
7744 	}
7745 
7746 	rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, parent->u.esnap.id,
7747 			    parent->u.esnap.id_len, true);
7748 	if (rc != 0) {
7749 		SPDK_ERRLOG("error %d setting external snapshot xattr\n", rc);
7750 		return rc;
7751 	}
7752 	blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
7753 	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;
7754 
7755 	bs_blob_list_add(blob);
7756 
7757 	return 0;
7758 }
7759 
7760 static void
7761 bs_set_external_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
7762 {
7763 	struct set_parent_ctx *ctx = cb_arg;
7764 	const void *esnap_id;
7765 	size_t esnap_id_len;
7766 	int rc;
7767 
7768 	if (bserrno != 0) {
7769 		SPDK_ERRLOG("blob open error %d\n", bserrno);
7770 		ctx->bserrno = bserrno;
7771 		bs_set_external_parent_cleanup_finish(ctx, 0);
7772 		return;
7773 	}
7774 
7775 	ctx->blob = blob;
7776 	ctx->blob_md_ro = blob->md_ro;
7777 
7778 	rc = spdk_blob_get_esnap_id(blob, &esnap_id, &esnap_id_len);
7779 	if (rc == 0 && esnap_id != NULL && esnap_id_len == ctx->parent.u.esnap.id_len &&
7780 	    memcmp(esnap_id, ctx->parent.u.esnap.id, esnap_id_len) == 0) {
7781 		SPDK_ERRLOG("external snapshot is already the parent of blob\n");
7782 		ctx->bserrno = -EEXIST;
7783 		goto error;
7784 	}
7785 
7786 	if (!spdk_blob_is_thin_provisioned(blob)) {
7787 		SPDK_ERRLOG("blob is not thin-provisioned\n");
7788 		ctx->bserrno = -EINVAL;
7789 		goto error;
7790 	}
7791 
7792 	if (blob->locked_operation_in_progress) {
7793 		SPDK_ERRLOG("cannot set external parent of blob, another operation in progress\n");
7794 		ctx->bserrno = -EBUSY;
7795 		goto error;
7796 	}
7797 
7798 	blob->locked_operation_in_progress = true;
7799 
7800 	/* Temporarily override md_ro flag for MD modification */
7801 	blob->md_ro = false;
7802 
7803 	blob_set_back_bs_dev(blob, ctx->parent.u.esnap.back_bs_dev, bs_set_external_parent_refs,
7804 			     &ctx->parent, bs_set_external_parent_unfrozen, ctx);
7805 	return;
7806 
7807 error:
7808 	spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx);
7809 }
7810 
7811 void
7812 spdk_bs_blob_set_external_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id,
7813 				 struct spdk_bs_dev *esnap_bs_dev, const void *esnap_id,
7814 				 uint32_t esnap_id_len, spdk_blob_op_complete cb_fn, void *cb_arg)
7815 {
7816 	struct set_parent_ctx *ctx;
7817 	uint64_t esnap_dev_size, cluster_sz;
7818 
7819 	if (sizeof(blob_id) == esnap_id_len && memcmp(&blob_id, esnap_id, sizeof(blob_id)) == 0) {
7820 		SPDK_ERRLOG("blob id and external snapshot id cannot be the same\n");
7821 		cb_fn(cb_arg, -EINVAL);
7822 		return;
7823 	}
7824 
7825 	esnap_dev_size = esnap_bs_dev->blockcnt * esnap_bs_dev->blocklen;
7826 	cluster_sz = spdk_bs_get_cluster_size(bs);
7827 	if ((esnap_dev_size % cluster_sz) != 0) {
7828 		SPDK_ERRLOG("Esnap device size %" PRIu64 " is not an integer multiple of "
7829 			    "cluster size %" PRIu64 "\n", esnap_dev_size, cluster_sz);
7830 		cb_fn(cb_arg, -EINVAL);
7831 		return;
7832 	}
7833 
7834 	ctx = calloc(1, sizeof(*ctx));
7835 	if (!ctx) {
7836 		cb_fn(cb_arg, -ENOMEM);
7837 		return;
7838 	}
7839 
7840 	ctx->parent.u.esnap.id = calloc(1, esnap_id_len);
7841 	if (!ctx->parent.u.esnap.id) {
7842 		free(ctx);
7843 		cb_fn(cb_arg, -ENOMEM);
7844 		return;
7845 	}
7846 
7847 	ctx->bs = bs;
7848 	ctx->parent.u.esnap.back_bs_dev = esnap_bs_dev;
7849 	memcpy(ctx->parent.u.esnap.id, esnap_id, esnap_id_len);
7850 	ctx->parent.u.esnap.id_len = esnap_id_len;
7851 	ctx->cb_fn = cb_fn;
7852 	ctx->cb_arg = cb_arg;
7853 	ctx->bserrno = 0;
7854 
7855 	spdk_bs_open_blob(bs, blob_id, bs_set_external_parent_blob_open_cpl, ctx);
7856 }
7857 /* END spdk_bs_blob_set_external_parent */
7858 
7859 /* START spdk_blob_resize */
7860 struct spdk_bs_resize_ctx {
7861 	spdk_blob_op_complete cb_fn;
7862 	void *cb_arg;
7863 	struct spdk_blob *blob;
7864 	uint64_t sz;
7865 	int rc;
7866 };
7867 
7868 static void
7869 bs_resize_unfreeze_cpl(void *cb_arg, int rc)
7870 {
7871 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
7872 
7873 	if (rc != 0) {
7874 		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
7875 	}
7876 
7877 	if (ctx->rc != 0) {
7878 		SPDK_ERRLOG("Resize failed, ctx->rc=%d\n", ctx->rc);
7879 		rc = ctx->rc;
7880 	}
7881 
7882 	ctx->blob->locked_operation_in_progress = false;
7883 
7884 	ctx->cb_fn(ctx->cb_arg, rc);
7885 	free(ctx);
7886 }
7887 
7888 static void
7889 bs_resize_freeze_cpl(void *cb_arg, int rc)
7890 {
7891 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
7892 
7893 	if (rc != 0) {
7894 		ctx->blob->locked_operation_in_progress = false;
7895 		ctx->cb_fn(ctx->cb_arg, rc);
7896 		free(ctx);
7897 
return; 7898 } 7899 7900 ctx->rc = blob_resize(ctx->blob, ctx->sz); 7901 7902 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 7903 } 7904 7905 void 7906 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 7907 { 7908 struct spdk_bs_resize_ctx *ctx; 7909 7910 blob_verify_md_op(blob); 7911 7912 SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz); 7913 7914 if (blob->md_ro) { 7915 cb_fn(cb_arg, -EPERM); 7916 return; 7917 } 7918 7919 if (sz == blob->active.num_clusters) { 7920 cb_fn(cb_arg, 0); 7921 return; 7922 } 7923 7924 if (blob->locked_operation_in_progress) { 7925 cb_fn(cb_arg, -EBUSY); 7926 return; 7927 } 7928 7929 ctx = calloc(1, sizeof(*ctx)); 7930 if (!ctx) { 7931 cb_fn(cb_arg, -ENOMEM); 7932 return; 7933 } 7934 7935 blob->locked_operation_in_progress = true; 7936 ctx->cb_fn = cb_fn; 7937 ctx->cb_arg = cb_arg; 7938 ctx->blob = blob; 7939 ctx->sz = sz; 7940 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 7941 } 7942 7943 /* END spdk_blob_resize */ 7944 7945 7946 /* START spdk_bs_delete_blob */ 7947 7948 static void 7949 bs_delete_close_cpl(void *cb_arg, int bserrno) 7950 { 7951 spdk_bs_sequence_t *seq = cb_arg; 7952 7953 bs_sequence_finish(seq, bserrno); 7954 } 7955 7956 static void 7957 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7958 { 7959 struct spdk_blob *blob = cb_arg; 7960 7961 if (bserrno != 0) { 7962 /* 7963 * We already removed this blob from the blobstore tailq, so 7964 * we need to free it here since this is the last reference 7965 * to it. 7966 */ 7967 blob_free(blob); 7968 bs_delete_close_cpl(seq, bserrno); 7969 return; 7970 } 7971 7972 /* 7973 * This will immediately decrement the ref_count and call 7974 * the completion routine since the metadata state is clean. 7975 * By calling spdk_blob_close, we reduce the number of call 7976 * points into code that touches the blob->open_ref count 7977 * and the blobstore's blob list. 
7978 */ 7979 spdk_blob_close(blob, bs_delete_close_cpl, seq); 7980 } 7981 7982 struct delete_snapshot_ctx { 7983 struct spdk_blob_list *parent_snapshot_entry; 7984 struct spdk_blob *snapshot; 7985 struct spdk_blob_md_page *page; 7986 bool snapshot_md_ro; 7987 struct spdk_blob *clone; 7988 bool clone_md_ro; 7989 spdk_blob_op_with_handle_complete cb_fn; 7990 void *cb_arg; 7991 int bserrno; 7992 uint32_t next_extent_page; 7993 }; 7994 7995 static void 7996 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 7997 { 7998 struct delete_snapshot_ctx *ctx = cb_arg; 7999 8000 if (bserrno != 0) { 8001 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 8002 } 8003 8004 assert(ctx != NULL); 8005 8006 if (bserrno != 0 && ctx->bserrno == 0) { 8007 ctx->bserrno = bserrno; 8008 } 8009 8010 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 8011 spdk_free(ctx->page); 8012 free(ctx); 8013 } 8014 8015 static void 8016 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 8017 { 8018 struct delete_snapshot_ctx *ctx = cb_arg; 8019 8020 if (bserrno != 0) { 8021 ctx->bserrno = bserrno; 8022 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 8023 } 8024 8025 if (ctx->bserrno != 0) { 8026 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 8027 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 8028 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 8029 } 8030 8031 ctx->snapshot->locked_operation_in_progress = false; 8032 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8033 8034 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 8035 } 8036 8037 static void 8038 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 8039 { 8040 struct delete_snapshot_ctx *ctx = cb_arg; 8041 8042 ctx->clone->locked_operation_in_progress = false; 8043 ctx->clone->md_ro = ctx->clone_md_ro; 8044 8045 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8046 } 8047 8048 static void 8049 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 8050 { 8051 struct delete_snapshot_ctx *ctx = cb_arg; 8052 8053 if (bserrno) { 8054 ctx->bserrno = bserrno; 8055 delete_snapshot_cleanup_clone(ctx, 0); 8056 return; 8057 } 8058 8059 ctx->clone->locked_operation_in_progress = false; 8060 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 8061 } 8062 8063 static void 8064 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 8065 { 8066 struct delete_snapshot_ctx *ctx = cb_arg; 8067 struct spdk_blob_list *parent_snapshot_entry = NULL; 8068 struct spdk_blob_list *snapshot_entry = NULL; 8069 struct spdk_blob_list *clone_entry = NULL; 8070 struct spdk_blob_list *snapshot_clone_entry = NULL; 8071 8072 if (bserrno) { 8073 SPDK_ERRLOG("Failed to sync MD on blob\n"); 8074 ctx->bserrno = bserrno; 8075 delete_snapshot_cleanup_clone(ctx, 0); 8076 return; 8077 } 8078 8079 /* Get snapshot entry for the snapshot we want to remove */ 8080 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 8081 8082 assert(snapshot_entry != NULL); 8083 8084 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 8085 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8086 assert(clone_entry != NULL); 8087 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 8088 snapshot_entry->clone_count--; 8089 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 8090 8091 switch (ctx->snapshot->parent_id) { 8092 case SPDK_BLOBID_INVALID: 8093 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 8094 /* No parent snapshot - just remove clone 
entry */ 8095 free(clone_entry); 8096 break; 8097 default: 8098 /* This snapshot is at the same time a clone of another snapshot - we need to 8099 * update parent snapshot (remove current clone, add new one inherited from 8100 * the snapshot that is being removed) */ 8101 8102 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8103 * snapshot that we are removing */ 8104 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 8105 &snapshot_clone_entry); 8106 8107 /* Switch clone entry in parent snapshot */ 8108 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 8109 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 8110 free(snapshot_clone_entry); 8111 } 8112 8113 /* Restore md_ro flags */ 8114 ctx->clone->md_ro = ctx->clone_md_ro; 8115 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8116 8117 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 8118 } 8119 8120 static void 8121 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 8122 { 8123 struct delete_snapshot_ctx *ctx = cb_arg; 8124 uint64_t i; 8125 8126 ctx->snapshot->md_ro = false; 8127 8128 if (bserrno) { 8129 SPDK_ERRLOG("Failed to sync MD on clone\n"); 8130 ctx->bserrno = bserrno; 8131 8132 /* Restore snapshot to previous state */ 8133 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8134 if (bserrno != 0) { 8135 delete_snapshot_cleanup_clone(ctx, bserrno); 8136 return; 8137 } 8138 8139 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8140 return; 8141 } 8142 8143 /* Clear cluster map entries for snapshot */ 8144 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8145 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 8146 if (ctx->snapshot->active.clusters[i] != 0) { 8147 ctx->snapshot->active.num_allocated_clusters--; 8148 } 8149 ctx->snapshot->active.clusters[i] = 0; 8150 } 8151 } 8152 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 8153 i < ctx->clone->active.num_extent_pages; i++) { 8154 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 8155 ctx->snapshot->active.extent_pages[i] = 0; 8156 } 8157 } 8158 8159 blob_set_thin_provision(ctx->snapshot); 8160 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 8161 8162 if (ctx->parent_snapshot_entry != NULL) { 8163 ctx->snapshot->back_bs_dev = NULL; 8164 } 8165 8166 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 8167 } 8168 8169 static void 8170 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 8171 { 8172 int bserrno; 8173 8174 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 8175 blob_back_bs_destroy(ctx->clone); 8176 8177 /* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... 
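	 * There are three cases below: reparent onto the external snapshot, onto the
	 * parent snapshot, or detach entirely onto the zeroes device.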
*/ 8178 if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 8179 bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot, 8180 BLOB_EXTERNAL_SNAPSHOT_ID); 8181 if (bserrno != 0) { 8182 ctx->bserrno = bserrno; 8183 8184 /* Restore snapshot to previous state */ 8185 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8186 if (bserrno != 0) { 8187 delete_snapshot_cleanup_clone(ctx, bserrno); 8188 return; 8189 } 8190 8191 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8192 return; 8193 } 8194 ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 8195 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8196 /* Do not delete the external snapshot along with this snapshot */ 8197 ctx->snapshot->back_bs_dev = NULL; 8198 ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 8199 } else if (ctx->parent_snapshot_entry != NULL) { 8200 /* ...to parent snapshot */ 8201 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 8202 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8203 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 8204 sizeof(spdk_blob_id), 8205 true); 8206 } else { 8207 /* ...to blobid invalid and zeroes dev */ 8208 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 8209 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 8210 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 8211 } 8212 8213 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 8214 } 8215 8216 static void 8217 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 8218 { 8219 struct delete_snapshot_ctx *ctx = cb_arg; 8220 uint32_t *extent_page; 8221 uint64_t i; 8222 8223 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 8224 i < ctx->clone->active.num_extent_pages; i++) { 8225 if (ctx->snapshot->active.extent_pages[i] == 0) { 8226 /* No extent page to use from snapshot */ 8227 continue; 8228 } 8229 8230 extent_page = &ctx->clone->active.extent_pages[i]; 8231 if (*extent_page == 0) { 8232 /* Copy extent page from snapshot when clone did not have a matching one */ 8233 *extent_page = ctx->snapshot->active.extent_pages[i]; 8234 continue; 8235 } 8236 8237 /* Clone and snapshot both contain partially filled matching extent pages. 8238 * Update the clone extent page in place with cluster map containing the mix of both. 
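	 * The merged page is written out below; the write completion callback re-enters
	 * this function, resuming the loop from next_extent_page.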
*/ 8239 ctx->next_extent_page = i + 1; 8240 memset(ctx->page, 0, SPDK_BS_PAGE_SIZE); 8241 8242 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page, 8243 delete_snapshot_update_extent_pages, ctx); 8244 return; 8245 } 8246 delete_snapshot_update_extent_pages_cpl(ctx); 8247 } 8248 8249 static void 8250 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 8251 { 8252 struct delete_snapshot_ctx *ctx = cb_arg; 8253 uint64_t i; 8254 8255 /* Temporarily override md_ro flag for clone for MD modification */ 8256 ctx->clone_md_ro = ctx->clone->md_ro; 8257 ctx->clone->md_ro = false; 8258 8259 if (bserrno) { 8260 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 8261 ctx->bserrno = bserrno; 8262 delete_snapshot_cleanup_clone(ctx, 0); 8263 return; 8264 } 8265 8266 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 8267 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8268 if (ctx->clone->active.clusters[i] == 0) { 8269 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 8270 if (ctx->clone->active.clusters[i] != 0) { 8271 ctx->clone->active.num_allocated_clusters++; 8272 } 8273 } 8274 } 8275 ctx->next_extent_page = 0; 8276 delete_snapshot_update_extent_pages(ctx, 0); 8277 } 8278 8279 static void 8280 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno) 8281 { 8282 struct delete_snapshot_ctx *ctx = cb_arg; 8283 8284 if (bserrno != 0) { 8285 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n", 8286 blob->id, bserrno); 8287 /* That error should not stop us from syncing metadata. */ 8288 } 8289 8290 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8291 } 8292 8293 static void 8294 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 8295 { 8296 struct delete_snapshot_ctx *ctx = cb_arg; 8297 8298 if (bserrno) { 8299 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 8300 ctx->bserrno = bserrno; 8301 delete_snapshot_cleanup_clone(ctx, 0); 8302 return; 8303 } 8304 8305 /* Temporarily override md_ro flag for snapshot for MD modification */ 8306 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 8307 ctx->snapshot->md_ro = false; 8308 8309 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 8310 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 8311 sizeof(spdk_blob_id), true); 8312 if (ctx->bserrno != 0) { 8313 delete_snapshot_cleanup_clone(ctx, 0); 8314 return; 8315 } 8316 8317 if (blob_is_esnap_clone(ctx->snapshot)) { 8318 blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false, 8319 delete_snapshot_esnap_channels_destroyed_cb, 8320 ctx); 8321 return; 8322 } 8323 8324 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8325 } 8326 8327 static void 8328 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 8329 { 8330 struct delete_snapshot_ctx *ctx = cb_arg; 8331 8332 if (bserrno) { 8333 SPDK_ERRLOG("Failed to open clone\n"); 8334 ctx->bserrno = bserrno; 8335 delete_snapshot_cleanup_snapshot(ctx, 0); 8336 return; 8337 } 8338 8339 ctx->clone = clone; 8340 8341 if (clone->locked_operation_in_progress) { 8342 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 8343 ctx->bserrno = -EBUSY; 8344 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8345 return; 8346 } 8347 8348 
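	/* Take the per-blob operation lock on the clone so no other metadata operation
	 * can race with the freeze and cluster-map updates that follow. */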
clone->locked_operation_in_progress = true; 8349 8350 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 8351 } 8352 8353 static void 8354 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 8355 { 8356 struct spdk_blob_list *snapshot_entry = NULL; 8357 struct spdk_blob_list *clone_entry = NULL; 8358 struct spdk_blob_list *snapshot_clone_entry = NULL; 8359 8360 /* Get snapshot entry for the snapshot we want to remove */ 8361 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 8362 8363 assert(snapshot_entry != NULL); 8364 8365 /* Get clone of the snapshot (at this point there can be only one clone) */ 8366 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8367 assert(snapshot_entry->clone_count == 1); 8368 assert(clone_entry != NULL); 8369 8370 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8371 * snapshot that we are removing */ 8372 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 8373 &snapshot_clone_entry); 8374 8375 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 8376 } 8377 8378 static void 8379 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 8380 { 8381 spdk_bs_sequence_t *seq = cb_arg; 8382 struct spdk_blob_list *snapshot_entry = NULL; 8383 uint32_t page_num; 8384 8385 if (bserrno) { 8386 SPDK_ERRLOG("Failed to remove blob\n"); 8387 bs_sequence_finish(seq, bserrno); 8388 return; 8389 } 8390 8391 /* Remove snapshot from the list */ 8392 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8393 if (snapshot_entry != NULL) { 8394 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 8395 free(snapshot_entry); 8396 } 8397 8398 page_num = bs_blobid_to_page(blob->id); 8399 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 8400 blob->state = SPDK_BLOB_STATE_DIRTY; 8401 blob->active.num_pages = 0; 8402 blob_resize(blob, 0); 8403 8404 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 8405 } 8406 8407 static int 8408 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 8409 { 8410 struct spdk_blob_list *snapshot_entry = NULL; 8411 struct spdk_blob_list *clone_entry = NULL; 8412 struct spdk_blob *clone = NULL; 8413 bool has_one_clone = false; 8414 8415 /* Check if this is a snapshot with clones */ 8416 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8417 if (snapshot_entry != NULL) { 8418 if (snapshot_entry->clone_count > 1) { 8419 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 8420 return -EBUSY; 8421 } else if (snapshot_entry->clone_count == 1) { 8422 has_one_clone = true; 8423 } 8424 } 8425 8426 /* Check if someone has this blob open (besides this delete context): 8427 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 8428 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 8429 * and that is ok, because we will update it accordingly */ 8430 if (blob->open_ref <= 2 && has_one_clone) { 8431 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8432 assert(clone_entry != NULL); 8433 clone = blob_lookup(blob->bs, clone_entry->id); 8434 8435 if (blob->open_ref == 2 && clone == NULL) { 8436 /* Clone is closed and someone else opened this blob */ 8437 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8438 return -EBUSY; 8439 } 8440 8441 *update_clone = true; 8442 return 0; 8443 } 8444 8445 if (blob->open_ref > 1) { 8446 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8447 return 
-EBUSY; 8448 } 8449 8450 assert(has_one_clone == false); 8451 *update_clone = false; 8452 return 0; 8453 } 8454 8455 static void 8456 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 8457 { 8458 spdk_bs_sequence_t *seq = cb_arg; 8459 8460 bs_sequence_finish(seq, -ENOMEM); 8461 } 8462 8463 static void 8464 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 8465 { 8466 spdk_bs_sequence_t *seq = cb_arg; 8467 struct delete_snapshot_ctx *ctx; 8468 bool update_clone = false; 8469 8470 if (bserrno != 0) { 8471 bs_sequence_finish(seq, bserrno); 8472 return; 8473 } 8474 8475 blob_verify_md_op(blob); 8476 8477 ctx = calloc(1, sizeof(*ctx)); 8478 if (ctx == NULL) { 8479 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 8480 return; 8481 } 8482 8483 ctx->snapshot = blob; 8484 ctx->cb_fn = bs_delete_blob_finish; 8485 ctx->cb_arg = seq; 8486 8487 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 8488 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 8489 if (ctx->bserrno) { 8490 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8491 return; 8492 } 8493 8494 if (blob->locked_operation_in_progress) { 8495 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 8496 ctx->bserrno = -EBUSY; 8497 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8498 return; 8499 } 8500 8501 blob->locked_operation_in_progress = true; 8502 8503 /* 8504 * Remove the blob from the blob_store list now, to ensure it does not 8505 * get returned after this point by blob_lookup(). 8506 */ 8507 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 8508 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 8509 8510 if (update_clone) { 8511 ctx->page = spdk_zmalloc(blob->bs->md_page_size, 0, NULL, SPDK_ENV_NUMA_ID_ANY, 8512 SPDK_MALLOC_DMA); 8513 if (!ctx->page) { 8514 ctx->bserrno = -ENOMEM; 8515 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8516 return; 8517 } 8518 /* This blob is a snapshot with active clone - update clone first */ 8519 update_clone_on_snapshot_deletion(blob, ctx); 8520 } else { 8521 /* This blob does not have any clones - just remove it */ 8522 bs_blob_list_remove(blob); 8523 bs_delete_blob_finish(seq, blob, 0); 8524 free(ctx); 8525 } 8526 } 8527 8528 void 8529 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8530 spdk_blob_op_complete cb_fn, void *cb_arg) 8531 { 8532 struct spdk_bs_cpl cpl; 8533 spdk_bs_sequence_t *seq; 8534 8535 SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid); 8536 8537 assert(spdk_get_thread() == bs->md_thread); 8538 8539 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8540 cpl.u.blob_basic.cb_fn = cb_fn; 8541 cpl.u.blob_basic.cb_arg = cb_arg; 8542 8543 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8544 if (!seq) { 8545 cb_fn(cb_arg, -ENOMEM); 8546 return; 8547 } 8548 8549 spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq); 8550 } 8551 8552 /* END spdk_bs_delete_blob */ 8553 8554 /* START spdk_bs_open_blob */ 8555 8556 static void 8557 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8558 { 8559 struct spdk_blob *blob = cb_arg; 8560 struct spdk_blob *existing; 8561 8562 if (bserrno != 0) { 8563 blob_free(blob); 8564 seq->cpl.u.blob_handle.blob = NULL; 8565 bs_sequence_finish(seq, bserrno); 8566 return; 8567 } 8568 8569 existing = blob_lookup(blob->bs, blob->id); 8570 if (existing) { 8571 blob_free(blob); 8572 existing->open_ref++; 8573 seq->cpl.u.blob_handle.blob = existing; 8574 bs_sequence_finish(seq, 0); 8575 return; 8576 } 
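	/* First open of this blob: take the initial reference and make it visible
	 * to blob_lookup() via the open-blob bitmap and tree. */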
8577 8578 blob->open_ref++; 8579 8580 spdk_bit_array_set(blob->bs->open_blobids, blob->id); 8581 RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob); 8582 8583 bs_sequence_finish(seq, bserrno); 8584 } 8585 8586 static inline void 8587 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst) 8588 { 8589 #define FIELD_OK(field) \ 8590 offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size 8591 8592 #define SET_FIELD(field) \ 8593 if (FIELD_OK(field)) { \ 8594 dst->field = src->field; \ 8595 } \ 8596 8597 SET_FIELD(clear_method); 8598 SET_FIELD(esnap_ctx); 8599 8600 dst->opts_size = src->opts_size; 8601 8602 /* You should not remove this statement, but need to update the assert statement 8603 * if you add a new field, and also add a corresponding SET_FIELD statement */ 8604 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size"); 8605 8606 #undef FIELD_OK 8607 #undef SET_FIELD 8608 } 8609 8610 static void 8611 bs_open_blob(struct spdk_blob_store *bs, 8612 spdk_blob_id blobid, 8613 struct spdk_blob_open_opts *opts, 8614 spdk_blob_op_with_handle_complete cb_fn, 8615 void *cb_arg) 8616 { 8617 struct spdk_blob *blob; 8618 struct spdk_bs_cpl cpl; 8619 struct spdk_blob_open_opts opts_local; 8620 spdk_bs_sequence_t *seq; 8621 uint32_t page_num; 8622 8623 SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid); 8624 assert(spdk_get_thread() == bs->md_thread); 8625 8626 page_num = bs_blobid_to_page(blobid); 8627 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 8628 /* Invalid blobid */ 8629 cb_fn(cb_arg, NULL, -ENOENT); 8630 return; 8631 } 8632 8633 blob = blob_lookup(bs, blobid); 8634 if (blob) { 8635 blob->open_ref++; 8636 cb_fn(cb_arg, blob, 0); 8637 return; 8638 } 8639 8640 blob = blob_alloc(bs, blobid); 8641 if (!blob) { 8642 cb_fn(cb_arg, NULL, -ENOMEM); 8643 return; 8644 } 8645 8646 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 8647 if (opts) { 8648 blob_open_opts_copy(opts, &opts_local); 8649 } 8650 8651 blob->clear_method = opts_local.clear_method; 8652 8653 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 8654 cpl.u.blob_handle.cb_fn = cb_fn; 8655 cpl.u.blob_handle.cb_arg = cb_arg; 8656 cpl.u.blob_handle.blob = blob; 8657 cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx; 8658 8659 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8660 if (!seq) { 8661 blob_free(blob); 8662 cb_fn(cb_arg, NULL, -ENOMEM); 8663 return; 8664 } 8665 8666 blob_load(seq, blob, bs_open_blob_cpl, blob); 8667 } 8668 8669 void 8670 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8671 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8672 { 8673 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 8674 } 8675 8676 void 8677 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 8678 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8679 { 8680 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 8681 } 8682 8683 /* END spdk_bs_open_blob */ 8684 8685 /* START spdk_blob_set_read_only */ 8686 int 8687 spdk_blob_set_read_only(struct spdk_blob *blob) 8688 { 8689 blob_verify_md_op(blob); 8690 8691 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 8692 8693 blob->state = SPDK_BLOB_STATE_DIRTY; 8694 return 0; 8695 } 8696 /* END spdk_blob_set_read_only */ 8697 8698 /* START spdk_blob_sync_md */ 8699 8700 static void 8701 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8702 { 8703 struct spdk_blob *blob = cb_arg; 8704 8705 if (bserrno == 
0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 8706 blob->data_ro = true; 8707 blob->md_ro = true; 8708 } 8709 8710 bs_sequence_finish(seq, bserrno); 8711 } 8712 8713 static void 8714 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8715 { 8716 struct spdk_bs_cpl cpl; 8717 spdk_bs_sequence_t *seq; 8718 8719 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8720 cpl.u.blob_basic.cb_fn = cb_fn; 8721 cpl.u.blob_basic.cb_arg = cb_arg; 8722 8723 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8724 if (!seq) { 8725 cb_fn(cb_arg, -ENOMEM); 8726 return; 8727 } 8728 8729 blob_persist(seq, blob, blob_sync_md_cpl, blob); 8730 } 8731 8732 void 8733 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8734 { 8735 blob_verify_md_op(blob); 8736 8737 SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id); 8738 8739 if (blob->md_ro) { 8740 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 8741 cb_fn(cb_arg, 0); 8742 return; 8743 } 8744 8745 blob_sync_md(blob, cb_fn, cb_arg); 8746 } 8747 8748 /* END spdk_blob_sync_md */ 8749 8750 struct spdk_blob_cluster_op_ctx { 8751 struct spdk_thread *thread; 8752 struct spdk_blob *blob; 8753 uint32_t cluster_num; /* cluster index in blob */ 8754 uint32_t cluster; /* cluster on disk */ 8755 uint32_t extent_page; /* extent page on disk */ 8756 struct spdk_blob_md_page *page; /* preallocated extent page */ 8757 int rc; 8758 spdk_blob_op_complete cb_fn; 8759 void *cb_arg; 8760 }; 8761 8762 static void 8763 blob_op_cluster_msg_cpl(void *arg) 8764 { 8765 struct spdk_blob_cluster_op_ctx *ctx = arg; 8766 8767 ctx->cb_fn(ctx->cb_arg, ctx->rc); 8768 free(ctx); 8769 } 8770 8771 static void 8772 blob_op_cluster_msg_cb(void *arg, int bserrno) 8773 { 8774 struct spdk_blob_cluster_op_ctx *ctx = arg; 8775 8776 ctx->rc = bserrno; 8777 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8778 } 8779 8780 static void 8781 blob_insert_new_ep_cb(void *arg, int bserrno) 8782 { 8783 struct spdk_blob_cluster_op_ctx *ctx = arg; 8784 uint32_t *extent_page; 8785 8786 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8787 *extent_page = ctx->extent_page; 8788 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8789 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8790 } 8791 8792 struct spdk_blob_write_extent_page_ctx { 8793 struct spdk_blob_store *bs; 8794 8795 uint32_t extent; 8796 struct spdk_blob_md_page *page; 8797 }; 8798 8799 static void 8800 blob_free_cluster_msg_cb(void *arg, int bserrno) 8801 { 8802 struct spdk_blob_cluster_op_ctx *ctx = arg; 8803 8804 spdk_spin_lock(&ctx->blob->bs->used_lock); 8805 bs_release_cluster(ctx->blob->bs, ctx->cluster); 8806 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8807 8808 ctx->rc = bserrno; 8809 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8810 } 8811 8812 static void 8813 blob_free_cluster_update_ep_cb(void *arg, int bserrno) 8814 { 8815 struct spdk_blob_cluster_op_ctx *ctx = arg; 8816 8817 if (bserrno != 0 || ctx->blob->bs->clean == 0) { 8818 blob_free_cluster_msg_cb(ctx, bserrno); 8819 return; 8820 } 8821 8822 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8823 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8824 } 8825 8826 static void 8827 blob_free_cluster_free_ep_cb(void *arg, int bserrno) 8828 { 8829 struct spdk_blob_cluster_op_ctx *ctx = arg; 8830 8831 spdk_spin_lock(&ctx->blob->bs->used_lock); 8832 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8833 bs_release_md_page(ctx->blob->bs, 
ctx->extent_page); 8834 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8835 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8836 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8837 } 8838 8839 static void 8840 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8841 { 8842 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8843 8844 free(ctx); 8845 bs_sequence_finish(seq, bserrno); 8846 } 8847 8848 static void 8849 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8850 { 8851 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8852 8853 if (bserrno != 0) { 8854 blob_persist_extent_page_cpl(seq, ctx, bserrno); 8855 return; 8856 } 8857 bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent), 8858 bs_byte_to_lba(ctx->bs, ctx->bs->md_page_size), 8859 blob_persist_extent_page_cpl, ctx); 8860 } 8861 8862 static void 8863 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 8864 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 8865 { 8866 struct spdk_blob_write_extent_page_ctx *ctx; 8867 spdk_bs_sequence_t *seq; 8868 struct spdk_bs_cpl cpl; 8869 8870 ctx = calloc(1, sizeof(*ctx)); 8871 if (!ctx) { 8872 cb_fn(cb_arg, -ENOMEM); 8873 return; 8874 } 8875 ctx->bs = blob->bs; 8876 ctx->extent = extent; 8877 ctx->page = page; 8878 8879 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8880 cpl.u.blob_basic.cb_fn = cb_fn; 8881 cpl.u.blob_basic.cb_arg = cb_arg; 8882 8883 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8884 if (!seq) { 8885 free(ctx); 8886 cb_fn(cb_arg, -ENOMEM); 8887 return; 8888 } 8889 8890 assert(page); 8891 page->next = SPDK_INVALID_MD_PAGE; 8892 page->id = blob->id; 8893 page->sequence_num = 0; 8894 8895 blob_serialize_extent_page(blob, cluster_num, page); 8896 8897 page->crc = blob_md_page_calc_crc(page); 8898 8899 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true); 8900 8901 bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx); 8902 } 8903 8904 static void 8905 blob_insert_cluster_msg(void *arg) 8906 { 8907 struct spdk_blob_cluster_op_ctx *ctx = arg; 8908 uint32_t *extent_page; 8909 8910 ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 8911 if (ctx->rc != 0) { 8912 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8913 return; 8914 } 8915 8916 if (ctx->blob->use_extent_table == false) { 8917 /* Extent table is not used, proceed with sync of md that will only use extents_rle. */ 8918 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8919 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8920 return; 8921 } 8922 8923 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8924 if (*extent_page == 0) { 8925 /* Extent page requires allocation. 8926 * It was already claimed in the used_md_pages map and placed in ctx. */ 8927 assert(ctx->extent_page != 0); 8928 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8929 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 8930 blob_insert_new_ep_cb, ctx); 8931 } else { 8932 /* It is possible for original thread to allocate extent page for 8933 * different cluster in the same extent page. In such case proceed with 8934 * updating the existing extent page, but release the additional one. 
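	 * The extra page was only claimed in the used_md_pages map and never written,
	 * so clearing its bit is the only cleanup required.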
*/ 8935 if (ctx->extent_page != 0) { 8936 spdk_spin_lock(&ctx->blob->bs->used_lock); 8937 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8938 bs_release_md_page(ctx->blob->bs, ctx->extent_page); 8939 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8940 ctx->extent_page = 0; 8941 } 8942 /* Extent page already allocated. 8943 * Every cluster allocation, requires just an update of single extent page. */ 8944 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 8945 blob_op_cluster_msg_cb, ctx); 8946 } 8947 } 8948 8949 static void 8950 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 8951 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page, 8952 spdk_blob_op_complete cb_fn, void *cb_arg) 8953 { 8954 struct spdk_blob_cluster_op_ctx *ctx; 8955 8956 ctx = calloc(1, sizeof(*ctx)); 8957 if (ctx == NULL) { 8958 cb_fn(cb_arg, -ENOMEM); 8959 return; 8960 } 8961 8962 ctx->thread = spdk_get_thread(); 8963 ctx->blob = blob; 8964 ctx->cluster_num = cluster_num; 8965 ctx->cluster = cluster; 8966 ctx->extent_page = extent_page; 8967 ctx->page = page; 8968 ctx->cb_fn = cb_fn; 8969 ctx->cb_arg = cb_arg; 8970 8971 spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx); 8972 } 8973 8974 static void 8975 blob_free_cluster_msg(void *arg) 8976 { 8977 struct spdk_blob_cluster_op_ctx *ctx = arg; 8978 uint32_t *extent_page; 8979 uint32_t start_cluster_idx; 8980 bool free_extent_page = true; 8981 size_t i; 8982 8983 ctx->cluster = bs_lba_to_cluster(ctx->blob->bs, ctx->blob->active.clusters[ctx->cluster_num]); 8984 8985 /* There were concurrent unmaps to the same cluster, only release the cluster on the first one */ 8986 if (ctx->cluster == 0) { 8987 blob_op_cluster_msg_cb(ctx, 0); 8988 return; 8989 } 8990 8991 ctx->blob->active.clusters[ctx->cluster_num] = 0; 8992 if (ctx->cluster != 0) { 8993 ctx->blob->active.num_allocated_clusters--; 8994 } 8995 8996 if (ctx->blob->use_extent_table == false) { 8997 /* Extent table is not used, proceed with sync of md that will only use extents_rle. 
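	 * The cluster itself is returned to the pool first, under used_lock.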
*/ 8998 spdk_spin_lock(&ctx->blob->bs->used_lock); 8999 bs_release_cluster(ctx->blob->bs, ctx->cluster); 9000 spdk_spin_unlock(&ctx->blob->bs->used_lock); 9001 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 9002 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 9003 return; 9004 } 9005 9006 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 9007 9008 /* There shouldn't be parallel release operations on same cluster */ 9009 assert(*extent_page == ctx->extent_page); 9010 9011 start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP; 9012 for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) { 9013 if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) { 9014 free_extent_page = false; 9015 break; 9016 } 9017 } 9018 9019 if (free_extent_page) { 9020 assert(ctx->extent_page != 0); 9021 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 9022 ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0; 9023 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 9024 blob_free_cluster_free_ep_cb, ctx); 9025 } else { 9026 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 9027 blob_free_cluster_update_ep_cb, ctx); 9028 } 9029 } 9030 9031 9032 static void 9033 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page, 9034 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 9035 { 9036 struct spdk_blob_cluster_op_ctx *ctx; 9037 9038 ctx = calloc(1, sizeof(*ctx)); 9039 if (ctx == NULL) { 9040 cb_fn(cb_arg, -ENOMEM); 9041 return; 9042 } 9043 9044 ctx->thread = spdk_get_thread(); 9045 ctx->blob = blob; 9046 ctx->cluster_num = cluster_num; 9047 ctx->extent_page = extent_page; 9048 ctx->page = page; 9049 ctx->cb_fn = cb_fn; 9050 ctx->cb_arg = cb_arg; 9051 9052 spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx); 9053 } 9054 9055 /* START spdk_blob_close */ 9056 9057 static void 9058 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9059 { 9060 struct spdk_blob *blob = cb_arg; 9061 9062 if (bserrno == 0) { 9063 blob->open_ref--; 9064 if (blob->open_ref == 0) { 9065 /* 9066 * Blobs with active.num_pages == 0 are deleted blobs. 9067 * these blobs are removed from the blob_store list 9068 * when the deletion process starts - so don't try to 9069 * remove them again. 
9070 */ 9071 if (blob->active.num_pages > 0) { 9072 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 9073 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 9074 } 9075 blob_free(blob); 9076 } 9077 } 9078 9079 bs_sequence_finish(seq, bserrno); 9080 } 9081 9082 static void 9083 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno) 9084 { 9085 spdk_bs_sequence_t *seq = cb_arg; 9086 9087 if (bserrno != 0) { 9088 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n", 9089 blob->id, bserrno); 9090 bs_sequence_finish(seq, bserrno); 9091 return; 9092 } 9093 9094 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n", 9095 blob->id, spdk_thread_get_name(spdk_get_thread())); 9096 9097 /* Sync metadata */ 9098 blob_persist(seq, blob, blob_close_cpl, blob); 9099 } 9100 9101 void 9102 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 9103 { 9104 struct spdk_bs_cpl cpl; 9105 spdk_bs_sequence_t *seq; 9106 9107 blob_verify_md_op(blob); 9108 9109 SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id); 9110 9111 if (blob->open_ref == 0) { 9112 cb_fn(cb_arg, -EBADF); 9113 return; 9114 } 9115 9116 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 9117 cpl.u.blob_basic.cb_fn = cb_fn; 9118 cpl.u.blob_basic.cb_arg = cb_arg; 9119 9120 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 9121 if (!seq) { 9122 cb_fn(cb_arg, -ENOMEM); 9123 return; 9124 } 9125 9126 if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) { 9127 blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq); 9128 return; 9129 } 9130 9131 /* Sync metadata */ 9132 blob_persist(seq, blob, blob_close_cpl, blob); 9133 } 9134 9135 /* END spdk_blob_close */ 9136 9137 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 9138 { 9139 return spdk_get_io_channel(bs); 9140 } 9141 9142 void 9143 spdk_bs_free_io_channel(struct spdk_io_channel *channel) 9144 { 9145 blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel)); 9146 spdk_put_io_channel(channel); 9147 } 9148 9149 void 9150 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 9151 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9152 { 9153 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9154 SPDK_BLOB_UNMAP); 9155 } 9156 9157 void 9158 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 9159 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9160 { 9161 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9162 SPDK_BLOB_WRITE_ZEROES); 9163 } 9164 9165 void 9166 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 9167 void *payload, uint64_t offset, uint64_t length, 9168 spdk_blob_op_complete cb_fn, void *cb_arg) 9169 { 9170 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9171 SPDK_BLOB_WRITE); 9172 } 9173 9174 void 9175 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 9176 void *payload, uint64_t offset, uint64_t length, 9177 spdk_blob_op_complete cb_fn, void *cb_arg) 9178 { 9179 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9180 SPDK_BLOB_READ); 9181 } 9182 9183 void 9184 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 9185 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9186 spdk_blob_op_complete cb_fn, void *cb_arg) 
9187 { 9188 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 9189 } 9190 9191 void 9192 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 9193 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9194 spdk_blob_op_complete cb_fn, void *cb_arg) 9195 { 9196 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 9197 } 9198 9199 void 9200 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9201 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9202 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9203 { 9204 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 9205 io_opts); 9206 } 9207 9208 void 9209 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9210 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9211 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9212 { 9213 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 9214 io_opts); 9215 } 9216 9217 struct spdk_bs_iter_ctx { 9218 int64_t page_num; 9219 struct spdk_blob_store *bs; 9220 9221 spdk_blob_op_with_handle_complete cb_fn; 9222 void *cb_arg; 9223 }; 9224 9225 static void 9226 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 9227 { 9228 struct spdk_bs_iter_ctx *ctx = cb_arg; 9229 struct spdk_blob_store *bs = ctx->bs; 9230 spdk_blob_id id; 9231 9232 if (bserrno == 0) { 9233 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 9234 free(ctx); 9235 return; 9236 } 9237 9238 ctx->page_num++; 9239 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 9240 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 9241 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 9242 free(ctx); 9243 return; 9244 } 9245 9246 id = bs_page_to_blobid(ctx->page_num); 9247 9248 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 9249 } 9250 9251 void 9252 spdk_bs_iter_first(struct spdk_blob_store *bs, 9253 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9254 { 9255 struct spdk_bs_iter_ctx *ctx; 9256 9257 ctx = calloc(1, sizeof(*ctx)); 9258 if (!ctx) { 9259 cb_fn(cb_arg, NULL, -ENOMEM); 9260 return; 9261 } 9262 9263 ctx->page_num = -1; 9264 ctx->bs = bs; 9265 ctx->cb_fn = cb_fn; 9266 ctx->cb_arg = cb_arg; 9267 9268 bs_iter_cpl(ctx, NULL, -1); 9269 } 9270 9271 static void 9272 bs_iter_close_cpl(void *cb_arg, int bserrno) 9273 { 9274 struct spdk_bs_iter_ctx *ctx = cb_arg; 9275 9276 bs_iter_cpl(ctx, NULL, -1); 9277 } 9278 9279 void 9280 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 9281 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9282 { 9283 struct spdk_bs_iter_ctx *ctx; 9284 9285 assert(blob != NULL); 9286 9287 ctx = calloc(1, sizeof(*ctx)); 9288 if (!ctx) { 9289 cb_fn(cb_arg, NULL, -ENOMEM); 9290 return; 9291 } 9292 9293 ctx->page_num = bs_blobid_to_page(blob->id); 9294 ctx->bs = bs; 9295 ctx->cb_fn = cb_fn; 9296 ctx->cb_arg = cb_arg; 9297 9298 /* Close the existing blob */ 9299 spdk_blob_close(blob, bs_iter_close_cpl, ctx); 9300 } 9301 9302 static int 9303 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 9304 uint16_t value_len, bool internal) 9305 { 9306 struct spdk_xattr_tailq *xattrs; 9307 struct spdk_xattr *xattr; 9308 size_t desc_size; 9309 void *tmp; 9310 9311 blob_verify_md_op(blob); 9312 9313 if 
(blob->md_ro) {
9314 		return -EPERM;
9315 	}
9316 
9317 	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
9318 	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
9319 		SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page, max %zu\n", name,
9320 			      desc_size, SPDK_BS_MAX_DESC_SIZE);
9321 		return -ENOMEM;
9322 	}
9323 
9324 	if (internal) {
9325 		xattrs = &blob->xattrs_internal;
9326 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
9327 	} else {
9328 		xattrs = &blob->xattrs;
9329 	}
9330 
9331 	TAILQ_FOREACH(xattr, xattrs, link) {
9332 		if (!strcmp(name, xattr->name)) {
9333 			tmp = malloc(value_len);
9334 			if (!tmp) {
9335 				return -ENOMEM;
9336 			}
9337 
9338 			free(xattr->value);
9339 			xattr->value_len = value_len;
9340 			xattr->value = tmp;
9341 			memcpy(xattr->value, value, value_len);
9342 
9343 			blob->state = SPDK_BLOB_STATE_DIRTY;
9344 
9345 			return 0;
9346 		}
9347 	}
9348 
9349 	xattr = calloc(1, sizeof(*xattr));
9350 	if (!xattr) {
9351 		return -ENOMEM;
9352 	}
9353 
9354 	xattr->name = strdup(name);
9355 	if (!xattr->name) {
9356 		free(xattr);
9357 		return -ENOMEM;
9358 	}
9359 
9360 	xattr->value_len = value_len;
9361 	xattr->value = malloc(value_len);
9362 	if (!xattr->value) {
9363 		free(xattr->name);
9364 		free(xattr);
9365 		return -ENOMEM;
9366 	}
9367 	memcpy(xattr->value, value, value_len);
9368 	TAILQ_INSERT_TAIL(xattrs, xattr, link);
9369 
9370 	blob->state = SPDK_BLOB_STATE_DIRTY;
9371 
9372 	return 0;
9373 }
9374 
9375 int
9376 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
9377 		    uint16_t value_len)
9378 {
9379 	return blob_set_xattr(blob, name, value, value_len, false);
9380 }
9381 
9382 static int
9383 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
9384 {
9385 	struct spdk_xattr_tailq *xattrs;
9386 	struct spdk_xattr *xattr;
9387 
9388 	blob_verify_md_op(blob);
9389 
9390 	if (blob->md_ro) {
9391 		return -EPERM;
9392 	}
9393 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
9394 
9395 	TAILQ_FOREACH(xattr, xattrs, link) {
9396 		if (!strcmp(name, xattr->name)) {
9397 			TAILQ_REMOVE(xattrs, xattr, link);
9398 			free(xattr->value);
9399 			free(xattr->name);
9400 			free(xattr);
9401 
9402 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
9403 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
9404 			}
9405 			blob->state = SPDK_BLOB_STATE_DIRTY;
9406 
9407 			return 0;
9408 		}
9409 	}
9410 
9411 	return -ENOENT;
9412 }
9413 
9414 int
9415 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
9416 {
9417 	return blob_remove_xattr(blob, name, false);
9418 }
9419 
9420 static int
9421 blob_get_xattr_value(struct spdk_blob *blob, const char *name,
9422 		     const void **value, size_t *value_len, bool internal)
9423 {
9424 	struct spdk_xattr *xattr;
9425 	struct spdk_xattr_tailq *xattrs;
9426 
9427 	xattrs = internal ?
&blob->xattrs_internal : &blob->xattrs;
9428
9429 TAILQ_FOREACH(xattr, xattrs, link) {
9430 if (!strcmp(name, xattr->name)) {
9431 *value = xattr->value;
9432 *value_len = xattr->value_len;
9433 return 0;
9434 }
9435 }
9436 return -ENOENT;
9437 }
9438
9439 int
9440 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
9441 const void **value, size_t *value_len)
9442 {
9443 blob_verify_md_op(blob);
9444
9445 return blob_get_xattr_value(blob, name, value, value_len, false);
9446 }
9447
9448 struct spdk_xattr_names {
9449 uint32_t count;
9450 const char *names[0];
9451 };
9452
9453 static int
9454 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
9455 {
9456 struct spdk_xattr *xattr;
9457 int count = 0;
9458
9459 TAILQ_FOREACH(xattr, xattrs, link) {
9460 count++;
9461 }
9462
9463 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
9464 if (*names == NULL) {
9465 return -ENOMEM;
9466 }
9467
9468 TAILQ_FOREACH(xattr, xattrs, link) {
9469 (*names)->names[(*names)->count++] = xattr->name;
9470 }
9471
9472 return 0;
9473 }
9474
9475 int
9476 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
9477 {
9478 blob_verify_md_op(blob);
9479
9480 return blob_get_xattr_names(&blob->xattrs, names);
9481 }
9482
9483 uint32_t
9484 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
9485 {
9486 assert(names != NULL);
9487
9488 return names->count;
9489 }
9490
9491 const char *
9492 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
9493 {
9494 if (index >= names->count) {
9495 return NULL;
9496 }
9497
9498 return names->names[index];
9499 }
9500
9501 void
9502 spdk_xattr_names_free(struct spdk_xattr_names *names)
9503 {
9504 free(names);
9505 }
9506
9507 struct spdk_bs_type
9508 spdk_bs_get_bstype(struct spdk_blob_store *bs)
9509 {
9510 return bs->bstype;
9511 }
9512
9513 void
9514 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
9515 {
9516 memcpy(&bs->bstype, &bstype, sizeof(bstype));
9517 }
9518
9519 bool
9520 spdk_blob_is_read_only(struct spdk_blob *blob)
9521 {
9522 assert(blob != NULL);
9523 return (blob->data_ro || blob->md_ro);
9524 }
9525
9526 bool
9527 spdk_blob_is_snapshot(struct spdk_blob *blob)
9528 {
9529 struct spdk_blob_list *snapshot_entry;
9530
9531 assert(blob != NULL);
9532
9533 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
9534 if (snapshot_entry == NULL) {
9535 return false;
9536 }
9537
9538 return true;
9539 }
9540
9541 bool
9542 spdk_blob_is_clone(struct spdk_blob *blob)
9543 {
9544 assert(blob != NULL);
9545
9546 if (blob->parent_id != SPDK_BLOBID_INVALID &&
9547 blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
9548 assert(spdk_blob_is_thin_provisioned(blob));
9549 return true;
9550 }
9551
9552 return false;
9553 }
9554
9555 bool
9556 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
9557 {
9558 assert(blob != NULL);
9559 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
9560 }
9561
9562 bool
9563 spdk_blob_is_esnap_clone(const struct spdk_blob *blob)
9564 {
9565 return blob_is_esnap_clone(blob);
9566 }
9567
9568 static void
9569 blob_update_clear_method(struct spdk_blob *blob)
9570 {
9571 enum blob_clear_method stored_cm;
9572
9573 assert(blob != NULL);
9574
9575 /* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
9576 * in metadata previously. If something other than the default was
9577 * specified, ignore the stored value and use what was passed in.
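* Either way, blob->clear_method holds the effective value after this function runs.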
9578 */
9579 stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);
9580
9581 if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
9582 blob->clear_method = stored_cm;
9583 } else if (blob->clear_method != stored_cm) {
9584 SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
9585 blob->clear_method, stored_cm);
9586 }
9587 }
9588
9589 spdk_blob_id
9590 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
9591 {
9592 struct spdk_blob_list *snapshot_entry = NULL;
9593 struct spdk_blob_list *clone_entry = NULL;
9594
9595 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
9596 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
9597 if (clone_entry->id == blob_id) {
9598 return snapshot_entry->id;
9599 }
9600 }
9601 }
9602
9603 return SPDK_BLOBID_INVALID;
9604 }
9605
9606 int
9607 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
9608 size_t *count)
9609 {
9610 struct spdk_blob_list *snapshot_entry, *clone_entry;
9611 size_t n;
9612
9613 snapshot_entry = bs_get_snapshot_entry(bs, blobid);
9614 if (snapshot_entry == NULL) {
9615 *count = 0;
9616 return 0;
9617 }
9618
9619 if (ids == NULL || *count < snapshot_entry->clone_count) {
9620 *count = snapshot_entry->clone_count;
9621 return -ENOMEM;
9622 }
9623 *count = snapshot_entry->clone_count;
9624
9625 n = 0;
9626 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
9627 ids[n++] = clone_entry->id;
9628 }
9629
9630 return 0;
9631 }
9632
9633 static void
9634 bs_load_grow_continue(struct spdk_bs_load_ctx *ctx)
9635 {
9636 int rc;
9637
9638 if (ctx->super->size == 0) {
9639 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9640 }
9641
9642 if (ctx->super->io_unit_size == 0) {
9643 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
9644 }
9645 if (ctx->super->md_page_size == 0) {
9646 ctx->super->md_page_size = SPDK_BS_PAGE_SIZE;
9647 }
9648
9649 /* Parse the super block */
9650 ctx->bs->clean = 1;
9651 ctx->bs->cluster_sz = ctx->super->cluster_size;
9652 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
9653 ctx->bs->md_page_size = ctx->super->md_page_size;
9654 ctx->bs->io_unit_size = ctx->super->io_unit_size;
9655 bs_init_per_cluster_fields(ctx->bs);
9656 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
9657 if (rc < 0) {
9658 bs_load_ctx_fail(ctx, -ENOMEM);
9659 return;
9660 }
9661 ctx->bs->md_start = ctx->super->md_start;
9662 ctx->bs->md_len = ctx->super->md_len;
9663 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
9664 if (rc < 0) {
9665 bs_load_ctx_fail(ctx, -ENOMEM);
9666 return;
9667 }
9668
9669 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
9670 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
9671 ctx->bs->super_blob = ctx->super->super_blob;
9672 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
9673
9674 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
9675 SPDK_ERRLOG("Cannot grow an unclean blobstore; load it normally to clean it first.\n");
9676 bs_load_ctx_fail(ctx, -EIO);
9677 return;
9678 } else {
9679 bs_load_read_used_pages(ctx);
9680 }
9681 }
9682
9683 static void
9684 bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9685 {
9686 struct spdk_bs_load_ctx *ctx = cb_arg;
9687
9688 if (bserrno != 0) {
9689 bs_load_ctx_fail(ctx, bserrno);
9690 return;
9691 }
9692
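/* The resized super block is now on disk; resume the normal load path. */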
bs_load_grow_continue(ctx);
9693 }
9694
9695 static void
9696 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9697 {
9698 struct spdk_bs_load_ctx *ctx = cb_arg;
9699
9700 if (bserrno != 0) {
9701 bs_load_ctx_fail(ctx, bserrno);
9702 return;
9703 }
9704
9705 spdk_free(ctx->mask);
9706
9707 bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
9708 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
9709 bs_load_grow_super_write_cpl, ctx);
9710 }
9711
9712 static void
9713 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9714 {
9715 struct spdk_bs_load_ctx *ctx = cb_arg;
9716 uint64_t lba, lba_count;
9717 uint64_t dev_size;
9718 uint64_t total_clusters;
9719
9720 if (bserrno != 0) {
9721 bs_load_ctx_fail(ctx, bserrno);
9722 return;
9723 }
9724
9725 /* The type must be correct */
9726 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
9727 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
9728 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
9729 struct spdk_blob_md_page) * 8));
9730 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9731 total_clusters = dev_size / ctx->super->cluster_size;
9732 ctx->mask->length = total_clusters;
9733
9734 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
9735 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
9736 bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count,
9737 bs_load_grow_used_clusters_write_cpl, ctx);
9738 }
9739
9740 static void
9741 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx)
9742 {
9743 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
9744 uint64_t lba, lba_count, mask_size;
9745
9746 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9747 total_clusters = dev_size / ctx->super->cluster_size;
9748 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
9749 spdk_divide_round_up(total_clusters, 8),
9750 ctx->super->md_page_size);
9751 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
9752 /* No need to grow, or no space to grow */
9753 if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) {
9754 SPDK_DEBUGLOG(blob, "No grow\n");
9755 bs_load_grow_continue(ctx);
9756 return;
9757 }
9758
9759 SPDK_DEBUGLOG(blob, "Resize blobstore\n");
9760
9761 ctx->super->size = dev_size;
9762 ctx->super->used_cluster_mask_len = used_cluster_mask_len;
9763 ctx->super->crc = blob_md_page_calc_crc(ctx->super);
9764
9765 mask_size = used_cluster_mask_len * ctx->super->md_page_size;
9766 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY,
9767 SPDK_MALLOC_DMA);
9768 if (!ctx->mask) {
9769 bs_load_ctx_fail(ctx, -ENOMEM);
9770 return;
9771 }
9772 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
9773 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
9774 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
9775 bs_load_grow_used_clusters_read_cpl, ctx);
9776 }
9777
9778 static void
9779 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9780 {
9781 struct spdk_bs_load_ctx *ctx = cb_arg;
9782 int rc;
9783
9784 rc = bs_super_validate(ctx->super, ctx->bs);
9785 if (rc != 0) {
9786 bs_load_ctx_fail(ctx, rc);
9787 return;
9788 }
9789
9790 bs_load_try_to_grow(ctx);
9791 }
9792
9793 struct
spdk_bs_grow_ctx {
9794 struct spdk_blob_store *bs;
9795 struct spdk_bs_super_block *super;
9796
9797 struct spdk_bit_pool *new_used_clusters;
9798 struct spdk_bs_md_mask *new_used_clusters_mask;
9799
9800 spdk_bs_sequence_t *seq;
9801 };
9802
9803 static void
9804 bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno)
9805 {
9806 if (bserrno != 0) {
9807 spdk_bit_pool_free(&ctx->new_used_clusters);
9808 }
9809
9810 bs_sequence_finish(ctx->seq, bserrno);
9811 free(ctx->new_used_clusters_mask);
9812 spdk_free(ctx->super);
9813 free(ctx);
9814 }
9815
9816 static void
9817 bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9818 {
9819 struct spdk_bs_grow_ctx *ctx = cb_arg;
9820 struct spdk_blob_store *bs = ctx->bs;
9821 uint64_t total_clusters;
9822
9823 if (bserrno != 0) {
9824 bs_grow_live_done(ctx, bserrno);
9825 return;
9826 }
9827
9828 /*
9829 * Blobstore is not clean until unload; for now only the super block is up to date.
9830 * This is similar to the state right after blobstore init, before bs_write_used_md()
9831 * has executed.
9832 * When cleanly unloaded, the used md pages will be written out.
9833 * In case of an unclean shutdown, loading the blobstore will go through the recovery
9834 * path, correctly filling out used_clusters with the new size and writing it out.
9835 */
9836 bs->clean = 0;
9837
9838 /* Reverting super->size past this point is complex, so avoid any error paths
9839 * that would require doing so. */
9840 spdk_spin_lock(&bs->used_lock);
9841
9842 total_clusters = ctx->super->size / ctx->super->cluster_size;
9843
9844 assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters));
9845 spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask);
9846
9847 assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters));
9848 spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask);
9849
9850 spdk_bit_pool_free(&bs->used_clusters);
9851 bs->used_clusters = ctx->new_used_clusters;
9852
9853 bs->total_clusters = total_clusters;
9854 bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up(
9855 bs->md_start + bs->md_len, bs->pages_per_cluster);
9856
9857 bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters);
9858 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
9859 spdk_spin_unlock(&bs->used_lock);
9860
9861 bs_grow_live_done(ctx, 0);
9862 }
9863
9864 static void
9865 bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9866 {
9867 struct spdk_bs_grow_ctx *ctx = cb_arg;
9868 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
9869 int rc;
9870
9871 if (bserrno != 0) {
9872 bs_grow_live_done(ctx, bserrno);
9873 return;
9874 }
9875
9876 rc = bs_super_validate(ctx->super, ctx->bs);
9877 if (rc != 0) {
9878 bs_grow_live_done(ctx, rc);
9879 return;
9880 }
9881
9882 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9883 total_clusters = dev_size / ctx->super->cluster_size;
9884 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
9885 spdk_divide_round_up(total_clusters, 8),
9886 ctx->super->md_page_size);
9887 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
9888 /* Only check dev_size here, since it can change while total_clusters stays the same.
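* The checks below then decide whether the blobstore can actually be grown.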
*/
9889 if (dev_size == ctx->super->size) {
9890 SPDK_DEBUGLOG(blob, "No need to grow blobstore\n");
9891 bs_grow_live_done(ctx, 0);
9892 return;
9893 }
9894 /*
9895 * Blobstore cannot be shrunk, so fail if:
9896 * - the new size of the device is smaller than the size in the super block
9897 * - the new total number of clusters is smaller than the used_clusters bit_pool
9898 * - there is not enough space in metadata for the used_cluster_mask to be written out
9899 */
9900 if (dev_size < ctx->super->size ||
9901 total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) ||
9902 used_cluster_mask_len > max_used_cluster_mask) {
9903 SPDK_DEBUGLOG(blob, "No space to grow blobstore\n");
9904 bs_grow_live_done(ctx, -ENOSPC);
9905 return;
9906 }
9907
9908 SPDK_DEBUGLOG(blob, "Resizing blobstore\n");
9909
9910 ctx->new_used_clusters_mask = calloc(1, total_clusters);
9911 if (!ctx->new_used_clusters_mask) {
9912 bs_grow_live_done(ctx, -ENOMEM);
9913 return;
9914 }
9915 ctx->new_used_clusters = spdk_bit_pool_create(total_clusters);
9916 if (!ctx->new_used_clusters) {
9917 bs_grow_live_done(ctx, -ENOMEM);
9918 return;
9919 }
9920
9921 ctx->super->clean = 0;
9922 ctx->super->size = dev_size;
9923 ctx->super->used_cluster_mask_len = used_cluster_mask_len;
9924 bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx);
9925 }
9926
9927 void
9928 spdk_bs_grow_live(struct spdk_blob_store *bs,
9929 spdk_bs_op_complete cb_fn, void *cb_arg)
9930 {
9931 struct spdk_bs_cpl cpl;
9932 struct spdk_bs_grow_ctx *ctx;
9933
9934 assert(spdk_get_thread() == bs->md_thread);
9935
9936 SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev);
9937
9938 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
9939 cpl.u.bs_basic.cb_fn = cb_fn;
9940 cpl.u.bs_basic.cb_arg = cb_arg;
9941
9942 ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx));
9943 if (!ctx) {
9944 cb_fn(cb_arg, -ENOMEM);
9945 return;
9946 }
9947 ctx->bs = bs;
9948
9949 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
9950 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
9951 if (!ctx->super) {
9952 free(ctx);
9953 cb_fn(cb_arg, -ENOMEM);
9954 return;
9955 }
9956
9957 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
9958 if (!ctx->seq) {
9959 spdk_free(ctx->super);
9960 free(ctx);
9961 cb_fn(cb_arg, -ENOMEM);
9962 return;
9963 }
9964
9965 /* Read the super block */
9966 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
9967 bs_byte_to_lba(bs, sizeof(*ctx->super)),
9968 bs_grow_live_load_super_cpl, ctx);
9969 }
9970
9971 void
9972 spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
9973 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
9974 {
9975 struct spdk_blob_store *bs;
9976 struct spdk_bs_cpl cpl;
9977 struct spdk_bs_load_ctx *ctx;
9978 struct spdk_bs_opts opts = {};
9979 int err;
9980
9981 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);
9982
9983 if ((dev->phys_blocklen % dev->blocklen) != 0) {
9984 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen);
9985 dev->destroy(dev);
9986 cb_fn(cb_arg, NULL, -EINVAL);
9987 return;
9988 }
9989
9990 spdk_bs_opts_init(&opts, sizeof(opts));
9991 if (o) {
9992 if (bs_opts_copy(o, &opts)) {
9993 dev->destroy(dev);
9994 cb_fn(cb_arg, NULL, -EINVAL);
9995 return;
9996 }
9997 }
9998
9999 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
10000 dev->destroy(dev);
10001 cb_fn(cb_arg, NULL, -EINVAL);
10002 return;
10003 }
10004
10005 err = bs_alloc(dev, &opts, &bs, &ctx);
10006 if (err) {
10007 dev->destroy(dev);
10008 cb_fn(cb_arg, NULL, err);
10009
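/* bs_alloc() failed; the dev was already destroyed above. */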
return;
10010 }
10011
10012 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
10013 cpl.u.bs_handle.cb_fn = cb_fn;
10014 cpl.u.bs_handle.cb_arg = cb_arg;
10015 cpl.u.bs_handle.bs = bs;
10016
10017 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
10018 if (!ctx->seq) {
10019 spdk_free(ctx->super);
10020 free(ctx);
10021 bs_free(bs);
10022 cb_fn(cb_arg, NULL, -ENOMEM);
10023 return;
10024 }
10025
10026 /* Read the super block */
10027 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
10028 bs_byte_to_lba(bs, sizeof(*ctx->super)),
10029 bs_grow_load_super_cpl, ctx);
10030 }
10031
10032 int
10033 spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len)
10034 {
10035 if (!blob_is_esnap_clone(blob)) {
10036 return -EINVAL;
10037 }
10038
10039 return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true);
10040 }
10041
10042 struct spdk_io_channel *
10043 blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob)
10044 {
10045 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(ch);
10046 struct spdk_bs_dev *bs_dev = blob->back_bs_dev;
10047 struct blob_esnap_channel find = {};
10048 struct blob_esnap_channel *esnap_channel, *existing;
10049
10050 find.blob_id = blob->id;
10051 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
10052 if (spdk_likely(esnap_channel != NULL)) {
10053 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n",
10054 blob->id, spdk_thread_get_name(spdk_get_thread()));
10055 return esnap_channel->channel;
10056 }
10057
10058 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n",
10059 blob->id, spdk_thread_get_name(spdk_get_thread()));
10060
10061 esnap_channel = calloc(1, sizeof(*esnap_channel));
10062 if (esnap_channel == NULL) {
10063 SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n",
10064 find.blob_id);
10065 return NULL;
10066 }
10067 esnap_channel->channel = bs_dev->create_channel(bs_dev);
10068 if (esnap_channel->channel == NULL) {
10069 SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id);
10070 free(esnap_channel);
10071 return NULL;
10072 }
10073 esnap_channel->blob_id = find.blob_id;
10074 existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
10075 if (spdk_unlikely(existing != NULL)) {
10076 /*
10077 * This should be unreachable: all modifications to this tree happen on this thread.
10078 */
10079 SPDK_ERRLOG("blob 0x%" PRIx64 ": lost race to allocate a channel\n", find.blob_id);
10080 assert(false);
10081
10082 bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
10083 free(esnap_channel);
10084
10085 return existing->channel;
10086 }
10087
10088 return esnap_channel->channel;
10089 }
10090
10091 static int
10092 blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2)
10093 {
10094 return (c1->blob_id < c2->blob_id ?
-1 : c1->blob_id > c2->blob_id); 10095 } 10096 10097 struct blob_esnap_destroy_ctx { 10098 spdk_blob_op_with_handle_complete cb_fn; 10099 void *cb_arg; 10100 struct spdk_blob *blob; 10101 struct spdk_bs_dev *back_bs_dev; 10102 bool abort_io; 10103 }; 10104 10105 static void 10106 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status) 10107 { 10108 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 10109 struct spdk_blob *blob = ctx->blob; 10110 struct spdk_blob_store *bs = blob->bs; 10111 10112 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n", 10113 blob->id); 10114 10115 if (ctx->cb_fn != NULL) { 10116 ctx->cb_fn(ctx->cb_arg, blob, status); 10117 } 10118 free(ctx); 10119 10120 bs->esnap_channels_unloading--; 10121 if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) { 10122 spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg); 10123 } 10124 } 10125 10126 static void 10127 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i) 10128 { 10129 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 10130 struct spdk_blob *blob = ctx->blob; 10131 struct spdk_bs_dev *bs_dev = ctx->back_bs_dev; 10132 struct spdk_io_channel *channel = spdk_io_channel_iter_get_channel(i); 10133 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(channel); 10134 struct blob_esnap_channel *esnap_channel; 10135 struct blob_esnap_channel find = {}; 10136 10137 assert(spdk_get_thread() == spdk_io_channel_get_thread(channel)); 10138 10139 find.blob_id = blob->id; 10140 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 10141 if (esnap_channel != NULL) { 10142 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n", 10143 blob->id, spdk_thread_get_name(spdk_get_thread())); 10144 RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 10145 10146 if (ctx->abort_io) { 10147 spdk_bs_user_op_t *op, *tmp; 10148 10149 TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) { 10150 if (op->back_channel == esnap_channel->channel) { 10151 TAILQ_REMOVE(&bs_channel->queued_io, op, link); 10152 bs_user_op_abort(op, -EIO); 10153 } 10154 } 10155 } 10156 10157 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 10158 free(esnap_channel); 10159 } 10160 10161 spdk_for_each_channel_continue(i, 0); 10162 } 10163 10164 /* 10165 * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be 10166 * used when closing an esnap clone blob and after decoupling from the parent. 
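* Channels are created lazily, so a blob that issues IO to its external
* snapshot again afterwards simply gets fresh channels.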
10167 */
10168 static void
10169 blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
10170 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
10171 {
10172 struct blob_esnap_destroy_ctx *ctx;
10173
10174 if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) {
10175 if (cb_fn != NULL) {
10176 cb_fn(cb_arg, blob, 0);
10177 }
10178 return;
10179 }
10180
10181 ctx = calloc(1, sizeof(*ctx));
10182 if (ctx == NULL) {
10183 if (cb_fn != NULL) {
10184 cb_fn(cb_arg, blob, -ENOMEM);
10185 }
10186 return;
10187 }
10188 ctx->cb_fn = cb_fn;
10189 ctx->cb_arg = cb_arg;
10190 ctx->blob = blob;
10191 ctx->back_bs_dev = blob->back_bs_dev;
10192 ctx->abort_io = abort_io;
10193
10194 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n",
10195 blob->id);
10196
10197 blob->bs->esnap_channels_unloading++;
10198 spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx,
10199 blob_esnap_destroy_channels_done);
10200 }
10201
10202 /*
10203 * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a
10204 * bs_channel is destroyed.
10205 */
10206 static void
10207 blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch)
10208 {
10209 struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp;
10210
10211 assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch)));
10212
10213 SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n",
10214 spdk_thread_get_name(spdk_get_thread()));
10215 RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels,
10216 esnap_channel_tmp) {
10217 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64
10218 ": destroying one channel in thread %s\n",
10219 esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread()));
10220 RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel);
10221 spdk_put_io_channel(esnap_channel->channel);
10222 free(esnap_channel);
10223 }
10224 SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n",
10225 spdk_thread_get_name(spdk_get_thread()));
10226 }
10227
10228 static void
10229 blob_set_back_bs_dev_done(void *_ctx, int bserrno)
10230 {
10231 struct set_bs_dev_ctx *ctx = _ctx;
10232
10233 if (bserrno != 0) {
10234 /* Even though the unfreeze failed, the update may have succeeded.
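* Report the original status from ctx->bserrno rather than the unfreeze error.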
*/ 10235 SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id, 10236 bserrno); 10237 } 10238 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 10239 free(ctx); 10240 } 10241 10242 static void 10243 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno) 10244 { 10245 struct set_bs_dev_ctx *ctx = _ctx; 10246 int rc; 10247 10248 if (bserrno != 0) { 10249 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n", 10250 blob->id, bserrno); 10251 ctx->bserrno = bserrno; 10252 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10253 return; 10254 } 10255 10256 if (blob->back_bs_dev != NULL) { 10257 blob_unref_back_bs_dev(blob); 10258 } 10259 10260 if (ctx->parent_refs_cb_fn) { 10261 rc = ctx->parent_refs_cb_fn(blob, ctx->parent_refs_cb_arg); 10262 if (rc != 0) { 10263 ctx->bserrno = rc; 10264 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10265 return; 10266 } 10267 } 10268 10269 SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id); 10270 blob->back_bs_dev = ctx->back_bs_dev; 10271 ctx->bserrno = 0; 10272 10273 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10274 } 10275 10276 static void 10277 blob_set_back_bs_dev_frozen(void *_ctx, int bserrno) 10278 { 10279 struct set_bs_dev_ctx *ctx = _ctx; 10280 struct spdk_blob *blob = ctx->blob; 10281 10282 if (bserrno != 0) { 10283 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id, 10284 bserrno); 10285 ctx->cb_fn(ctx->cb_arg, bserrno); 10286 free(ctx); 10287 return; 10288 } 10289 10290 /* 10291 * This does not prevent future reads from the esnap device because any future IO will 10292 * lazily create a new esnap IO channel. 10293 */ 10294 blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx); 10295 } 10296 10297 void 10298 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev, 10299 spdk_blob_op_complete cb_fn, void *cb_arg) 10300 { 10301 if (!blob_is_esnap_clone(blob)) { 10302 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 10303 cb_fn(cb_arg, -EINVAL); 10304 return; 10305 } 10306 10307 blob_set_back_bs_dev(blob, back_bs_dev, NULL, NULL, cb_fn, cb_arg); 10308 } 10309 10310 struct spdk_bs_dev * 10311 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob) 10312 { 10313 if (!blob_is_esnap_clone(blob)) { 10314 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 10315 return NULL; 10316 } 10317 10318 return blob->back_bs_dev; 10319 } 10320 10321 bool 10322 spdk_blob_is_degraded(const struct spdk_blob *blob) 10323 { 10324 if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) { 10325 return true; 10326 } 10327 if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) { 10328 return false; 10329 } 10330 10331 return blob->back_bs_dev->is_degraded(blob->back_bs_dev); 10332 } 10333 10334 SPDK_LOG_REGISTER_COMPONENT(blob) 10335 SPDK_LOG_REGISTER_COMPONENT(blob_esnap) 10336 10337 static void 10338 blob_trace(void) 10339 { 10340 struct spdk_trace_tpoint_opts opts[] = { 10341 { 10342 "BLOB_REQ_SET_START", TRACE_BLOB_REQ_SET_START, 10343 OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 1, 10344 { 10345 { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 } 10346 } 10347 }, 10348 { 10349 "BLOB_REQ_SET_COMPLETE", TRACE_BLOB_REQ_SET_COMPLETE, 10350 OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 0, 10351 { 10352 { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 } 10353 } 10354 }, 10355 }; 10356 10357 spdk_trace_register_object(OBJECT_BLOB_CB_ARG, 'a'); 
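/* Describe the blob tracepoints and relate blob cb_arg objects to the bdev IO
 * tracepoints, so trace tools can correlate blob requests with the bdev IO
 * they generate.
 */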
10358 spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts)); 10359 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_START, OBJECT_BLOB_CB_ARG, 1); 10360 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_DONE, OBJECT_BLOB_CB_ARG, 0); 10361 } 10362 SPDK_TRACE_REGISTER_FN(blob_trace, "blob", TRACE_GROUP_BLOB) 10363
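/*
 * Usage sketch (illustrative only, not compiled into the library): walk every
 * blob in a loaded blobstore with the iterator API above and print each blob's
 * id and xattr names. Assumes "bs" came from spdk_bs_load() and that this runs
 * on the blobstore's metadata thread; the function name example_iter_cb is
 * hypothetical.
 *
 *	static void
 *	example_iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *		struct spdk_xattr_names *names;
 *		uint32_t i;
 *
 *		if (bserrno == -ENOENT) {
 *			// Iteration complete; the previous blob was already
 *			// closed by spdk_bs_iter_next().
 *			return;
 *		}
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("blob iteration failed: %d\n", bserrno);
 *			return;
 *		}
 *
 *		printf("blob 0x%" PRIx64 "\n", spdk_blob_get_id(blob));
 *		if (spdk_blob_get_xattr_names(blob, &names) == 0) {
 *			for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *				printf("  xattr: %s\n", spdk_xattr_names_get_name(names, i));
 *			}
 *			spdk_xattr_names_free(names);
 *		}
 *
 *		// spdk_bs_iter_next() closes "blob" before opening the next one.
 *		spdk_bs_iter_next(bs, blob, example_iter_cb, bs);
 *	}
 *
 *	// Start the walk: spdk_bs_iter_first(bs, example_iter_cb, bs);
 */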