/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/trace.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/trace_defs.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg);

static void bs_shallow_copy_cluster_find_next(void *cb_arg);

/*
 * External snapshots require a channel per thread per esnap bdev. The tree
 * is populated lazily as blob IOs are handled by the back_bs_dev. When this
 * channel is destroyed, all the channels in the tree are destroyed.
 */

struct blob_esnap_channel {
	RB_ENTRY(blob_esnap_channel) node;
	spdk_blob_id blob_id;
	struct spdk_io_channel *channel;
};

static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
static void blob_set_back_bs_dev_frozen(void *_ctx, int bserrno);
RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)

static inline bool
blob_is_esnap_clone(const struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
}
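/*
 * Comparator for the spdk_blob_tree RB tree generated below; orders blobs by
 * blob ID.
 */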
static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	assert(blob1 != NULL && blob2 != NULL);
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	assert(spdk_spin_held(&bs->used_lock));

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	blob->active.num_allocated_clusters++;

	return 0;
}
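/*
 * Claims a free cluster for the given cluster index. When the blob uses an
 * extent table and the owning extent page is not allocated yet, this also
 * claims a metadata page for it. Must be called with bs->used_lock held.
 */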
static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = NULL;

	assert(spdk_spin_held(&blob->bs->used_lock));

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request. */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* An extent page never occupies md_page 0, so start the search from 1. */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request. */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
		      blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}

static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_unref_back_bs_dev(struct spdk_blob *blob)
{
	blob->back_bs_dev->destroy(blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}
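/*
 * Releases all memory held by a blob: the active and clean metadata arrays,
 * both xattr lists, and the reference to back_bs_dev, if any. The blob must
 * have no persists in flight.
 */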
static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob_unref_back_bs_dev(blob);
	}

	free(blob);
}

static void
blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_dev *bs_dev = ctx;

	if (bserrno != 0) {
		/*
		 * This is probably due to a memory allocation failure when creating the
		 * blob_esnap_destroy_ctx before iterating threads.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
			    blob->id, bserrno);
		assert(false);
	}

	if (bs_dev == NULL) {
		/*
		 * This check exists to make scan-build happy.
		 *
		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
		 * the blobstore is being loaded. It could also be NULL if there was an error
		 * opening the esnap device. In each of these cases, no channels could have been
		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
		 * deref.
		 */
		assert(false);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
	bs_dev->destroy(bs_dev);
}

static void
blob_back_bs_destroy(struct spdk_blob *blob)
{
	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
		      blob->id);

	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
					   blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}
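/*
 * Identifies the new parent during a set-parent operation: either a regular
 * snapshot blob (referenced by blob ID) or an external snapshot (referenced
 * by an opaque esnap ID and its bs_dev).
 */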
struct blob_parent {
	union {
		struct {
			spdk_blob_id id;
			struct spdk_blob *blob;
		} snapshot;

		struct {
			void *id;
			uint32_t id_len;
			struct spdk_bs_dev *back_bs_dev;
		} esnap;
	} u;
};

typedef int (*set_parent_refs_cb)(struct spdk_blob *blob, struct blob_parent *parent);

struct set_bs_dev_ctx {
	struct spdk_blob *blob;
	struct spdk_bs_dev *back_bs_dev;

	/*
	 * This callback is used during a set parent operation to change the references
	 * to the parent of the blob.
	 */
	set_parent_refs_cb parent_refs_cb_fn;
	struct blob_parent *parent_refs_cb_arg;

	spdk_blob_op_complete cb_fn;
	void *cb_arg;
	int bserrno;
};

static void
blob_set_back_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
		     set_parent_refs_cb parent_refs_cb_fn, struct blob_parent *parent_refs_cb_arg,
		     spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct set_bs_dev_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n",
			    blob->id);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->parent_refs_cb_fn = parent_refs_cb_fn;
	ctx->parent_refs_cb_arg = parent_refs_cb_arg;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->back_bs_dev = back_bs_dev;
	ctx->blob = blob;

	blob_freeze_io(blob, blob_set_back_bs_dev_frozen, ctx);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
}
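/*
 * Copies the active metadata arrays into blob->clean and hands the blob fresh
 * copies to keep mutating. The state only transitions to CLEAN when the blob
 * is LOADING; metadata dirtied during a persist stays DIRTY.
 */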
static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_allocated_clusters = blob->active.num_allocated_clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}
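/*
 * Applies every descriptor in a single metadata page (flags, extent RLE,
 * extent table, extent page, xattrs) to the in-memory blob. Unrecognized
 * descriptor types are skipped rather than treated as errors.
 */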
static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* An extent table is already present in the md;
				 * the two descriptor types must never appear at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
						blob->active.num_allocated_clusters++;
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* Extent RLE is already present in the md;
				 * the two descriptor types must never appear at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* The number of clusters in this ET does not match the number
				 * from the previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, which are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			unsigned int i;
			unsigned int cluster_count = 0;
			size_t cluster_idx_length;

			if (blob->extent_rle_found) {
				/* Extent RLE is already present in the md;
				 * the two descriptor types must never appear at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should
			 * match the current size of the blob.
			 * If reading is ever changed to batch mode, this check must be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
					blob->active.num_allocated_clusters++;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD, this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}
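/*
 * Appends one zeroed metadata page to the serialization buffer, growing the
 * DMA-able page array, and returns the new page through last_page.
 */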
static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(blob->bs->md_page_size, 0,
				     NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, blob->bs->md_page_size * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * required_sz is updated on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr *desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Run-length encode unallocated extent pages: keep extending the run
		 * while the next extent page is unallocated as well. */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}
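/*
 * Emits extent table descriptors, adding metadata pages to the chain whenever
 * the current page runs out of room, until every extent page entry has been
 * serialized.
 */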
static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted.
	 * Such a case occurs when num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
	/* Assert for scan-build false positive */
	assert(lba_per_cluster > 0);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}
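/*
 * Fills in a single extent page descriptor covering up to SPDK_EXTENTS_PER_EP
 * clusters, starting from the extent-page-aligned cluster index.
 */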
static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr *xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}
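/*
 * Serializes the whole blob into a chain of metadata pages: flags first, then
 * user xattrs, internal xattrs, and finally either the extent table or the
 * extent RLE descriptors, depending on use_extent_table.
 */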
static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob *blob;

	struct spdk_blob_md_page *pages;
	uint32_t num_pages;
	uint32_t next_extent_page;
	spdk_bs_sequence_t *seq;

	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
};

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot open failed\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);
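/*
 * Opens the external snapshot device for an esnap clone by looking up the
 * esnap ID xattr and invoking the blobstore's esnap_bs_dev_create callback.
 * The consumer may legitimately decline to open the device, in which case
 * bs_dev stays NULL.
 */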
static int
blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
{
	struct spdk_blob_store *bs = blob->bs;
	struct spdk_bs_dev *bs_dev = NULL;
	const void *esnap_id = NULL;
	size_t id_len = 0;
	int rc;

	if (bs->esnap_bs_dev_create == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
			       "without support for esnap clones\n", blob->id);
		return -ENOTSUP;
	}
	assert(blob->back_bs_dev == NULL);

	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
	if (rc != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
		return -EINVAL;
	}
	assert(id_len > 0 && id_len < UINT32_MAX);

	SPDK_INFOLOG(blob, "Creating external snapshot device\n");

	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
				     &bs_dev);
	if (rc != 0) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
			      "with error %d\n", blob->id, rc);
		return rc;
	}

	/*
	 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot.
	 * This especially might happen during spdk_bs_load() iteration.
	 */
	if (bs_dev != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
				       "is not compatible with blobstore block size %u\n",
				       blob->id, bs_dev->blocklen, bs->io_unit_size);
			bs_dev->destroy(bs_dev);
			return -EINVAL;
		}
	}

	blob->back_bs_dev = bs_dev;
	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;

	return 0;
}

static void
blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	const void *value;
	size_t len;
	int rc;

	if (blob_is_esnap_clone(blob)) {
		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
		blob_load_final(ctx, rc);
		return;
	}

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}
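/*
 * Completion callback that reads extent pages one at a time, re-entering
 * itself after each read until every allocated extent page has been parsed.
 * Unallocated extent pages (valid only for thin provisioned blobs) simply
 * grow the cluster array.
 */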
static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	uint64_t i;
	uint32_t crc;
	uint64_t lba;
	void *tmp;
	uint64_t sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(blob->bs->md_page_size, 0,
					  NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, blob->bs->md_page_size),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case the blob size is increased by up to the amount left
			 * in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(seq, ctx);
}

static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	int rc;
	uint32_t crc;
	uint32_t current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support
		 * for the extent table. No extent_* descriptors means that the blob
		 * has a length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(seq, ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, bs->md_page_size, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, bs->md_page_size),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob *blob;

	struct spdk_blob_md_page *pages;
	uint32_t next_extent_page;
	struct spdk_blob_md_page *extent_page;

	spdk_bs_sequence_t *seq;
	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

static void
bs_batch_clear_dev(struct spdk_blob *blob, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}
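/*
 * Validates an on-disk super block: version range, signature, CRC, bstype
 * (exact match or all-zero wildcard), and that the recorded blobstore size
 * does not exceed the size of the underlying device.
 */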
static int
bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
{
	uint32_t crc;
	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];

	if (super->version > SPDK_BS_VERSION ||
	    super->version < SPDK_BS_INITIAL_VERSION) {
		return -EILSEQ;
	}

	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(super->signature)) != 0) {
		return -EILSEQ;
	}

	crc = blob_md_page_calc_crc(super);
	if (crc != super->crc) {
		return -EILSEQ;
	}

	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless of bstype\n");
	} else {
		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		return -ENXIO;
	}

	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
		return -EILSEQ;
	}

	return 0;
}

static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx *next_persist, *tmp;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
}

static void
blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	spdk_spin_lock(&bs->used_lock);

	/* Release all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to release if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			bs_release_md_page(bs, blob->active.extent_pages[i]);
		}
	}

	spdk_spin_unlock(&bs->used_lock);

	if (blob->active.num_extent_pages == 0) {
		free(blob->active.extent_pages);
		blob->active.extent_pages = NULL;
		blob->active.extent_pages_array_size = 0;
	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
		assert(tmp != NULL);
		blob->active.extent_pages = tmp;
#endif
		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
	}

	blob_persist_complete(seq, ctx, bserrno);
}
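/*
 * Writes zeroes over the metadata pages of any extent pages dropped by a
 * truncation; the pages themselves are released from the in-memory bit array
 * in the completion callback above.
 */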
spdk_blob_store *bs = blob->bs; 1959 size_t i; 1960 uint64_t lba; 1961 uint64_t lba_count; 1962 spdk_bs_batch_t *batch; 1963 1964 batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx); 1965 lba_count = bs_byte_to_lba(bs, bs->md_page_size); 1966 1967 /* Clear all extent_pages that were truncated */ 1968 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1969 /* Nothing to clear if it was not allocated */ 1970 if (blob->active.extent_pages[i] != 0) { 1971 lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]); 1972 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1973 } 1974 } 1975 1976 bs_batch_close(batch); 1977 } 1978 1979 static void 1980 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1981 { 1982 struct spdk_blob_persist_ctx *ctx = cb_arg; 1983 struct spdk_blob *blob = ctx->blob; 1984 struct spdk_blob_store *bs = blob->bs; 1985 size_t i; 1986 1987 if (bserrno != 0) { 1988 blob_persist_complete(seq, ctx, bserrno); 1989 return; 1990 } 1991 1992 spdk_spin_lock(&bs->used_lock); 1993 /* Release all clusters that were truncated */ 1994 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1995 uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]); 1996 1997 /* Nothing to release if it was not allocated */ 1998 if (blob->active.clusters[i] != 0) { 1999 bs_release_cluster(bs, cluster_num); 2000 } 2001 } 2002 spdk_spin_unlock(&bs->used_lock); 2003 2004 if (blob->active.num_clusters == 0) { 2005 free(blob->active.clusters); 2006 blob->active.clusters = NULL; 2007 blob->active.cluster_array_size = 0; 2008 } else if (blob->active.num_clusters != blob->active.cluster_array_size) { 2009 #ifndef __clang_analyzer__ 2010 void *tmp; 2011 2012 /* scan-build really can't figure reallocs, workaround it */ 2013 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters); 2014 assert(tmp != NULL); 2015 blob->active.clusters = tmp; 2016 2017 #endif 2018 blob->active.cluster_array_size = blob->active.num_clusters; 2019 } 2020 2021 /* Move on to clearing extent pages */ 2022 blob_persist_clear_extents(seq, ctx); 2023 } 2024 2025 static void 2026 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2027 { 2028 struct spdk_blob *blob = ctx->blob; 2029 struct spdk_blob_store *bs = blob->bs; 2030 spdk_bs_batch_t *batch; 2031 size_t i; 2032 uint64_t lba; 2033 uint64_t lba_count; 2034 2035 /* Clusters don't move around in blobs. The list shrinks or grows 2036 * at the end, but no changes ever occur in the middle of the list. 2037 */ 2038 2039 batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx); 2040 2041 /* Clear all clusters that were truncated */ 2042 lba = 0; 2043 lba_count = 0; 2044 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 2045 uint64_t next_lba = blob->active.clusters[i]; 2046 uint64_t next_lba_count = bs_cluster_to_lba(bs, 1); 2047 2048 if (next_lba > 0 && (lba + lba_count) == next_lba) { 2049 /* This cluster is contiguous with the previous one. */ 2050 lba_count += next_lba_count; 2051 continue; 2052 } else if (next_lba == 0) { 2053 continue; 2054 } 2055 2056 /* This cluster is not contiguous with the previous one. 
*/
2057
2058 /* If a run of LBAs was previously accumulated, clear it now */
2059 if (lba_count > 0) {
2060 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count);
2061 }
2062
2063 /* Start building the next batch */
2064 lba = next_lba;
2065 if (next_lba > 0) {
2066 lba_count = next_lba_count;
2067 } else {
2068 lba_count = 0;
2069 }
2070 }
2071
2072 /* If we ended with a contiguous set of LBAs, clear them now */
2073 if (lba_count > 0) {
2074 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count);
2075 }
2076
2077 bs_batch_close(batch);
2078 }
2079
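/*
 * Illustration of the coalescing loop above (hypothetical numbers): assume
 * 8 LBAs per cluster and truncated cluster entries of 8, 16, 0 and 40. The
 * loop issues exactly two clear operations: one for LBAs [8, 24), since the
 * first two entries are contiguous, and one for LBAs [40, 48). Entries of 0
 * are unallocated and are skipped.
 */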
2080 static void
2081 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2082 {
2083 struct spdk_blob_persist_ctx *ctx = cb_arg;
2084 struct spdk_blob *blob = ctx->blob;
2085 struct spdk_blob_store *bs = blob->bs;
2086 size_t i;
2087
2088 if (bserrno != 0) {
2089 blob_persist_complete(seq, ctx, bserrno);
2090 return;
2091 }
2092
2093 spdk_spin_lock(&bs->used_lock);
2094
2095 /* This loop starts at 1 because the first page is special and handled
2096 * below. The pages (except the first) are never written in place,
2097 * so any pages in the clean list can now be released.
2098 */
2099 for (i = 1; i < blob->clean.num_pages; i++) {
2100 bs_release_md_page(bs, blob->clean.pages[i]);
2101 }
2102
2103 if (blob->active.num_pages == 0) {
2104 uint32_t page_num;
2105
2106 page_num = bs_blobid_to_page(blob->id);
2107 bs_release_md_page(bs, page_num);
2108 }
2109
2110 spdk_spin_unlock(&bs->used_lock);
2111
2112 /* Move on to clearing clusters */
2113 blob_persist_clear_clusters(seq, ctx);
2114 }
2115
2116 static void
2117 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2118 {
2119 struct spdk_blob_persist_ctx *ctx = cb_arg;
2120 struct spdk_blob *blob = ctx->blob;
2121 struct spdk_blob_store *bs = blob->bs;
2122 uint64_t lba;
2123 uint64_t lba_count;
2124 spdk_bs_batch_t *batch;
2125 size_t i;
2126
2127 if (bserrno != 0) {
2128 blob_persist_complete(seq, ctx, bserrno);
2129 return;
2130 }
2131
2132 batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx);
2133
2134 lba_count = bs_byte_to_lba(bs, bs->md_page_size);
2135
2136 /* This loop starts at 1 because the first page is special and handled
2137 * below. The pages (except the first) are never written in place,
2138 * so any pages in the clean list must be zeroed.
2139 */
2140 for (i = 1; i < blob->clean.num_pages; i++) {
2141 lba = bs_md_page_to_lba(bs, blob->clean.pages[i]);
2142
2143 bs_batch_write_zeroes_dev(batch, lba, lba_count);
2144 }
2145
2146 /* The first page will only be zeroed if this is a delete. */
2147 if (blob->active.num_pages == 0) {
2148 uint32_t page_num;
2149
2150 /* The first page in the metadata goes where the blobid indicates */
2151 page_num = bs_blobid_to_page(blob->id);
2152 lba = bs_md_page_to_lba(bs, page_num);
2153
2154 bs_batch_write_zeroes_dev(batch, lba, lba_count);
2155 }
2156
2157 bs_batch_close(batch);
2158 }
2159
2160 static void
2161 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2162 {
2163 struct spdk_blob_persist_ctx *ctx = cb_arg;
2164 struct spdk_blob *blob = ctx->blob;
2165 struct spdk_blob_store *bs = blob->bs;
2166 uint64_t lba;
2167 uint32_t lba_count;
2168 struct spdk_blob_md_page *page;
2169
2170 if (bserrno != 0) {
2171 blob_persist_complete(seq, ctx, bserrno);
2172 return;
2173 }
2174
2175 if (blob->active.num_pages == 0) {
2176 /* Move on to the next step */
2177 blob_persist_zero_pages(seq, ctx, 0);
2178 return;
2179 }
2180
2181 lba_count = bs_byte_to_lba(bs, bs->md_page_size);
2182
2183 page = &ctx->pages[0];
2184 /* The first page in the metadata goes where the blobid indicates */
2185 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id));
2186
2187 bs_sequence_write_dev(seq, page, lba, lba_count,
2188 blob_persist_zero_pages, ctx);
2189 }
2190
2191 static void
2192 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
2193 {
2194 struct spdk_blob *blob = ctx->blob;
2195 struct spdk_blob_store *bs = blob->bs;
2196 uint64_t lba;
2197 uint32_t lba_count;
2198 struct spdk_blob_md_page *page;
2199 spdk_bs_batch_t *batch;
2200 size_t i;
2201
2202 /* Clusters don't move around in blobs. The list shrinks or grows
2203 * at the end, but no changes ever occur in the middle of the list.
2204 */
2205
2206 lba_count = bs_byte_to_lba(bs, sizeof(*page));
2207
2208 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx);
2209
2210 /* This starts at 1. The root page is not written until
2211 * all of the others are finished
2212 */
2213 for (i = 1; i < blob->active.num_pages; i++) {
2214 page = &ctx->pages[i];
2215 assert(page->sequence_num == i);
2216
2217 lba = bs_md_page_to_lba(bs, blob->active.pages[i]);
2218
2219 bs_batch_write_dev(batch, page, lba, lba_count);
2220 }
2221
2222 bs_batch_close(batch);
2223 }
2224
2225 static int
2226 blob_resize(struct spdk_blob *blob, uint64_t sz)
2227 {
2228 uint64_t i;
2229 uint64_t *tmp;
2230 uint64_t cluster;
2231 uint32_t lfmd; /* lowest free md page */
2232 uint64_t num_clusters;
2233 uint32_t *ep_tmp;
2234 uint64_t new_num_ep = 0, current_num_ep = 0;
2235 struct spdk_blob_store *bs;
2236 int rc;
2237
2238 bs = blob->bs;
2239
2240 blob_verify_md_op(blob);
2241
2242 if (blob->active.num_clusters == sz) {
2243 return 0;
2244 }
2245
2246 if (blob->active.num_clusters < blob->active.cluster_array_size) {
2247 /* If this blob was resized to be larger, then smaller, then
2248 * larger without syncing, then the cluster array already
2249 * contains spare assigned clusters we can use.
2250 */
2251 num_clusters = spdk_min(blob->active.cluster_array_size,
2252 sz);
2253 } else {
2254 num_clusters = blob->active.num_clusters;
2255 }
2256
2257 if (blob->use_extent_table) {
2258 /* Round up, since every cluster beyond the current Extent Table size
2259 * requires a new extent page. 
*/ 2260 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2261 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2262 } 2263 2264 assert(!spdk_spin_held(&bs->used_lock)); 2265 2266 /* Check first that we have enough clusters and md pages before we start claiming them. 2267 * bs->used_lock is held to ensure that clusters we think are free are still free when we go 2268 * to claim them later in this function. 2269 */ 2270 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2271 spdk_spin_lock(&bs->used_lock); 2272 if ((sz - num_clusters) > bs->num_free_clusters) { 2273 rc = -ENOSPC; 2274 goto out; 2275 } 2276 lfmd = 0; 2277 for (i = current_num_ep; i < new_num_ep ; i++) { 2278 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2279 if (lfmd == UINT32_MAX) { 2280 /* No more free md pages. Cannot satisfy the request */ 2281 rc = -ENOSPC; 2282 goto out; 2283 } 2284 } 2285 } 2286 2287 if (sz > num_clusters) { 2288 /* Expand the cluster array if necessary. 2289 * We only shrink the array when persisting. 2290 */ 2291 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2292 if (sz > 0 && tmp == NULL) { 2293 rc = -ENOMEM; 2294 goto out; 2295 } 2296 memset(tmp + blob->active.cluster_array_size, 0, 2297 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2298 blob->active.clusters = tmp; 2299 blob->active.cluster_array_size = sz; 2300 2301 /* Expand the extents table, only if enough clusters were added */ 2302 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2303 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2304 if (new_num_ep > 0 && ep_tmp == NULL) { 2305 rc = -ENOMEM; 2306 goto out; 2307 } 2308 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2309 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2310 blob->active.extent_pages = ep_tmp; 2311 blob->active.extent_pages_array_size = new_num_ep; 2312 } 2313 } 2314 2315 blob->state = SPDK_BLOB_STATE_DIRTY; 2316 2317 if (spdk_blob_is_thin_provisioned(blob) == false) { 2318 cluster = 0; 2319 lfmd = 0; 2320 for (i = num_clusters; i < sz; i++) { 2321 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2322 /* Do not increment lfmd here. lfmd will get updated 2323 * to the md_page allocated (if any) when a new extent 2324 * page is needed. Just pass that value again, 2325 * bs_allocate_cluster will just start at that index 2326 * to find the next free md_page when needed. 
2327 */ 2328 } 2329 } 2330 2331 /* If we are shrinking the blob, we must adjust num_allocated_clusters */ 2332 for (i = sz; i < num_clusters; i++) { 2333 if (blob->active.clusters[i] != 0) { 2334 blob->active.num_allocated_clusters--; 2335 } 2336 } 2337 2338 blob->active.num_clusters = sz; 2339 blob->active.num_extent_pages = new_num_ep; 2340 2341 rc = 0; 2342 out: 2343 if (spdk_spin_held(&bs->used_lock)) { 2344 spdk_spin_unlock(&bs->used_lock); 2345 } 2346 2347 return rc; 2348 } 2349 2350 static void 2351 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2352 { 2353 spdk_bs_sequence_t *seq = ctx->seq; 2354 struct spdk_blob *blob = ctx->blob; 2355 struct spdk_blob_store *bs = blob->bs; 2356 uint64_t i; 2357 uint32_t page_num; 2358 void *tmp; 2359 int rc; 2360 2361 /* Generate the new metadata */ 2362 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2363 if (rc < 0) { 2364 blob_persist_complete(seq, ctx, rc); 2365 return; 2366 } 2367 2368 assert(blob->active.num_pages >= 1); 2369 2370 /* Resize the cache of page indices */ 2371 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2372 if (!tmp) { 2373 blob_persist_complete(seq, ctx, -ENOMEM); 2374 return; 2375 } 2376 blob->active.pages = tmp; 2377 2378 /* Assign this metadata to pages. This requires two passes - one to verify that there are 2379 * enough pages and a second to actually claim them. The used_lock is held across 2380 * both passes to ensure things don't change in the middle. 2381 */ 2382 spdk_spin_lock(&bs->used_lock); 2383 page_num = 0; 2384 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 2385 for (i = 1; i < blob->active.num_pages; i++) { 2386 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2387 if (page_num == UINT32_MAX) { 2388 spdk_spin_unlock(&bs->used_lock); 2389 blob_persist_complete(seq, ctx, -ENOMEM); 2390 return; 2391 } 2392 page_num++; 2393 } 2394 2395 page_num = 0; 2396 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2397 for (i = 1; i < blob->active.num_pages; i++) { 2398 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2399 ctx->pages[i - 1].next = page_num; 2400 /* Now that previous metadata page is complete, calculate the crc for it. */ 2401 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2402 blob->active.pages[i] = page_num; 2403 bs_claim_md_page(bs, page_num); 2404 SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num, 2405 blob->id); 2406 page_num++; 2407 } 2408 spdk_spin_unlock(&bs->used_lock); 2409 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2410 /* Start writing the metadata from last page to first */ 2411 blob->state = SPDK_BLOB_STATE_CLEAN; 2412 blob_persist_write_page_chain(seq, ctx); 2413 } 2414 2415 static void 2416 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2417 { 2418 struct spdk_blob_persist_ctx *ctx = cb_arg; 2419 struct spdk_blob *blob = ctx->blob; 2420 size_t i; 2421 uint32_t extent_page_id; 2422 uint32_t page_count = 0; 2423 int rc; 2424 2425 if (ctx->extent_page != NULL) { 2426 spdk_free(ctx->extent_page); 2427 ctx->extent_page = NULL; 2428 } 2429 2430 if (bserrno != 0) { 2431 blob_persist_complete(seq, ctx, bserrno); 2432 return; 2433 } 2434 2435 /* Only write out Extent Pages when blob was resized. 
*/ 2436 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2437 extent_page_id = blob->active.extent_pages[i]; 2438 if (extent_page_id == 0) { 2439 /* No Extent Page to persist */ 2440 assert(spdk_blob_is_thin_provisioned(blob)); 2441 continue; 2442 } 2443 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2444 ctx->next_extent_page = i + 1; 2445 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2446 if (rc < 0) { 2447 blob_persist_complete(seq, ctx, rc); 2448 return; 2449 } 2450 2451 blob->state = SPDK_BLOB_STATE_DIRTY; 2452 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2453 2454 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2455 2456 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2457 bs_byte_to_lba(blob->bs, blob->bs->md_page_size), 2458 blob_persist_write_extent_pages, ctx); 2459 return; 2460 } 2461 2462 blob_persist_generate_new_md(ctx); 2463 } 2464 2465 static void 2466 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2467 { 2468 struct spdk_blob_persist_ctx *ctx = cb_arg; 2469 struct spdk_blob *blob = ctx->blob; 2470 2471 if (bserrno != 0) { 2472 blob_persist_complete(seq, ctx, bserrno); 2473 return; 2474 } 2475 2476 if (blob->active.num_pages == 0) { 2477 /* This is the signal that the blob should be deleted. 2478 * Immediately jump to the clean up routine. */ 2479 assert(blob->clean.num_pages > 0); 2480 blob->state = SPDK_BLOB_STATE_CLEAN; 2481 blob_persist_zero_pages(seq, ctx, 0); 2482 return; 2483 2484 } 2485 2486 if (blob->clean.num_clusters < blob->active.num_clusters) { 2487 /* Blob was resized up */ 2488 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2489 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2490 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2491 /* Blob was resized down */ 2492 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2493 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2494 } else { 2495 /* No change in size occurred */ 2496 blob_persist_generate_new_md(ctx); 2497 return; 2498 } 2499 2500 blob_persist_write_extent_pages(seq, ctx, 0); 2501 } 2502 2503 struct spdk_bs_mark_dirty { 2504 struct spdk_blob_store *bs; 2505 struct spdk_bs_super_block *super; 2506 spdk_bs_sequence_cpl cb_fn; 2507 void *cb_arg; 2508 }; 2509 2510 static void 2511 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2512 { 2513 struct spdk_bs_mark_dirty *ctx = cb_arg; 2514 2515 if (bserrno == 0) { 2516 ctx->bs->clean = 0; 2517 } 2518 2519 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 2520 2521 spdk_free(ctx->super); 2522 free(ctx); 2523 } 2524 2525 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2526 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2527 2528 2529 static void 2530 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2531 { 2532 struct spdk_bs_mark_dirty *ctx = cb_arg; 2533 int rc; 2534 2535 if (bserrno != 0) { 2536 bs_mark_dirty_write_cpl(seq, ctx, bserrno); 2537 return; 2538 } 2539 2540 rc = bs_super_validate(ctx->super, ctx->bs); 2541 if (rc != 0) { 2542 bs_mark_dirty_write_cpl(seq, ctx, rc); 2543 return; 2544 } 2545 2546 ctx->super->clean = 0; 2547 if (ctx->super->size == 0) { 2548 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 
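/* A recorded size of 0 presumably means the super block predates the
 * size field or was never populated; treat such a blobstore as spanning
 * the entire device. */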
2549 } 2550 2551 bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx); 2552 } 2553 2554 static void 2555 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2556 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2557 { 2558 struct spdk_bs_mark_dirty *ctx; 2559 2560 /* Blobstore is already marked dirty */ 2561 if (bs->clean == 0) { 2562 cb_fn(seq, cb_arg, 0); 2563 return; 2564 } 2565 2566 ctx = calloc(1, sizeof(*ctx)); 2567 if (!ctx) { 2568 cb_fn(seq, cb_arg, -ENOMEM); 2569 return; 2570 } 2571 ctx->bs = bs; 2572 ctx->cb_fn = cb_fn; 2573 ctx->cb_arg = cb_arg; 2574 2575 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2576 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2577 if (!ctx->super) { 2578 free(ctx); 2579 cb_fn(seq, cb_arg, -ENOMEM); 2580 return; 2581 } 2582 2583 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 2584 bs_byte_to_lba(bs, sizeof(*ctx->super)), 2585 bs_mark_dirty_write, ctx); 2586 } 2587 2588 /* Write a blob to disk */ 2589 static void 2590 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2591 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2592 { 2593 struct spdk_blob_persist_ctx *ctx; 2594 2595 blob_verify_md_op(blob); 2596 2597 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2598 cb_fn(seq, cb_arg, 0); 2599 return; 2600 } 2601 2602 ctx = calloc(1, sizeof(*ctx)); 2603 if (!ctx) { 2604 cb_fn(seq, cb_arg, -ENOMEM); 2605 return; 2606 } 2607 ctx->blob = blob; 2608 ctx->seq = seq; 2609 ctx->cb_fn = cb_fn; 2610 ctx->cb_arg = cb_arg; 2611 2612 /* Multiple blob persists can affect one another, via blob->state or 2613 * blob mutable data changes. To prevent it, queue up the persists. */ 2614 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2615 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2616 return; 2617 } 2618 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2619 2620 bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx); 2621 } 2622 2623 struct spdk_blob_copy_cluster_ctx { 2624 struct spdk_blob *blob; 2625 uint8_t *buf; 2626 uint64_t io_unit; 2627 uint64_t new_cluster; 2628 uint32_t new_extent_page; 2629 spdk_bs_sequence_t *seq; 2630 struct spdk_blob_md_page *new_cluster_page; 2631 }; 2632 2633 struct spdk_blob_free_cluster_ctx { 2634 struct spdk_blob *blob; 2635 uint64_t page; 2636 struct spdk_blob_md_page *md_page; 2637 uint64_t cluster_num; 2638 uint32_t extent_page; 2639 spdk_bs_sequence_t *seq; 2640 }; 2641 2642 static void 2643 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2644 { 2645 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2646 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2647 TAILQ_HEAD(, spdk_bs_request_set) requests; 2648 spdk_bs_user_op_t *op; 2649 2650 TAILQ_INIT(&requests); 2651 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2652 2653 while (!TAILQ_EMPTY(&requests)) { 2654 op = TAILQ_FIRST(&requests); 2655 TAILQ_REMOVE(&requests, op, link); 2656 if (bserrno == 0) { 2657 bs_user_op_execute(op); 2658 } else { 2659 bs_user_op_abort(op, bserrno); 2660 } 2661 } 2662 2663 spdk_free(ctx->buf); 2664 free(ctx); 2665 } 2666 2667 static void 2668 blob_free_cluster_cpl(void *cb_arg, int bserrno) 2669 { 2670 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 2671 spdk_bs_sequence_t *seq = ctx->seq; 2672 2673 bs_sequence_finish(seq, bserrno); 2674 2675 free(ctx); 2676 } 2677 2678 static void 2679 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx) 2680 { 2681 
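/* Roll back a speculative allocation: return the claimed cluster, and the
 * extent page if one was claimed for it, to the free pools. */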
spdk_spin_lock(&ctx->blob->bs->used_lock);
2682 bs_release_cluster(ctx->blob->bs, ctx->new_cluster);
2683 if (ctx->new_extent_page != 0) {
2684 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page);
2685 }
2686 spdk_spin_unlock(&ctx->blob->bs->used_lock);
2687 }
2688
2689 static void
2690 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno)
2691 {
2692 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2693
2694 if (bserrno) {
2695 SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno);
2696 }
2697
2698 blob_insert_cluster_revert(ctx);
2699 bs_sequence_finish(ctx->seq, bserrno);
2700 }
2701
2702 static void
2703 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx)
2704 {
2705 struct spdk_bs_cpl cpl;
2706 spdk_bs_batch_t *batch;
2707 struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel);
2708
2709 /*
2710 * We allocated a cluster and copied data into it, but it turns out we don't
2711 * need this cluster after all, so we want to release it. We must ensure that
2712 * the data on this cluster is cleared:
2713 * the cluster may later be re-allocated, by a thick-provisioned blob for
2714 * example, and a read from that blob issued before any write must return zeroes.
2715 */
2716
2717 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2718 cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl;
2719 cpl.u.blob_basic.cb_arg = ctx;
2720
2721 batch = bs_batch_open(ch, &cpl, ctx->blob);
2722 if (!batch) {
2723 blob_insert_cluster_clear_cpl(ctx, -ENOMEM);
2724 return;
2725 }
2726
2727 bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2728 bs_cluster_to_lba(ctx->blob->bs, 1));
2729 bs_batch_close(batch);
2730 }
2731
2732 static void
2733 blob_insert_cluster_cpl(void *cb_arg, int bserrno)
2734 {
2735 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2736
2737 if (bserrno) {
2738 if (bserrno == -EEXIST) {
2739 /* The metadata insert failed because another thread
2740 * allocated the cluster first. Clear and free our cluster
2741 * but continue without error. 
*/ 2742 blob_insert_cluster_clear(ctx); 2743 return; 2744 } 2745 2746 blob_insert_cluster_revert(ctx); 2747 } 2748 2749 bs_sequence_finish(ctx->seq, bserrno); 2750 } 2751 2752 static void 2753 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2754 { 2755 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2756 uint32_t cluster_number; 2757 2758 if (bserrno) { 2759 /* The write failed, so jump to the final completion handler */ 2760 bs_sequence_finish(seq, bserrno); 2761 return; 2762 } 2763 2764 cluster_number = bs_io_unit_to_cluster(ctx->blob->bs, ctx->io_unit); 2765 2766 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2767 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2768 } 2769 2770 static void 2771 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2772 { 2773 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2774 2775 if (bserrno != 0) { 2776 /* The read failed, so jump to the final completion handler */ 2777 bs_sequence_finish(seq, bserrno); 2778 return; 2779 } 2780 2781 /* Write whole cluster */ 2782 bs_sequence_write_dev(seq, ctx->buf, 2783 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2784 bs_cluster_to_lba(ctx->blob->bs, 1), 2785 blob_write_copy_cpl, ctx); 2786 } 2787 2788 static bool 2789 blob_can_copy(struct spdk_blob *blob, uint64_t cluster_start_io_unit, uint64_t *base_lba) 2790 { 2791 uint64_t lba = bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit); 2792 2793 return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) && 2794 blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba); 2795 } 2796 2797 static void 2798 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba) 2799 { 2800 struct spdk_blob *blob = ctx->blob; 2801 uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz); 2802 2803 bs_sequence_copy_dev(ctx->seq, 2804 bs_cluster_to_lba(blob->bs, ctx->new_cluster), 2805 src_lba, 2806 lba_count, 2807 blob_write_copy_cpl, ctx); 2808 } 2809 2810 static void 2811 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2812 struct spdk_io_channel *_ch, 2813 uint64_t io_unit, spdk_bs_user_op_t *op) 2814 { 2815 struct spdk_bs_cpl cpl; 2816 struct spdk_bs_channel *ch; 2817 struct spdk_blob_copy_cluster_ctx *ctx; 2818 uint64_t cluster_start_io_unit; 2819 uint32_t cluster_number; 2820 bool is_zeroes; 2821 bool can_copy; 2822 bool is_valid_range; 2823 uint64_t copy_src_lba; 2824 int rc; 2825 2826 ch = spdk_io_channel_get_ctx(_ch); 2827 2828 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2829 /* There are already operations pending. Queue this user op 2830 * and return because it will be re-executed when the outstanding 2831 * cluster allocation completes. */ 2832 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2833 return; 2834 } 2835 2836 /* Round the io_unit offset down to the first io_unit in the cluster */ 2837 cluster_start_io_unit = bs_io_unit_to_cluster_start(blob, io_unit); 2838 2839 /* Calculate which index in the metadata cluster array the corresponding 2840 * cluster is supposed to be at. 
*/ 2841 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2842 2843 ctx = calloc(1, sizeof(*ctx)); 2844 if (!ctx) { 2845 bs_user_op_abort(op, -ENOMEM); 2846 return; 2847 } 2848 2849 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2850 2851 ctx->blob = blob; 2852 ctx->io_unit = cluster_start_io_unit; 2853 ctx->new_cluster_page = ch->new_cluster_page; 2854 memset(ctx->new_cluster_page, 0, blob->bs->md_page_size); 2855 2856 /* Check if the cluster that we intend to do CoW for is valid for 2857 * the backing dev. For zeroes backing dev, it'll be always valid. 2858 * For other backing dev e.g. a snapshot, it could be invalid if 2859 * the blob has been resized after snapshot was taken. */ 2860 is_valid_range = blob->back_bs_dev->is_range_valid(blob->back_bs_dev, 2861 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2862 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2863 2864 can_copy = is_valid_range && blob_can_copy(blob, cluster_start_io_unit, ©_src_lba); 2865 2866 is_zeroes = is_valid_range && blob->back_bs_dev->is_zeroes(blob->back_bs_dev, 2867 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2868 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2869 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) { 2870 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2871 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2872 if (!ctx->buf) { 2873 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2874 blob->bs->cluster_sz); 2875 free(ctx); 2876 bs_user_op_abort(op, -ENOMEM); 2877 return; 2878 } 2879 } 2880 2881 spdk_spin_lock(&blob->bs->used_lock); 2882 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2883 false); 2884 spdk_spin_unlock(&blob->bs->used_lock); 2885 if (rc != 0) { 2886 spdk_free(ctx->buf); 2887 free(ctx); 2888 bs_user_op_abort(op, rc); 2889 return; 2890 } 2891 2892 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2893 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2894 cpl.u.blob_basic.cb_arg = ctx; 2895 2896 ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob); 2897 if (!ctx->seq) { 2898 spdk_spin_lock(&blob->bs->used_lock); 2899 bs_release_cluster(blob->bs, ctx->new_cluster); 2900 spdk_spin_unlock(&blob->bs->used_lock); 2901 spdk_free(ctx->buf); 2902 free(ctx); 2903 bs_user_op_abort(op, -ENOMEM); 2904 return; 2905 } 2906 2907 /* Queue the user op to block other incoming operations */ 2908 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2909 2910 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) { 2911 if (can_copy) { 2912 blob_copy(ctx, op, copy_src_lba); 2913 } else { 2914 /* Read cluster from backing device */ 2915 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2916 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2917 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2918 blob_write_copy, ctx); 2919 } 2920 2921 } else { 2922 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2923 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2924 } 2925 } 2926 2927 static inline bool 2928 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2929 uint64_t *lba, uint64_t *lba_count) 2930 { 2931 *lba_count = length; 2932 2933 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2934 assert(blob->back_bs_dev != NULL); 2935 *lba = 
bs_io_unit_to_back_dev_lba(blob, io_unit); 2936 *lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count); 2937 return false; 2938 } else { 2939 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2940 return true; 2941 } 2942 } 2943 2944 struct op_split_ctx { 2945 struct spdk_blob *blob; 2946 struct spdk_io_channel *channel; 2947 uint64_t io_unit_offset; 2948 uint64_t io_units_remaining; 2949 void *curr_payload; 2950 enum spdk_blob_op_type op_type; 2951 spdk_bs_sequence_t *seq; 2952 bool in_submit_ctx; 2953 bool completed_in_submit_ctx; 2954 bool done; 2955 }; 2956 2957 static void 2958 blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2959 { 2960 struct op_split_ctx *ctx = cb_arg; 2961 struct spdk_blob *blob = ctx->blob; 2962 struct spdk_io_channel *ch = ctx->channel; 2963 enum spdk_blob_op_type op_type = ctx->op_type; 2964 uint8_t *buf; 2965 uint64_t offset; 2966 uint64_t length; 2967 uint64_t op_length; 2968 2969 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2970 bs_sequence_finish(ctx->seq, bserrno); 2971 if (ctx->in_submit_ctx) { 2972 /* Defer freeing of the ctx object, since it will be 2973 * accessed when this unwinds back to the submission 2974 * context. 2975 */ 2976 ctx->done = true; 2977 } else { 2978 free(ctx); 2979 } 2980 return; 2981 } 2982 2983 if (ctx->in_submit_ctx) { 2984 /* If this split operation completed in the context 2985 * of its submission, mark the flag and return immediately 2986 * to avoid recursion. 2987 */ 2988 ctx->completed_in_submit_ctx = true; 2989 return; 2990 } 2991 2992 while (true) { 2993 ctx->completed_in_submit_ctx = false; 2994 2995 offset = ctx->io_unit_offset; 2996 length = ctx->io_units_remaining; 2997 buf = ctx->curr_payload; 2998 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob, 2999 offset)); 3000 3001 /* Update length and payload for next operation */ 3002 ctx->io_units_remaining -= op_length; 3003 ctx->io_unit_offset += op_length; 3004 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 3005 ctx->curr_payload += op_length * blob->bs->io_unit_size; 3006 } 3007 3008 assert(!ctx->in_submit_ctx); 3009 ctx->in_submit_ctx = true; 3010 3011 switch (op_type) { 3012 case SPDK_BLOB_READ: 3013 spdk_blob_io_read(blob, ch, buf, offset, op_length, 3014 blob_request_submit_op_split_next, ctx); 3015 break; 3016 case SPDK_BLOB_WRITE: 3017 spdk_blob_io_write(blob, ch, buf, offset, op_length, 3018 blob_request_submit_op_split_next, ctx); 3019 break; 3020 case SPDK_BLOB_UNMAP: 3021 spdk_blob_io_unmap(blob, ch, offset, op_length, 3022 blob_request_submit_op_split_next, ctx); 3023 break; 3024 case SPDK_BLOB_WRITE_ZEROES: 3025 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 3026 blob_request_submit_op_split_next, ctx); 3027 break; 3028 case SPDK_BLOB_READV: 3029 case SPDK_BLOB_WRITEV: 3030 SPDK_ERRLOG("readv/write not valid\n"); 3031 bs_sequence_finish(ctx->seq, -EINVAL); 3032 free(ctx); 3033 return; 3034 } 3035 3036 #ifndef __clang_analyzer__ 3037 /* scan-build reports a false positive around accessing the ctx here. It 3038 * forms a path that recursively calls this function, but then says 3039 * "assuming ctx->in_submit_ctx is false", when that isn't possible. 3040 * This path does free(ctx), returns to here, and reports a use-after-free 3041 * bug. Wrapping this bit of code so that scan-build doesn't see it 3042 * works around the scan-build bug. 
3043 */ 3044 assert(ctx->in_submit_ctx); 3045 ctx->in_submit_ctx = false; 3046 3047 /* If the operation completed immediately, loop back and submit the 3048 * next operation. Otherwise we can return and the next split 3049 * operation will get submitted when this current operation is 3050 * later completed asynchronously. 3051 */ 3052 if (ctx->completed_in_submit_ctx) { 3053 continue; 3054 } else if (ctx->done) { 3055 free(ctx); 3056 } 3057 #endif 3058 break; 3059 } 3060 } 3061 3062 static void 3063 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 3064 void *payload, uint64_t offset, uint64_t length, 3065 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3066 { 3067 struct op_split_ctx *ctx; 3068 spdk_bs_sequence_t *seq; 3069 struct spdk_bs_cpl cpl; 3070 3071 assert(blob != NULL); 3072 3073 ctx = calloc(1, sizeof(struct op_split_ctx)); 3074 if (ctx == NULL) { 3075 cb_fn(cb_arg, -ENOMEM); 3076 return; 3077 } 3078 3079 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3080 cpl.u.blob_basic.cb_fn = cb_fn; 3081 cpl.u.blob_basic.cb_arg = cb_arg; 3082 3083 seq = bs_sequence_start_blob(ch, &cpl, blob); 3084 if (!seq) { 3085 free(ctx); 3086 cb_fn(cb_arg, -ENOMEM); 3087 return; 3088 } 3089 3090 ctx->blob = blob; 3091 ctx->channel = ch; 3092 ctx->curr_payload = payload; 3093 ctx->io_unit_offset = offset; 3094 ctx->io_units_remaining = length; 3095 ctx->op_type = op_type; 3096 ctx->seq = seq; 3097 3098 blob_request_submit_op_split_next(ctx, 0); 3099 } 3100 3101 static void 3102 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno) 3103 { 3104 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 3105 3106 if (bserrno) { 3107 bs_sequence_finish(ctx->seq, bserrno); 3108 free(ctx); 3109 return; 3110 } 3111 3112 blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num, 3113 ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx); 3114 } 3115 3116 static void 3117 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 3118 void *payload, uint64_t offset, uint64_t length, 3119 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3120 { 3121 struct spdk_bs_cpl cpl; 3122 uint64_t lba; 3123 uint64_t lba_count; 3124 bool is_allocated; 3125 3126 assert(blob != NULL); 3127 3128 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3129 cpl.u.blob_basic.cb_fn = cb_fn; 3130 cpl.u.blob_basic.cb_arg = cb_arg; 3131 3132 if (blob->frozen_refcnt) { 3133 /* This blob I/O is frozen */ 3134 spdk_bs_user_op_t *op; 3135 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3136 3137 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3138 if (!op) { 3139 cb_fn(cb_arg, -ENOMEM); 3140 return; 3141 } 3142 3143 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3144 3145 return; 3146 } 3147 3148 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3149 3150 switch (op_type) { 3151 case SPDK_BLOB_READ: { 3152 spdk_bs_batch_t *batch; 3153 3154 batch = bs_batch_open(_ch, &cpl, blob); 3155 if (!batch) { 3156 cb_fn(cb_arg, -ENOMEM); 3157 return; 3158 } 3159 3160 if (is_allocated) { 3161 /* Read from the blob */ 3162 bs_batch_read_dev(batch, payload, lba, lba_count); 3163 } else { 3164 /* Read from the backing block device */ 3165 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 3166 } 3167 3168 bs_batch_close(batch); 3169 break; 3170 } 3171 case SPDK_BLOB_WRITE: 3172 case SPDK_BLOB_WRITE_ZEROES: { 3173 if (is_allocated) { 3174 /* Write to the 
blob */
3175 spdk_bs_batch_t *batch;
3176
3177 if (lba_count == 0) {
3178 cb_fn(cb_arg, 0);
3179 return;
3180 }
3181
3182 batch = bs_batch_open(_ch, &cpl, blob);
3183 if (!batch) {
3184 cb_fn(cb_arg, -ENOMEM);
3185 return;
3186 }
3187
3188 if (op_type == SPDK_BLOB_WRITE) {
3189 bs_batch_write_dev(batch, payload, lba, lba_count);
3190 } else {
3191 bs_batch_write_zeroes_dev(batch, lba, lba_count);
3192 }
3193
3194 bs_batch_close(batch);
3195 } else {
3196 /* Queue this operation and allocate the cluster */
3197 spdk_bs_user_op_t *op;
3198
3199 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
3200 if (!op) {
3201 cb_fn(cb_arg, -ENOMEM);
3202 return;
3203 }
3204
3205 bs_allocate_and_copy_cluster(blob, _ch, offset, op);
3206 }
3207 break;
3208 }
3209 case SPDK_BLOB_UNMAP: {
3210 struct spdk_blob_free_cluster_ctx *ctx = NULL;
3211 spdk_bs_batch_t *batch;
3212
3213 /* If the request is cluster-aligned and covers exactly one cluster, release that cluster */
3214 if (spdk_blob_is_thin_provisioned(blob) && is_allocated &&
3215 blob_backed_with_zeroes_dev(blob) &&
3216 bs_io_units_per_cluster(blob) == length) {
3217 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
3218 uint64_t cluster_start_page;
3219 uint32_t cluster_number;
3220
3221 assert(offset % bs_io_units_per_cluster(blob) == 0);
3222
3223 /* Round the io_unit offset down to the first io_unit in the cluster */
3224 cluster_start_page = bs_io_unit_to_cluster_start(blob, offset);
3225
3226 /* Calculate which index in the metadata cluster array the corresponding
3227 * cluster is supposed to be at. */
3228 cluster_number = bs_io_unit_to_cluster_number(blob, offset);
3229
3230 ctx = calloc(1, sizeof(*ctx));
3231 if (!ctx) {
3232 cb_fn(cb_arg, -ENOMEM);
3233 return;
3234 }
3235 /* When freeing a cluster the flow should be (in order):
3236 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak
3237 * old data)
3238 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the
3239 * cluster), update and sync metadata freeing the cluster
3240 * 3. Once the metadata update is done, complete the user unmap request
3241 */
3242 ctx->blob = blob;
3243 ctx->page = cluster_start_page;
3244 ctx->cluster_num = cluster_number;
3245 ctx->md_page = bs_channel->new_cluster_page;
3246 ctx->seq = bs_sequence_start_bs(_ch, &cpl);
3247 if (!ctx->seq) {
3248 free(ctx);
3249 cb_fn(cb_arg, -ENOMEM);
3250 return;
3251 }
3252
3253 if (blob->use_extent_table) {
3254 ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number);
3255 }
3256
3257 cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete;
3258 cpl.u.blob_basic.cb_arg = ctx;
3259 }
3260
3261 batch = bs_batch_open(_ch, &cpl, blob);
3262 if (!batch) {
3263 free(ctx);
3264 cb_fn(cb_arg, -ENOMEM);
3265 return;
3266 }
3267
3268 if (is_allocated) {
3269 bs_batch_unmap_dev(batch, lba, lba_count);
3270 }
3271
3272 bs_batch_close(batch);
3273 break;
3274 }
3275 case SPDK_BLOB_READV:
3276 case SPDK_BLOB_WRITEV:
3277 SPDK_ERRLOG("readv/writev not valid\n");
3278 cb_fn(cb_arg, -EINVAL);
3279 break;
3280 }
3281 }
3282
3283 static void
3284 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
3285 void *payload, uint64_t offset, uint64_t length,
3286 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
3287 {
3288 assert(blob != NULL);
3289
3290 if (blob->data_ro && op_type != SPDK_BLOB_READ) {
3291 cb_fn(cb_arg, -EPERM);
3292 return;
3293 }
3294
3295 if (length == 0) {
3296 cb_fn(cb_arg, 0);
3297 return;
3298 }
3299
3300 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
3301 cb_fn(cb_arg, -EINVAL);
3302 return;
3303 }
3304 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) {
3305 blob_request_submit_op_single(_channel, blob, payload, offset, length,
3306 cb_fn, cb_arg, op_type);
3307 } else {
3308 blob_request_submit_op_split(_channel, blob, payload, offset, length,
3309 cb_fn, cb_arg, op_type);
3310 }
3311 }
3312
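/*
 * Routing example for the check above (hypothetical geometry): with 8
 * io_units per cluster, a 4 io_unit request at offset 8 stays inside one
 * cluster and takes the single-op path, while a 6 io_unit request at
 * offset 4 crosses the boundary at io_unit 8 and is split into operations
 * covering io_units [4, 8) and [8, 10).
 */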
3313 struct rw_iov_ctx {
3314 struct spdk_blob *blob;
3315 struct spdk_io_channel *channel;
3316 spdk_blob_op_complete cb_fn;
3317 void *cb_arg;
3318 bool read;
3319 int iovcnt;
3320 struct iovec *orig_iov;
3321 uint64_t io_unit_offset;
3322 uint64_t io_units_remaining;
3323 uint64_t io_units_done;
3324 struct spdk_blob_ext_io_opts *ext_io_opts;
3325 struct iovec iov[0];
3326 };
3327
3328 static void
3329 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3330 {
3331 assert(cb_arg == NULL);
3332 bs_sequence_finish(seq, bserrno);
3333 }
3334
3335 static void
3336 rw_iov_split_next(void *cb_arg, int bserrno)
3337 {
3338 struct rw_iov_ctx *ctx = cb_arg;
3339 struct spdk_blob *blob = ctx->blob;
3340 struct iovec *iov, *orig_iov;
3341 int iovcnt;
3342 size_t orig_iovoff;
3343 uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
3344 uint64_t byte_count;
3345
3346 if (bserrno != 0 || ctx->io_units_remaining == 0) {
3347 ctx->cb_fn(ctx->cb_arg, bserrno);
3348 free(ctx);
3349 return;
3350 }
3351
3352 io_unit_offset = ctx->io_unit_offset;
3353 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
3354 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
3355 /*
3356 * Get index and offset into the original iov array for our current position in the I/O sequence.
3357 * byte_count keeps track of how many bytes remain until orig_iov and orig_iovoff
3358 * point to the current position in the I/O sequence. 
3359 */ 3360 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 3361 orig_iov = &ctx->orig_iov[0]; 3362 orig_iovoff = 0; 3363 while (byte_count > 0) { 3364 if (byte_count >= orig_iov->iov_len) { 3365 byte_count -= orig_iov->iov_len; 3366 orig_iov++; 3367 } else { 3368 orig_iovoff = byte_count; 3369 byte_count = 0; 3370 } 3371 } 3372 3373 /* 3374 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 3375 * bytes of this next I/O remain to be accounted for in the new iov array. 3376 */ 3377 byte_count = io_units_count * blob->bs->io_unit_size; 3378 iov = &ctx->iov[0]; 3379 iovcnt = 0; 3380 while (byte_count > 0) { 3381 assert(iovcnt < ctx->iovcnt); 3382 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 3383 iov->iov_base = orig_iov->iov_base + orig_iovoff; 3384 byte_count -= iov->iov_len; 3385 orig_iovoff = 0; 3386 orig_iov++; 3387 iov++; 3388 iovcnt++; 3389 } 3390 3391 ctx->io_unit_offset += io_units_count; 3392 ctx->io_units_remaining -= io_units_count; 3393 ctx->io_units_done += io_units_count; 3394 iov = &ctx->iov[0]; 3395 3396 if (ctx->read) { 3397 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3398 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3399 } else { 3400 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3401 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3402 } 3403 } 3404 3405 static void 3406 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3407 struct iovec *iov, int iovcnt, 3408 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 3409 struct spdk_blob_ext_io_opts *ext_io_opts) 3410 { 3411 struct spdk_bs_cpl cpl; 3412 3413 assert(blob != NULL); 3414 3415 if (!read && blob->data_ro) { 3416 cb_fn(cb_arg, -EPERM); 3417 return; 3418 } 3419 3420 if (length == 0) { 3421 cb_fn(cb_arg, 0); 3422 return; 3423 } 3424 3425 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3426 cb_fn(cb_arg, -EINVAL); 3427 return; 3428 } 3429 3430 /* 3431 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 3432 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 3433 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 3434 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 3435 * to allocate a separate iov array and split the I/O such that none of the resulting 3436 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 3437 * but since this case happens very infrequently, any performance impact will be negligible. 3438 * 3439 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 3440 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 3441 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 3442 * when the batch was completed, to allow for freeing the memory for the iov arrays. 
3443 */ 3444 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 3445 uint64_t lba_count; 3446 uint64_t lba; 3447 bool is_allocated; 3448 3449 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3450 cpl.u.blob_basic.cb_fn = cb_fn; 3451 cpl.u.blob_basic.cb_arg = cb_arg; 3452 3453 if (blob->frozen_refcnt) { 3454 /* This blob I/O is frozen */ 3455 enum spdk_blob_op_type op_type; 3456 spdk_bs_user_op_t *op; 3457 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 3458 3459 op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 3460 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 3461 if (!op) { 3462 cb_fn(cb_arg, -ENOMEM); 3463 return; 3464 } 3465 3466 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3467 3468 return; 3469 } 3470 3471 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3472 3473 if (read) { 3474 spdk_bs_sequence_t *seq; 3475 3476 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3477 if (!seq) { 3478 cb_fn(cb_arg, -ENOMEM); 3479 return; 3480 } 3481 3482 seq->ext_io_opts = ext_io_opts; 3483 3484 if (is_allocated) { 3485 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3486 } else { 3487 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3488 rw_iov_done, NULL); 3489 } 3490 } else { 3491 if (is_allocated) { 3492 spdk_bs_sequence_t *seq; 3493 3494 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3495 if (!seq) { 3496 cb_fn(cb_arg, -ENOMEM); 3497 return; 3498 } 3499 3500 seq->ext_io_opts = ext_io_opts; 3501 3502 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3503 } else { 3504 /* Queue this operation and allocate the cluster */ 3505 spdk_bs_user_op_t *op; 3506 3507 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3508 length); 3509 if (!op) { 3510 cb_fn(cb_arg, -ENOMEM); 3511 return; 3512 } 3513 3514 op->ext_io_opts = ext_io_opts; 3515 3516 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3517 } 3518 } 3519 } else { 3520 struct rw_iov_ctx *ctx; 3521 3522 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3523 if (ctx == NULL) { 3524 cb_fn(cb_arg, -ENOMEM); 3525 return; 3526 } 3527 3528 ctx->blob = blob; 3529 ctx->channel = _channel; 3530 ctx->cb_fn = cb_fn; 3531 ctx->cb_arg = cb_arg; 3532 ctx->read = read; 3533 ctx->orig_iov = iov; 3534 ctx->iovcnt = iovcnt; 3535 ctx->io_unit_offset = offset; 3536 ctx->io_units_remaining = length; 3537 ctx->io_units_done = 0; 3538 ctx->ext_io_opts = ext_io_opts; 3539 3540 rw_iov_split_next(ctx, 0); 3541 } 3542 } 3543 3544 static struct spdk_blob * 3545 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3546 { 3547 struct spdk_blob find; 3548 3549 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3550 return NULL; 3551 } 3552 3553 find.id = blobid; 3554 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3555 } 3556 3557 static void 3558 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3559 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3560 { 3561 assert(blob != NULL); 3562 *snapshot_entry = NULL; 3563 *clone_entry = NULL; 3564 3565 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3566 return; 3567 } 3568 3569 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3570 if ((*snapshot_entry)->id == blob->parent_id) { 3571 break; 3572 } 3573 } 3574 3575 if (*snapshot_entry != NULL) { 3576 TAILQ_FOREACH(*clone_entry, 
&(*snapshot_entry)->clones, link) { 3577 if ((*clone_entry)->id == blob->id) { 3578 break; 3579 } 3580 } 3581 3582 assert(*clone_entry != NULL); 3583 } 3584 } 3585 3586 static int 3587 bs_channel_create(void *io_device, void *ctx_buf) 3588 { 3589 struct spdk_blob_store *bs = io_device; 3590 struct spdk_bs_channel *channel = ctx_buf; 3591 struct spdk_bs_dev *dev; 3592 uint32_t max_ops = bs->max_channel_ops; 3593 uint32_t i; 3594 3595 dev = bs->dev; 3596 3597 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3598 if (!channel->req_mem) { 3599 return -1; 3600 } 3601 3602 TAILQ_INIT(&channel->reqs); 3603 3604 for (i = 0; i < max_ops; i++) { 3605 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3606 } 3607 3608 channel->bs = bs; 3609 channel->dev = dev; 3610 channel->dev_channel = dev->create_channel(dev); 3611 3612 if (!channel->dev_channel) { 3613 SPDK_ERRLOG("Failed to create device channel.\n"); 3614 free(channel->req_mem); 3615 return -1; 3616 } 3617 3618 channel->new_cluster_page = spdk_zmalloc(bs->md_page_size, 0, NULL, SPDK_ENV_NUMA_ID_ANY, 3619 SPDK_MALLOC_DMA); 3620 if (!channel->new_cluster_page) { 3621 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3622 free(channel->req_mem); 3623 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3624 return -1; 3625 } 3626 3627 TAILQ_INIT(&channel->need_cluster_alloc); 3628 TAILQ_INIT(&channel->queued_io); 3629 RB_INIT(&channel->esnap_channels); 3630 3631 return 0; 3632 } 3633 3634 static void 3635 bs_channel_destroy(void *io_device, void *ctx_buf) 3636 { 3637 struct spdk_bs_channel *channel = ctx_buf; 3638 spdk_bs_user_op_t *op; 3639 3640 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3641 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3642 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3643 bs_user_op_abort(op, -EIO); 3644 } 3645 3646 while (!TAILQ_EMPTY(&channel->queued_io)) { 3647 op = TAILQ_FIRST(&channel->queued_io); 3648 TAILQ_REMOVE(&channel->queued_io, op, link); 3649 bs_user_op_abort(op, -EIO); 3650 } 3651 3652 blob_esnap_destroy_bs_channel(channel); 3653 3654 free(channel->req_mem); 3655 spdk_free(channel->new_cluster_page); 3656 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3657 } 3658 3659 static void 3660 bs_dev_destroy(void *io_device) 3661 { 3662 struct spdk_blob_store *bs = io_device; 3663 struct spdk_blob *blob, *blob_tmp; 3664 3665 bs->dev->destroy(bs->dev); 3666 3667 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3668 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3669 spdk_bit_array_clear(bs->open_blobids, blob->id); 3670 blob_free(blob); 3671 } 3672 3673 spdk_spin_destroy(&bs->used_lock); 3674 3675 spdk_bit_array_free(&bs->open_blobids); 3676 spdk_bit_array_free(&bs->used_blobids); 3677 spdk_bit_array_free(&bs->used_md_pages); 3678 spdk_bit_pool_free(&bs->used_clusters); 3679 /* 3680 * If this function is called for any reason except a successful unload, 3681 * the unload_cpl type will be NONE and this will be a nop. 
3682 */ 3683 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3684 3685 free(bs); 3686 } 3687 3688 static int 3689 bs_blob_list_add(struct spdk_blob *blob) 3690 { 3691 spdk_blob_id snapshot_id; 3692 struct spdk_blob_list *snapshot_entry = NULL; 3693 struct spdk_blob_list *clone_entry = NULL; 3694 3695 assert(blob != NULL); 3696 3697 snapshot_id = blob->parent_id; 3698 if (snapshot_id == SPDK_BLOBID_INVALID || 3699 snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 3700 return 0; 3701 } 3702 3703 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3704 if (snapshot_entry == NULL) { 3705 /* Snapshot not found */ 3706 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3707 if (snapshot_entry == NULL) { 3708 return -ENOMEM; 3709 } 3710 snapshot_entry->id = snapshot_id; 3711 TAILQ_INIT(&snapshot_entry->clones); 3712 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3713 } else { 3714 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3715 if (clone_entry->id == blob->id) { 3716 break; 3717 } 3718 } 3719 } 3720 3721 if (clone_entry == NULL) { 3722 /* Clone not found */ 3723 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3724 if (clone_entry == NULL) { 3725 return -ENOMEM; 3726 } 3727 clone_entry->id = blob->id; 3728 TAILQ_INIT(&clone_entry->clones); 3729 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3730 snapshot_entry->clone_count++; 3731 } 3732 3733 return 0; 3734 } 3735 3736 static void 3737 bs_blob_list_remove(struct spdk_blob *blob) 3738 { 3739 struct spdk_blob_list *snapshot_entry = NULL; 3740 struct spdk_blob_list *clone_entry = NULL; 3741 3742 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3743 3744 if (snapshot_entry == NULL) { 3745 return; 3746 } 3747 3748 blob->parent_id = SPDK_BLOBID_INVALID; 3749 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3750 free(clone_entry); 3751 3752 snapshot_entry->clone_count--; 3753 } 3754 3755 static int 3756 bs_blob_list_free(struct spdk_blob_store *bs) 3757 { 3758 struct spdk_blob_list *snapshot_entry; 3759 struct spdk_blob_list *snapshot_entry_tmp; 3760 struct spdk_blob_list *clone_entry; 3761 struct spdk_blob_list *clone_entry_tmp; 3762 3763 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3764 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3765 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3766 free(clone_entry); 3767 } 3768 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3769 free(snapshot_entry); 3770 } 3771 3772 return 0; 3773 } 3774 3775 static void 3776 bs_free(struct spdk_blob_store *bs) 3777 { 3778 bs_blob_list_free(bs); 3779 3780 bs_unregister_md_thread(bs); 3781 spdk_io_device_unregister(bs, bs_dev_destroy); 3782 } 3783 3784 void 3785 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3786 { 3787 3788 if (!opts) { 3789 SPDK_ERRLOG("opts should not be NULL\n"); 3790 return; 3791 } 3792 3793 if (!opts_size) { 3794 SPDK_ERRLOG("opts_size should not be zero value\n"); 3795 return; 3796 } 3797 3798 memset(opts, 0, opts_size); 3799 opts->opts_size = opts_size; 3800 3801 #define FIELD_OK(field) \ 3802 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3803 3804 #define SET_FIELD(field, value) \ 3805 if (FIELD_OK(field)) { \ 3806 opts->field = value; \ 3807 } \ 3808 3809 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3810 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3811 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3812 
SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3813 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3814 3815 if (FIELD_OK(bstype)) { 3816 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3817 } 3818 3819 SET_FIELD(iter_cb_fn, NULL); 3820 SET_FIELD(iter_cb_arg, NULL); 3821 SET_FIELD(force_recover, false); 3822 SET_FIELD(esnap_bs_dev_create, NULL); 3823 SET_FIELD(esnap_ctx, NULL); 3824 3825 #undef FIELD_OK 3826 #undef SET_FIELD 3827 } 3828 3829 static int 3830 bs_opts_verify(struct spdk_bs_opts *opts) 3831 { 3832 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3833 opts->max_channel_ops == 0) { 3834 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3835 return -1; 3836 } 3837 3838 return 0; 3839 } 3840 3841 /* START spdk_bs_load */ 3842 3843 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3844 3845 struct spdk_bs_load_ctx { 3846 struct spdk_blob_store *bs; 3847 struct spdk_bs_super_block *super; 3848 3849 struct spdk_bs_md_mask *mask; 3850 bool in_page_chain; 3851 uint32_t page_index; 3852 uint32_t cur_page; 3853 struct spdk_blob_md_page *page; 3854 3855 uint64_t num_extent_pages; 3856 uint32_t *extent_page_num; 3857 struct spdk_blob_md_page *extent_pages; 3858 struct spdk_bit_array *used_clusters; 3859 3860 spdk_bs_sequence_t *seq; 3861 spdk_blob_op_with_handle_complete iter_cb_fn; 3862 void *iter_cb_arg; 3863 struct spdk_blob *blob; 3864 spdk_blob_id blobid; 3865 3866 bool force_recover; 3867 3868 /* These fields are used in the spdk_bs_dump path. */ 3869 bool dumping; 3870 FILE *fp; 3871 spdk_bs_dump_print_xattr print_xattr_fn; 3872 char xattr_name[4096]; 3873 }; 3874 3875 static void 3876 bs_init_per_cluster_fields(struct spdk_blob_store *bs) 3877 { 3878 bs->pages_per_cluster = bs->cluster_sz / bs->md_page_size; 3879 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3880 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3881 } 3882 bs->io_units_per_cluster = bs->cluster_sz / bs->io_unit_size; 3883 if (spdk_u32_is_pow2(bs->io_units_per_cluster)) { 3884 bs->io_units_per_cluster_shift = spdk_u32log2(bs->io_units_per_cluster); 3885 } 3886 } 3887 3888 static int 3889 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3890 struct spdk_bs_load_ctx **_ctx) 3891 { 3892 struct spdk_blob_store *bs; 3893 struct spdk_bs_load_ctx *ctx; 3894 uint64_t dev_size; 3895 uint32_t md_page_size; 3896 int rc; 3897 3898 dev_size = dev->blocklen * dev->blockcnt; 3899 if (dev_size < opts->cluster_sz) { 3900 /* Device size cannot be smaller than cluster size of blobstore */ 3901 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3902 dev_size, opts->cluster_sz); 3903 return -ENOSPC; 3904 } 3905 3906 md_page_size = spdk_max(spdk_max(dev->phys_blocklen, SPDK_BS_PAGE_SIZE), 3907 opts->md_page_size); 3908 if (opts->cluster_sz < md_page_size) { 3909 /* Cluster size cannot be smaller than page size */ 3910 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3911 opts->cluster_sz, md_page_size); 3912 return -EINVAL; 3913 } 3914 bs = calloc(1, sizeof(struct spdk_blob_store)); 3915 if (!bs) { 3916 return -ENOMEM; 3917 } 3918 3919 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3920 if (!ctx) { 3921 free(bs); 3922 return -ENOMEM; 3923 } 3924 3925 ctx->bs = bs; 3926 ctx->iter_cb_fn = opts->iter_cb_fn; 3927 ctx->iter_cb_arg = opts->iter_cb_arg; 3928 ctx->force_recover = opts->force_recover; 3929 3930 ctx->super = 
spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3931 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 3932 if (!ctx->super) { 3933 free(ctx); 3934 free(bs); 3935 return -ENOMEM; 3936 } 3937 3938 RB_INIT(&bs->open_blobs); 3939 TAILQ_INIT(&bs->snapshots); 3940 bs->dev = dev; 3941 bs->md_page_size = md_page_size; 3942 bs->md_thread = spdk_get_thread(); 3943 assert(bs->md_thread != NULL); 3944 3945 /* 3946 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3947 * even multiple of the cluster size. 3948 */ 3949 bs->cluster_sz = opts->cluster_sz; 3950 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3951 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3952 if (!ctx->used_clusters) { 3953 spdk_free(ctx->super); 3954 free(ctx); 3955 free(bs); 3956 return -ENOMEM; 3957 } 3958 3959 bs->num_free_clusters = bs->total_clusters; 3960 bs->io_unit_size = dev->blocklen; 3961 bs_init_per_cluster_fields(bs); 3962 3963 bs->max_channel_ops = opts->max_channel_ops; 3964 bs->super_blob = SPDK_BLOBID_INVALID; 3965 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3966 bs->esnap_bs_dev_create = opts->esnap_bs_dev_create; 3967 bs->esnap_ctx = opts->esnap_ctx; 3968 3969 /* The metadata is assumed to be at least 1 page */ 3970 bs->used_md_pages = spdk_bit_array_create(1); 3971 bs->used_blobids = spdk_bit_array_create(0); 3972 bs->open_blobids = spdk_bit_array_create(0); 3973 3974 spdk_spin_init(&bs->used_lock); 3975 3976 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3977 sizeof(struct spdk_bs_channel), "blobstore"); 3978 rc = bs_register_md_thread(bs); 3979 if (rc == -1) { 3980 spdk_io_device_unregister(bs, NULL); 3981 spdk_spin_destroy(&bs->used_lock); 3982 spdk_bit_array_free(&bs->open_blobids); 3983 spdk_bit_array_free(&bs->used_blobids); 3984 spdk_bit_array_free(&bs->used_md_pages); 3985 spdk_bit_array_free(&ctx->used_clusters); 3986 spdk_free(ctx->super); 3987 free(ctx); 3988 free(bs); 3989 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3990 return -ENOMEM; 3991 } 3992 3993 *_ctx = ctx; 3994 *_bs = bs; 3995 return 0; 3996 } 3997 3998 static void 3999 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 4000 { 4001 assert(bserrno != 0); 4002 4003 spdk_free(ctx->super); 4004 bs_sequence_finish(ctx->seq, bserrno); 4005 bs_free(ctx->bs); 4006 spdk_bit_array_free(&ctx->used_clusters); 4007 free(ctx); 4008 } 4009 4010 static void 4011 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 4012 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 4013 { 4014 /* Update the values in the super block */ 4015 super->super_blob = bs->super_blob; 4016 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 4017 super->crc = blob_md_page_calc_crc(super); 4018 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 4019 bs_byte_to_lba(bs, sizeof(*super)), 4020 cb_fn, cb_arg); 4021 } 4022 4023 static void 4024 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4025 { 4026 struct spdk_bs_load_ctx *ctx = arg; 4027 uint64_t mask_size, lba, lba_count; 4028 4029 /* Write out the used clusters mask */ 4030 mask_size = ctx->super->used_cluster_mask_len * ctx->bs->md_page_size; 4031 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4032 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4033 if (!ctx->mask) { 4034 bs_load_ctx_fail(ctx, -ENOMEM); 4035 return; 4036 } 4037 4038 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 4039 ctx->mask->length = 
ctx->bs->total_clusters; 4040 /* We could get here through the normal unload path, or through dirty 4041 * shutdown recovery. For the normal unload path, we use the mask from 4042 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 4043 * only the bit array from the load ctx. 4044 */ 4045 if (ctx->bs->used_clusters) { 4046 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 4047 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 4048 } else { 4049 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 4050 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 4051 } 4052 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4053 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4054 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4055 } 4056 4057 static void 4058 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4059 { 4060 struct spdk_bs_load_ctx *ctx = arg; 4061 uint64_t mask_size, lba, lba_count; 4062 4063 mask_size = ctx->super->used_page_mask_len * ctx->bs->md_page_size; 4064 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4065 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4066 if (!ctx->mask) { 4067 bs_load_ctx_fail(ctx, -ENOMEM); 4068 return; 4069 } 4070 4071 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 4072 ctx->mask->length = ctx->super->md_len; 4073 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 4074 4075 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4076 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4077 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4078 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4079 } 4080 4081 static void 4082 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4083 { 4084 struct spdk_bs_load_ctx *ctx = arg; 4085 uint64_t mask_size, lba, lba_count; 4086 4087 if (ctx->super->used_blobid_mask_len == 0) { 4088 /* 4089 * This is a pre-v3 on-disk format where the blobid mask does not get 4090 * written to disk. 
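 * Loaders detect this case the same way: bs_load_super_cpl() treats a
 * zero used_blobid_mask_len as a signal to run bs_recover(), which
 * replays the metadata region to rebuild the blobid information instead
 * of reading it from this mask.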
4091 */
4092 cb_fn(seq, arg, 0);
4093 return;
4094 }
4095
4096 mask_size = ctx->super->used_blobid_mask_len * ctx->bs->md_page_size;
4097 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY,
4098 SPDK_MALLOC_DMA);
4099 if (!ctx->mask) {
4100 bs_load_ctx_fail(ctx, -ENOMEM);
4101 return;
4102 }
4103
4104 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
4105 ctx->mask->length = ctx->super->md_len;
4106 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
4107
4108 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask);
4109 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4110 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4111 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
4112 }
4113
4114 static void
4115 blob_set_thin_provision(struct spdk_blob *blob)
4116 {
4117 blob_verify_md_op(blob);
4118 blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
4119 blob->state = SPDK_BLOB_STATE_DIRTY;
4120 }
4121
4122 static void
4123 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method)
4124 {
4125 blob_verify_md_op(blob);
4126 blob->clear_method = clear_method;
4127 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT);
4128 blob->state = SPDK_BLOB_STATE_DIRTY;
4129 }
4130
4131 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);
4132
4133 static void
4134 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
4135 {
4136 struct spdk_bs_load_ctx *ctx = cb_arg;
4137 spdk_blob_id id;
4138 int64_t page_num;
4139
4140 /* Iterate to the next blob (we can't use the spdk_bs_iter_next function, as our
4141 * last blob has just been removed) */
4142 page_num = bs_blobid_to_page(ctx->blobid);
4143 page_num++;
4144 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
4145 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
4146 bs_load_iter(ctx, NULL, -ENOENT);
4147 return;
4148 }
4149
4150 id = bs_page_to_blobid(page_num);
4151
4152 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx);
4153 }
4154
4155 static void
4156 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
4157 {
4158 struct spdk_bs_load_ctx *ctx = cb_arg;
4159
4160 if (bserrno != 0) {
4161 SPDK_ERRLOG("Failed to close corrupted blob\n");
4162 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4163 return;
4164 }
4165
4166 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx);
4167 }
4168
4169 static void
4170 bs_delete_corrupted_blob(void *cb_arg, int bserrno)
4171 {
4172 struct spdk_bs_load_ctx *ctx = cb_arg;
4173 uint64_t i;
4174
4175 if (bserrno != 0) {
4176 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
4177 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4178 return;
4179 }
4180
4181 /* Snapshot and clone have the same copy of the cluster map and extent pages
4182 * at this point. Let's clear both for the snapshot now,
4183 * so that they won't be cleared for the clone later when we remove the snapshot.
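 * (Otherwise, deleting the snapshot below would release clusters and
 * extent pages that the surviving clone still references.)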
4184 * Also set thin provision to pass data corruption check */ 4185 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 4186 ctx->blob->active.clusters[i] = 0; 4187 } 4188 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 4189 ctx->blob->active.extent_pages[i] = 0; 4190 } 4191 4192 ctx->blob->active.num_allocated_clusters = 0; 4193 4194 ctx->blob->md_ro = false; 4195 4196 blob_set_thin_provision(ctx->blob); 4197 4198 ctx->blobid = ctx->blob->id; 4199 4200 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 4201 } 4202 4203 static void 4204 bs_update_corrupted_blob(void *cb_arg, int bserrno) 4205 { 4206 struct spdk_bs_load_ctx *ctx = cb_arg; 4207 4208 if (bserrno != 0) { 4209 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4210 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4211 return; 4212 } 4213 4214 ctx->blob->md_ro = false; 4215 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 4216 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 4217 spdk_blob_set_read_only(ctx->blob); 4218 4219 if (ctx->iter_cb_fn) { 4220 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 4221 } 4222 bs_blob_list_add(ctx->blob); 4223 4224 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4225 } 4226 4227 static void 4228 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 4229 { 4230 struct spdk_bs_load_ctx *ctx = cb_arg; 4231 4232 if (bserrno != 0) { 4233 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 4234 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4235 return; 4236 } 4237 4238 if (blob->parent_id == ctx->blob->id) { 4239 /* Power failure occurred before updating clone (snapshot delete case) 4240 * or after updating clone (creating snapshot case) - keep snapshot */ 4241 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 4242 } else { 4243 /* Power failure occurred after updating clone (snapshot delete case) 4244 * or before updating clone (creating snapshot case) - remove snapshot */ 4245 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 4246 } 4247 } 4248 4249 static void 4250 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 4251 { 4252 struct spdk_bs_load_ctx *ctx = arg; 4253 const void *value; 4254 size_t len; 4255 int rc = 0; 4256 4257 if (bserrno == 0) { 4258 /* Examine blob if it is corrupted after power failure. Fix 4259 * the ones that can be fixed and remove any other corrupted 4260 * ones. If it is not corrupted just process it */ 4261 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 4262 if (rc != 0) { 4263 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 4264 if (rc != 0) { 4265 /* Not corrupted - process it and continue with iterating through blobs */ 4266 if (ctx->iter_cb_fn) { 4267 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 4268 } 4269 bs_blob_list_add(blob); 4270 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 4271 return; 4272 } 4273 4274 } 4275 4276 assert(len == sizeof(spdk_blob_id)); 4277 4278 ctx->blob = blob; 4279 4280 /* Open clone to check if we are able to fix this blob or should we remove it */ 4281 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 4282 return; 4283 } else if (bserrno == -ENOENT) { 4284 bserrno = 0; 4285 } else { 4286 /* 4287 * This case needs to be looked at further. Same problem 4288 * exists with applications that rely on explicit blob 4289 * iteration. We should just skip the blob that failed 4290 * to load and continue on to the next one. 
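 * For now the error path below aborts the iteration instead: it drops
 * the per-blob callback, frees the load context, and finishes the
 * sequence with the original bserrno.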
4291 */
4292 SPDK_ERRLOG("Error in iterating blobs\n");
4293 }
4294
4295 ctx->iter_cb_fn = NULL;
4296
4297 spdk_free(ctx->super);
4298 spdk_free(ctx->mask);
4299 bs_sequence_finish(ctx->seq, bserrno);
4300 free(ctx);
4301 }
4302
4303 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
4304
4305 static void
4306 bs_load_complete(struct spdk_bs_load_ctx *ctx)
4307 {
4308 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
4309 if (ctx->dumping) {
4310 bs_dump_read_md_page(ctx->seq, ctx);
4311 return;
4312 }
4313 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx);
4314 }
4315
4316 static void
4317 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4318 {
4319 struct spdk_bs_load_ctx *ctx = cb_arg;
4320 int rc;
4321
4322 /* The type must be correct */
4323 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
4324
4325 /* The length of the mask (in bits) must not be greater than
4326 * the length of the buffer (converted to bits) */
4327 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * ctx->super->md_page_size * 8));
4328
4329 /* The length of the mask must be exactly equal to the size
4330 * (in pages) of the metadata region */
4331 assert(ctx->mask->length == ctx->super->md_len);
4332
4333 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
4334 if (rc < 0) {
4335 spdk_free(ctx->mask);
4336 bs_load_ctx_fail(ctx, rc);
4337 return;
4338 }
4339
4340 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask);
4341 bs_load_complete(ctx);
4342 }
4343
4344 static void
4345 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4346 {
4347 struct spdk_bs_load_ctx *ctx = cb_arg;
4348 uint64_t lba, lba_count, mask_size;
4349 int rc;
4350
4351 if (bserrno != 0) {
4352 bs_load_ctx_fail(ctx, bserrno);
4353 return;
4354 }
4355
4356 /* The type must be correct */
4357 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
4358 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4359 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
4360 struct spdk_blob_md_page) * 8));
4361 /*
4362 * The length of the mask must be equal to or larger than the total number of clusters. It may be
4363 * larger than the total number of clusters due to a failed spdk_bs_grow.
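 * A hypothetical example: a grow that failed mid-way may have persisted
 * a mask sized for 1000 clusters while total_clusters is still 800; the
 * code below then shrinks mask->length back to 800 before the bit array
 * is resized and loaded.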
4364 */
4365 assert(ctx->mask->length >= ctx->bs->total_clusters);
4366 if (ctx->mask->length > ctx->bs->total_clusters) {
4367 SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n");
4368 ctx->mask->length = ctx->bs->total_clusters;
4369 }
4370
4371 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length);
4372 if (rc < 0) {
4373 spdk_free(ctx->mask);
4374 bs_load_ctx_fail(ctx, rc);
4375 return;
4376 }
4377
4378 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask);
4379 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters);
4380 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
4381
4382 spdk_free(ctx->mask);
4383
4384 /* Read the used blobids mask */
4385 mask_size = ctx->super->used_blobid_mask_len * ctx->super->md_page_size;
4386 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY,
4387 SPDK_MALLOC_DMA);
4388 if (!ctx->mask) {
4389 bs_load_ctx_fail(ctx, -ENOMEM);
4390 return;
4391 }
4392 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4393 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4394 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4395 bs_load_used_blobids_cpl, ctx);
4396 }
4397
4398 static void
4399 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4400 {
4401 struct spdk_bs_load_ctx *ctx = cb_arg;
4402 uint64_t lba, lba_count, mask_size;
4403 int rc;
4404
4405 if (bserrno != 0) {
4406 bs_load_ctx_fail(ctx, bserrno);
4407 return;
4408 }
4409
4410 /* The type must be correct */
4411 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
4412 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4413 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * ctx->super->md_page_size *
4414 8));
4415 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
4416 if (ctx->mask->length != ctx->super->md_len) {
4417 SPDK_ERRLOG("mismatched md_len in used_pages mask: "
4418 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n",
4419 ctx->mask->length, ctx->super->md_len);
4420 assert(false);
4421 }
4422
4423 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
4424 if (rc < 0) {
4425 spdk_free(ctx->mask);
4426 bs_load_ctx_fail(ctx, rc);
4427 return;
4428 }
4429
4430 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask);
4431 spdk_free(ctx->mask);
4432
4433 /* Read the used clusters mask */
4434 mask_size = ctx->super->used_cluster_mask_len * ctx->super->md_page_size;
4435 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY,
4436 SPDK_MALLOC_DMA);
4437 if (!ctx->mask) {
4438 bs_load_ctx_fail(ctx, -ENOMEM);
4439 return;
4440 }
4441 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
4442 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
4443 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4444 bs_load_used_clusters_cpl, ctx);
4445 }
4446
4447 static void
4448 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
4449 {
4450 uint64_t lba, lba_count, mask_size;
4451
4452 /* Read the used pages mask */
4453 mask_size = ctx->super->used_page_mask_len * ctx->super->md_page_size;
4454 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
4455 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
4456 if (!ctx->mask) {
4457 bs_load_ctx_fail(ctx, -ENOMEM);
4458 return;
4459 }
4460
4461 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
4462 lba_count
= bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4463 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 4464 bs_load_used_pages_cpl, ctx); 4465 } 4466 4467 static int 4468 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 4469 { 4470 struct spdk_blob_store *bs = ctx->bs; 4471 struct spdk_blob_md_descriptor *desc; 4472 size_t cur_desc = 0; 4473 4474 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4475 while (cur_desc < sizeof(page->descriptors)) { 4476 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4477 if (desc->length == 0) { 4478 /* If padding and length are 0, this terminates the page */ 4479 break; 4480 } 4481 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4482 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4483 unsigned int i, j; 4484 unsigned int cluster_count = 0; 4485 uint32_t cluster_idx; 4486 4487 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4488 4489 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4490 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 4491 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 4492 /* 4493 * cluster_idx = 0 means an unallocated cluster - don't mark that 4494 * in the used cluster map. 4495 */ 4496 if (cluster_idx != 0) { 4497 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 4498 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 4499 if (bs->num_free_clusters == 0) { 4500 return -ENOSPC; 4501 } 4502 bs->num_free_clusters--; 4503 } 4504 cluster_count++; 4505 } 4506 } 4507 if (cluster_count == 0) { 4508 return -EINVAL; 4509 } 4510 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4511 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4512 uint32_t i; 4513 uint32_t cluster_count = 0; 4514 uint32_t cluster_idx; 4515 size_t cluster_idx_length; 4516 4517 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4518 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4519 4520 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4521 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4522 return -EINVAL; 4523 } 4524 4525 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4526 cluster_idx = desc_extent->cluster_idx[i]; 4527 /* 4528 * cluster_idx = 0 means an unallocated cluster - don't mark that 4529 * in the used cluster map. 
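 * Unlike the EXTENT_RLE descriptor above, whose {cluster_idx, length}
 * pairs describe runs (e.g. {10, 3} marks clusters 10..12 as used), each
 * entry here is a single absolute cluster index.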
4530 */ 4531 if (cluster_idx != 0) { 4532 if (cluster_idx < desc_extent->start_cluster_idx && 4533 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4534 return -EINVAL; 4535 } 4536 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4537 if (bs->num_free_clusters == 0) { 4538 return -ENOSPC; 4539 } 4540 bs->num_free_clusters--; 4541 } 4542 cluster_count++; 4543 } 4544 4545 if (cluster_count == 0) { 4546 return -EINVAL; 4547 } 4548 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4549 /* Skip this item */ 4550 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4551 /* Skip this item */ 4552 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4553 /* Skip this item */ 4554 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4555 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4556 uint32_t num_extent_pages = ctx->num_extent_pages; 4557 uint32_t i; 4558 size_t extent_pages_length; 4559 void *tmp; 4560 4561 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4562 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4563 4564 if (desc_extent_table->length == 0 || 4565 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4566 return -EINVAL; 4567 } 4568 4569 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4570 if (desc_extent_table->extent_page[i].page_idx != 0) { 4571 if (desc_extent_table->extent_page[i].num_pages != 1) { 4572 return -EINVAL; 4573 } 4574 num_extent_pages += 1; 4575 } 4576 } 4577 4578 if (num_extent_pages > 0) { 4579 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4580 if (tmp == NULL) { 4581 return -ENOMEM; 4582 } 4583 ctx->extent_page_num = tmp; 4584 4585 /* Extent table entries contain md page numbers for extent pages. 4586 * Zeroes represent unallocated extent pages, those are run-length-encoded. 4587 */ 4588 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4589 if (desc_extent_table->extent_page[i].page_idx != 0) { 4590 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4591 ctx->num_extent_pages += 1; 4592 } 4593 } 4594 } 4595 } else { 4596 /* Error */ 4597 return -EINVAL; 4598 } 4599 /* Advance to the next descriptor */ 4600 cur_desc += sizeof(*desc) + desc->length; 4601 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4602 break; 4603 } 4604 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4605 } 4606 return 0; 4607 } 4608 4609 static bool 4610 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4611 { 4612 uint32_t crc; 4613 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4614 size_t desc_len; 4615 4616 crc = blob_md_page_calc_crc(page); 4617 if (crc != page->crc) { 4618 return false; 4619 } 4620 4621 /* Extent page should always be of sequence num 0. */ 4622 if (page->sequence_num != 0) { 4623 return false; 4624 } 4625 4626 /* Descriptor type must be EXTENT_PAGE. */ 4627 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4628 return false; 4629 } 4630 4631 /* Descriptor length cannot exceed the page. */ 4632 desc_len = sizeof(*desc) + desc->length; 4633 if (desc_len > sizeof(page->descriptors)) { 4634 return false; 4635 } 4636 4637 /* It has to be the only descriptor in the page. 
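 * In other words: if there is room left for another descriptor header
 * after this one, that header must read back as a zero-length
 * terminator; anything else means a second descriptor follows.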
*/
4638 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
4639 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
4640 if (desc->length != 0) {
4641 return false;
4642 }
4643 }
4644
4645 return true;
4646 }
4647
4648 static bool
4649 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
4650 {
4651 uint32_t crc;
4652 struct spdk_blob_md_page *page = ctx->page;
4653
4654 crc = blob_md_page_calc_crc(page);
4655 if (crc != page->crc) {
4656 return false;
4657 }
4658
4659 /* First page of a sequence should match the blobid. */
4660 if (page->sequence_num == 0 &&
4661 bs_page_to_blobid(ctx->cur_page) != page->id) {
4662 return false;
4663 }
4664 assert(bs_load_cur_extent_page_valid(page) == false);
4665
4666 return true;
4667 }
4668
4669 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
4670
4671 static void
4672 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4673 {
4674 struct spdk_bs_load_ctx *ctx = cb_arg;
4675
4676 if (bserrno != 0) {
4677 bs_load_ctx_fail(ctx, bserrno);
4678 return;
4679 }
4680
4681 bs_load_complete(ctx);
4682 }
4683
4684 static void
4685 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4686 {
4687 struct spdk_bs_load_ctx *ctx = cb_arg;
4688
4689 spdk_free(ctx->mask);
4690 ctx->mask = NULL;
4691
4692 if (bserrno != 0) {
4693 bs_load_ctx_fail(ctx, bserrno);
4694 return;
4695 }
4696
4697 bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl);
4698 }
4699
4700 static void
4701 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4702 {
4703 struct spdk_bs_load_ctx *ctx = cb_arg;
4704
4705 spdk_free(ctx->mask);
4706 ctx->mask = NULL;
4707
4708 if (bserrno != 0) {
4709 bs_load_ctx_fail(ctx, bserrno);
4710 return;
4711 }
4712
4713 bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl);
4714 }
4715
4716 static void
4717 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
4718 {
4719 bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl);
4720 }
4721
4722 static void
4723 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
4724 {
4725 uint64_t num_md_clusters;
4726 uint64_t i;
4727
4728 ctx->in_page_chain = false;
4729
4730 do {
4731 ctx->page_index++;
4732 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
4733
4734 if (ctx->page_index < ctx->super->md_len) {
4735 ctx->cur_page = ctx->page_index;
4736 bs_load_replay_cur_md_page(ctx);
4737 } else {
4738 /* Claim all of the clusters used by the metadata */
4739 num_md_clusters = spdk_divide_round_up(
4740 ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster);
4741 for (i = 0; i < num_md_clusters; i++) {
4742 spdk_bit_array_set(ctx->used_clusters, i);
4743 }
4744 ctx->bs->num_free_clusters -= num_md_clusters;
4745 spdk_free(ctx->page);
4746 bs_load_write_used_md(ctx);
4747 }
4748 }
4749
4750 static void
4751 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4752 {
4753 struct spdk_bs_load_ctx *ctx = cb_arg;
4754 uint32_t page_num;
4755 uint64_t i;
4756
4757 if (bserrno != 0) {
4758 spdk_free(ctx->extent_pages);
4759 bs_load_ctx_fail(ctx, bserrno);
4760 return;
4761 }
4762
4763 for (i = 0; i < ctx->num_extent_pages; i++) {
4764 /* Extent pages are only read when present within the md chain.
4765 * The md integrity cannot be trusted if such a page is not a valid extent page.
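 * An invalid page therefore fails the whole load with -EILSEQ below
 * rather than being skipped.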
*/ 4766 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4767 spdk_free(ctx->extent_pages); 4768 bs_load_ctx_fail(ctx, -EILSEQ); 4769 return; 4770 } 4771 4772 page_num = ctx->extent_page_num[i]; 4773 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4774 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4775 spdk_free(ctx->extent_pages); 4776 bs_load_ctx_fail(ctx, -EILSEQ); 4777 return; 4778 } 4779 } 4780 4781 spdk_free(ctx->extent_pages); 4782 free(ctx->extent_page_num); 4783 ctx->extent_page_num = NULL; 4784 ctx->num_extent_pages = 0; 4785 4786 bs_load_replay_md_chain_cpl(ctx); 4787 } 4788 4789 static void 4790 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4791 { 4792 spdk_bs_batch_t *batch; 4793 uint32_t page; 4794 uint64_t lba; 4795 uint64_t i; 4796 4797 ctx->extent_pages = spdk_zmalloc(ctx->super->md_page_size * ctx->num_extent_pages, 0, 4798 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4799 if (!ctx->extent_pages) { 4800 bs_load_ctx_fail(ctx, -ENOMEM); 4801 return; 4802 } 4803 4804 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4805 4806 for (i = 0; i < ctx->num_extent_pages; i++) { 4807 page = ctx->extent_page_num[i]; 4808 assert(page < ctx->super->md_len); 4809 lba = bs_md_page_to_lba(ctx->bs, page); 4810 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4811 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size)); 4812 } 4813 4814 bs_batch_close(batch); 4815 } 4816 4817 static void 4818 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4819 { 4820 struct spdk_bs_load_ctx *ctx = cb_arg; 4821 uint32_t page_num; 4822 struct spdk_blob_md_page *page; 4823 4824 if (bserrno != 0) { 4825 bs_load_ctx_fail(ctx, bserrno); 4826 return; 4827 } 4828 4829 page_num = ctx->cur_page; 4830 page = ctx->page; 4831 if (bs_load_cur_md_page_valid(ctx) == true) { 4832 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4833 spdk_spin_lock(&ctx->bs->used_lock); 4834 bs_claim_md_page(ctx->bs, page_num); 4835 spdk_spin_unlock(&ctx->bs->used_lock); 4836 if (page->sequence_num == 0) { 4837 SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num); 4838 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4839 } 4840 if (bs_load_replay_md_parse_page(ctx, page)) { 4841 bs_load_ctx_fail(ctx, -EILSEQ); 4842 return; 4843 } 4844 if (page->next != SPDK_INVALID_MD_PAGE) { 4845 ctx->in_page_chain = true; 4846 ctx->cur_page = page->next; 4847 bs_load_replay_cur_md_page(ctx); 4848 return; 4849 } 4850 if (ctx->num_extent_pages != 0) { 4851 bs_load_replay_extent_pages(ctx); 4852 return; 4853 } 4854 } 4855 } 4856 bs_load_replay_md_chain_cpl(ctx); 4857 } 4858 4859 static void 4860 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4861 { 4862 uint64_t lba; 4863 4864 assert(ctx->cur_page < ctx->super->md_len); 4865 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4866 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4867 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size), 4868 bs_load_replay_md_cpl, ctx); 4869 } 4870 4871 static void 4872 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4873 { 4874 ctx->page_index = 0; 4875 ctx->cur_page = 0; 4876 ctx->page = spdk_zmalloc(ctx->bs->md_page_size, 0, 4877 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4878 if (!ctx->page) { 4879 bs_load_ctx_fail(ctx, -ENOMEM); 4880 return; 4881 } 4882 bs_load_replay_cur_md_page(ctx); 4883 } 4884 4885 static void 4886 bs_recover(struct spdk_bs_load_ctx *ctx) 4887 { 4888 int rc; 4889 4890 SPDK_NOTICELOG("Performing recovery on 
blobstore\n"); 4891 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4892 if (rc < 0) { 4893 bs_load_ctx_fail(ctx, -ENOMEM); 4894 return; 4895 } 4896 4897 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4898 if (rc < 0) { 4899 bs_load_ctx_fail(ctx, -ENOMEM); 4900 return; 4901 } 4902 4903 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4904 if (rc < 0) { 4905 bs_load_ctx_fail(ctx, -ENOMEM); 4906 return; 4907 } 4908 4909 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4910 if (rc < 0) { 4911 bs_load_ctx_fail(ctx, -ENOMEM); 4912 return; 4913 } 4914 4915 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4916 bs_load_replay_md(ctx); 4917 } 4918 4919 static int 4920 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4921 { 4922 int rc; 4923 4924 if (ctx->super->size == 0) { 4925 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4926 } 4927 4928 if (ctx->super->io_unit_size == 0) { 4929 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4930 } 4931 if (ctx->super->md_page_size == 0) { 4932 ctx->super->md_page_size = SPDK_BS_PAGE_SIZE; 4933 } 4934 4935 ctx->bs->clean = 1; 4936 ctx->bs->cluster_sz = ctx->super->cluster_size; 4937 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4938 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4939 ctx->bs->md_page_size = ctx->super->md_page_size; 4940 bs_init_per_cluster_fields(ctx->bs); 4941 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4942 if (rc < 0) { 4943 return -ENOMEM; 4944 } 4945 ctx->bs->md_start = ctx->super->md_start; 4946 ctx->bs->md_len = ctx->super->md_len; 4947 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4948 if (rc < 0) { 4949 return -ENOMEM; 4950 } 4951 4952 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4953 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4954 ctx->bs->super_blob = ctx->super->super_blob; 4955 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4956 4957 return 0; 4958 } 4959 4960 static void 4961 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4962 { 4963 struct spdk_bs_load_ctx *ctx = cb_arg; 4964 int rc; 4965 4966 rc = bs_super_validate(ctx->super, ctx->bs); 4967 if (rc != 0) { 4968 bs_load_ctx_fail(ctx, rc); 4969 return; 4970 } 4971 4972 rc = bs_parse_super(ctx); 4973 if (rc < 0) { 4974 bs_load_ctx_fail(ctx, rc); 4975 return; 4976 } 4977 4978 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4979 bs_recover(ctx); 4980 } else { 4981 bs_load_read_used_pages(ctx); 4982 } 4983 } 4984 4985 static inline int 4986 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4987 { 4988 4989 if (!src->opts_size) { 4990 SPDK_ERRLOG("opts_size should not be zero value\n"); 4991 return -1; 4992 } 4993 4994 #define FIELD_OK(field) \ 4995 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4996 4997 #define SET_FIELD(field) \ 4998 if (FIELD_OK(field)) { \ 4999 dst->field = src->field; \ 5000 } \ 5001 5002 SET_FIELD(cluster_sz); 5003 SET_FIELD(num_md_pages); 5004 SET_FIELD(max_md_ops); 5005 SET_FIELD(max_channel_ops); 5006 SET_FIELD(clear_method); 5007 5008 if (FIELD_OK(bstype)) { 5009 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 5010 } 5011 SET_FIELD(md_page_size); 5012 SET_FIELD(iter_cb_fn); 5013 SET_FIELD(iter_cb_arg); 5014 SET_FIELD(force_recover); 5015 
SET_FIELD(esnap_bs_dev_create); 5016 SET_FIELD(esnap_ctx); 5017 5018 dst->opts_size = src->opts_size; 5019 5020 /* You should not remove this statement, but need to update the assert statement 5021 * if you add a new field, and also add a corresponding SET_FIELD statement */ 5022 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size"); 5023 5024 #undef FIELD_OK 5025 #undef SET_FIELD 5026 5027 return 0; 5028 } 5029 5030 void 5031 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5032 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5033 { 5034 struct spdk_blob_store *bs; 5035 struct spdk_bs_cpl cpl; 5036 struct spdk_bs_load_ctx *ctx; 5037 struct spdk_bs_opts opts = {}; 5038 int err; 5039 5040 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 5041 5042 if ((dev->phys_blocklen % dev->blocklen) != 0) { 5043 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 5044 dev->destroy(dev); 5045 cb_fn(cb_arg, NULL, -EINVAL); 5046 return; 5047 } 5048 5049 spdk_bs_opts_init(&opts, sizeof(opts)); 5050 if (o) { 5051 if (bs_opts_copy(o, &opts)) { 5052 return; 5053 } 5054 } 5055 5056 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 5057 dev->destroy(dev); 5058 cb_fn(cb_arg, NULL, -EINVAL); 5059 return; 5060 } 5061 5062 err = bs_alloc(dev, &opts, &bs, &ctx); 5063 if (err) { 5064 dev->destroy(dev); 5065 cb_fn(cb_arg, NULL, err); 5066 return; 5067 } 5068 5069 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5070 cpl.u.bs_handle.cb_fn = cb_fn; 5071 cpl.u.bs_handle.cb_arg = cb_arg; 5072 cpl.u.bs_handle.bs = bs; 5073 5074 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5075 if (!ctx->seq) { 5076 spdk_free(ctx->super); 5077 free(ctx); 5078 bs_free(bs); 5079 cb_fn(cb_arg, NULL, -ENOMEM); 5080 return; 5081 } 5082 5083 /* Read the super block */ 5084 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5085 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5086 bs_load_super_cpl, ctx); 5087 } 5088 5089 /* END spdk_bs_load */ 5090 5091 /* START spdk_bs_dump */ 5092 5093 static void 5094 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 5095 { 5096 spdk_free(ctx->super); 5097 5098 /* 5099 * We need to defer calling bs_call_cpl() until after 5100 * dev destruction, so tuck these away for later use. 
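 * The mechanism: stash the real completion in bs->unload_cpl, then set
 * seq->cpl.type to SPDK_BS_CPL_TYPE_NONE so bs_sequence_finish() will
 * not fire it; the saved completion runs later, after bs_free() has torn
 * down the dev.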
5101 */ 5102 ctx->bs->unload_err = bserrno; 5103 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5104 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5105 5106 bs_sequence_finish(seq, 0); 5107 bs_free(ctx->bs); 5108 free(ctx); 5109 } 5110 5111 static void 5112 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5113 { 5114 struct spdk_blob_md_descriptor_xattr *desc_xattr; 5115 uint32_t i; 5116 const char *type; 5117 5118 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 5119 5120 if (desc_xattr->length != 5121 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 5122 desc_xattr->name_length + desc_xattr->value_length) { 5123 } 5124 5125 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 5126 ctx->xattr_name[desc_xattr->name_length] = '\0'; 5127 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5128 type = "XATTR"; 5129 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5130 type = "XATTR_INTERNAL"; 5131 } else { 5132 assert(false); 5133 type = "XATTR_?"; 5134 } 5135 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name); 5136 fprintf(ctx->fp, " value = \""); 5137 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 5138 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 5139 desc_xattr->value_length); 5140 fprintf(ctx->fp, "\"\n"); 5141 for (i = 0; i < desc_xattr->value_length; i++) { 5142 if (i % 16 == 0) { 5143 fprintf(ctx->fp, " "); 5144 } 5145 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 5146 if ((i + 1) % 16 == 0) { 5147 fprintf(ctx->fp, "\n"); 5148 } 5149 } 5150 if (i % 16 != 0) { 5151 fprintf(ctx->fp, "\n"); 5152 } 5153 } 5154 5155 struct type_flag_desc { 5156 uint64_t mask; 5157 uint64_t val; 5158 const char *name; 5159 }; 5160 5161 static void 5162 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags, 5163 struct type_flag_desc *desc, size_t numflags) 5164 { 5165 uint64_t covered = 0; 5166 size_t i; 5167 5168 for (i = 0; i < numflags; i++) { 5169 if ((desc[i].mask & flags) != desc[i].val) { 5170 continue; 5171 } 5172 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name); 5173 if (desc[i].mask != desc[i].val) { 5174 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")", 5175 desc[i].mask, desc[i].val); 5176 } 5177 fprintf(ctx->fp, "\n"); 5178 covered |= desc[i].mask; 5179 } 5180 if ((flags & ~covered) != 0) { 5181 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered); 5182 } 5183 } 5184 5185 static void 5186 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5187 { 5188 struct spdk_blob_md_descriptor_flags *type_desc; 5189 #define ADD_FLAG(f) { f, f, #f } 5190 #define ADD_MASK_VAL(m, v) { m, v, #v } 5191 static struct type_flag_desc invalid[] = { 5192 ADD_FLAG(SPDK_BLOB_THIN_PROV), 5193 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR), 5194 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE), 5195 }; 5196 static struct type_flag_desc data_ro[] = { 5197 ADD_FLAG(SPDK_BLOB_READ_ONLY), 5198 }; 5199 static struct type_flag_desc md_ro[] = { 5200 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT), 5201 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE), 5202 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP), 5203 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES), 5204 }; 5205 #undef ADD_FLAG 5206 #undef ADD_MASK_VAL 5207 5208 type_desc = (struct 
spdk_blob_md_descriptor_flags *)desc; 5209 fprintf(ctx->fp, "Flags:\n"); 5210 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 5211 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 5212 SPDK_COUNTOF(invalid)); 5213 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 5214 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 5215 SPDK_COUNTOF(data_ro)); 5216 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 5217 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 5218 SPDK_COUNTOF(md_ro)); 5219 } 5220 5221 static void 5222 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5223 { 5224 struct spdk_blob_md_descriptor_extent_table *et_desc; 5225 uint64_t num_extent_pages; 5226 uint32_t et_idx; 5227 5228 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 5229 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 5230 sizeof(et_desc->extent_page[0]); 5231 5232 fprintf(ctx->fp, "Extent table:\n"); 5233 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 5234 if (et_desc->extent_page[et_idx].page_idx == 0) { 5235 /* Zeroes represent unallocated extent pages. */ 5236 continue; 5237 } 5238 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 5239 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 5240 et_desc->extent_page[et_idx].num_pages, 5241 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 5242 } 5243 } 5244 5245 static void 5246 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 5247 { 5248 uint32_t page_idx = ctx->cur_page; 5249 struct spdk_blob_md_page *page = ctx->page; 5250 struct spdk_blob_md_descriptor *desc; 5251 size_t cur_desc = 0; 5252 uint32_t crc; 5253 5254 fprintf(ctx->fp, "=========\n"); 5255 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 5256 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 5257 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 5258 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 5259 if (page->next == SPDK_INVALID_MD_PAGE) { 5260 fprintf(ctx->fp, "Next: None\n"); 5261 } else { 5262 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 5263 } 5264 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 5265 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 5266 fprintf(ctx->fp, " md"); 5267 } 5268 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 5269 fprintf(ctx->fp, " blob"); 5270 } 5271 fprintf(ctx->fp, "\n"); 5272 5273 crc = blob_md_page_calc_crc(page); 5274 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 5275 5276 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 5277 while (cur_desc < sizeof(page->descriptors)) { 5278 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 5279 if (desc->length == 0) { 5280 /* If padding and length are 0, this terminates the page */ 5281 break; 5282 } 5283 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 5284 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 5285 unsigned int i; 5286 5287 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 5288 5289 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 5290 if (desc_extent_rle->extents[i].cluster_idx != 0) { 5291 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5292 desc_extent_rle->extents[i].cluster_idx); 5293 } else { 5294 fprintf(ctx->fp, "Unallocated Extent - "); 5295 } 5296 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 5297 fprintf(ctx->fp, "\n"); 5298 } 5299 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 5300 struct spdk_blob_md_descriptor_extent_page *desc_extent; 5301 unsigned int i; 5302 5303 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 5304 5305 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 5306 if (desc_extent->cluster_idx[i] != 0) { 5307 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5308 desc_extent->cluster_idx[i]); 5309 } else { 5310 fprintf(ctx->fp, "Unallocated Extent"); 5311 } 5312 fprintf(ctx->fp, "\n"); 5313 } 5314 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5315 bs_dump_print_xattr(ctx, desc); 5316 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5317 bs_dump_print_xattr(ctx, desc); 5318 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 5319 bs_dump_print_type_flags(ctx, desc); 5320 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 5321 bs_dump_print_extent_table(ctx, desc); 5322 } else { 5323 /* Error */ 5324 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 5325 } 5326 /* Advance to the next descriptor */ 5327 cur_desc += sizeof(*desc) + desc->length; 5328 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 5329 break; 5330 } 5331 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 5332 } 5333 } 5334 5335 static void 5336 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5337 { 5338 struct spdk_bs_load_ctx *ctx = cb_arg; 5339 5340 if (bserrno != 0) { 5341 bs_dump_finish(seq, ctx, bserrno); 5342 return; 5343 } 5344 5345 if (ctx->page->id != 0) { 5346 bs_dump_print_md_page(ctx); 5347 } 5348 5349 ctx->cur_page++; 5350 5351 if (ctx->cur_page < ctx->super->md_len) { 5352 bs_dump_read_md_page(seq, ctx); 5353 } else { 5354 spdk_free(ctx->page); 5355 bs_dump_finish(seq, ctx, 0); 5356 } 5357 } 5358 5359 static void 5360 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 5361 { 5362 struct spdk_bs_load_ctx *ctx = cb_arg; 5363 uint64_t lba; 5364 5365 assert(ctx->cur_page < ctx->super->md_len); 5366 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 5367 bs_sequence_read_dev(seq, ctx->page, lba, 5368 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size), 5369 bs_dump_read_md_page_cpl, ctx); 5370 } 5371 5372 static void 5373 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5374 { 5375 struct spdk_bs_load_ctx *ctx = cb_arg; 5376 int rc; 5377 5378 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 5379 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5380 sizeof(ctx->super->signature)) != 0) { 5381 fprintf(ctx->fp, "(Mismatch)\n"); 5382 bs_dump_finish(seq, ctx, bserrno); 5383 return; 5384 } else { 5385 fprintf(ctx->fp, "(OK)\n"); 5386 } 5387 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 5388 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 5389 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 5390 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 5391 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 5392 fprintf(ctx->fp, "Super Blob ID: "); 5393 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 5394 fprintf(ctx->fp, "(None)\n"); 5395 } else { 5396 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 5397 } 5398 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 5399 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 5400 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 5401 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 5402 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 5403 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 5404 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 5405 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 5406 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 5407 5408 ctx->cur_page = 0; 5409 ctx->page = spdk_zmalloc(ctx->super->md_page_size, 0, 5410 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5411 if (!ctx->page) { 5412 bs_dump_finish(seq, ctx, -ENOMEM); 5413 return; 5414 } 5415 5416 rc = bs_parse_super(ctx); 5417 if (rc < 0) { 5418 bs_load_ctx_fail(ctx, rc); 5419 return; 5420 } 5421 5422 bs_load_read_used_pages(ctx); 5423 } 5424 5425 void 5426 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 5427 spdk_bs_op_complete cb_fn, void *cb_arg) 5428 { 5429 struct spdk_blob_store *bs; 5430 struct spdk_bs_cpl cpl; 5431 struct spdk_bs_load_ctx *ctx; 5432 struct spdk_bs_opts opts = {}; 5433 int err; 5434 5435 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 5436 5437 spdk_bs_opts_init(&opts, sizeof(opts)); 5438 5439 err = bs_alloc(dev, &opts, &bs, &ctx); 5440 if (err) { 5441 dev->destroy(dev); 5442 cb_fn(cb_arg, err); 5443 return; 5444 } 5445 5446 ctx->dumping = true; 5447 ctx->fp = fp; 5448 ctx->print_xattr_fn = print_xattr_fn; 5449 5450 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5451 cpl.u.bs_basic.cb_fn = cb_fn; 5452 cpl.u.bs_basic.cb_arg = cb_arg; 5453 5454 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5455 if (!ctx->seq) { 5456 spdk_free(ctx->super); 5457 free(ctx); 5458 bs_free(bs); 5459 cb_fn(cb_arg, -ENOMEM); 5460 return; 5461 } 5462 5463 /* Read the super block */ 5464 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5465 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5466 bs_dump_super_cpl, ctx); 5467 } 5468 5469 /* END spdk_bs_dump */ 5470 5471 /* START spdk_bs_init */ 5472 5473 static void 5474 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5475 { 5476 struct spdk_bs_load_ctx *ctx = cb_arg; 5477 5478 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 5479 spdk_free(ctx->super); 5480 free(ctx); 5481 5482 bs_sequence_finish(seq, bserrno); 5483 } 5484 5485 static void 5486 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5487 { 5488 struct spdk_bs_load_ctx *ctx = cb_arg; 5489 5490 /* Write super block */ 5491 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5492 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5493 bs_init_persist_super_cpl, ctx); 5494 } 5495 5496 void 5497 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5498 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5499 { 5500 struct spdk_bs_load_ctx *ctx; 5501 struct spdk_blob_store *bs; 5502 struct spdk_bs_cpl cpl; 5503 spdk_bs_sequence_t *seq; 5504 spdk_bs_batch_t *batch; 5505 uint64_t num_md_lba; 5506 uint64_t num_md_pages; 5507 uint64_t num_md_clusters; 5508 uint64_t max_used_cluster_mask_len; 5509 uint32_t i; 5510 struct spdk_bs_opts opts = {}; 5511 int rc; 5512 uint64_t lba, lba_count; 5513 5514 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5515 if ((dev->phys_blocklen % dev->blocklen) != 0) { 5516 SPDK_ERRLOG("unsupported dev block length of %d\n", 5517 dev->blocklen); 5518 dev->destroy(dev); 5519 cb_fn(cb_arg, NULL, -EINVAL); 5520 return; 5521 } 5522 5523 spdk_bs_opts_init(&opts, sizeof(opts)); 5524 if (o) { 5525 if (bs_opts_copy(o, &opts)) { 5526 return; 5527 } 5528 } 5529 5530 if (bs_opts_verify(&opts) != 0) { 5531 dev->destroy(dev); 5532 cb_fn(cb_arg, NULL, -EINVAL); 5533 return; 5534 } 5535 5536 rc = bs_alloc(dev, &opts, &bs, &ctx); 5537 if (rc) { 5538 dev->destroy(dev); 5539 cb_fn(cb_arg, NULL, rc); 5540 return; 5541 } 5542 5543 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5544 /* By default, allocate 1 page per cluster. 5545 * Technically, this over-allocates metadata 5546 * because more metadata will reduce the number 5547 * of usable clusters. This can be addressed with 5548 * more complex math in the future. 5549 */ 5550 bs->md_len = bs->total_clusters; 5551 } else { 5552 bs->md_len = opts.num_md_pages; 5553 } 5554 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5555 if (rc < 0) { 5556 spdk_free(ctx->super); 5557 free(ctx); 5558 bs_free(bs); 5559 cb_fn(cb_arg, NULL, -ENOMEM); 5560 return; 5561 } 5562 5563 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5564 if (rc < 0) { 5565 spdk_free(ctx->super); 5566 free(ctx); 5567 bs_free(bs); 5568 cb_fn(cb_arg, NULL, -ENOMEM); 5569 return; 5570 } 5571 5572 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5573 if (rc < 0) { 5574 spdk_free(ctx->super); 5575 free(ctx); 5576 bs_free(bs); 5577 cb_fn(cb_arg, NULL, -ENOMEM); 5578 return; 5579 } 5580 5581 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5582 sizeof(ctx->super->signature)); 5583 ctx->super->version = SPDK_BS_VERSION; 5584 ctx->super->length = sizeof(*ctx->super); 5585 ctx->super->super_blob = bs->super_blob; 5586 ctx->super->clean = 0; 5587 ctx->super->cluster_size = bs->cluster_sz; 5588 ctx->super->io_unit_size = bs->io_unit_size; 5589 ctx->super->md_page_size = bs->md_page_size; 5590 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5591 5592 /* Calculate how many pages the metadata consumes at the front 5593 * of the disk. 5594 */ 5595 5596 /* The super block uses 1 page */ 5597 num_md_pages = 1; 5598 5599 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5600 * up to the nearest page, plus a header. 
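 * A worked example with hypothetical sizes: md_len = 32768 metadata
 * pages needs 32768 / 8 = 4096 mask bytes; with a 4 KiB metadata page,
 * the spdk_bs_md_mask header pushes that just past one page, so
 * used_page_mask_len comes out to 2 pages.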
5601 */
5602 ctx->super->used_page_mask_start = num_md_pages;
5603 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5604 spdk_divide_round_up(bs->md_len, 8),
5605 ctx->super->md_page_size);
5606 num_md_pages += ctx->super->used_page_mask_len;
5607 
5608 /* The used_clusters mask requires 1 bit per cluster, rounded
5609 * up to the nearest page, plus a header.
5610 */
5611 ctx->super->used_cluster_mask_start = num_md_pages;
5612 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5613 spdk_divide_round_up(bs->total_clusters, 8),
5614 ctx->super->md_page_size);
5615 /* If the blobstore is extended later, the used_cluster bitmap will need more space.
5616 * Here we calculate the max clusters we can support according to the
5617 * num_md_pages (bs->md_len).
5618 */
5619 max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5620 spdk_divide_round_up(bs->md_len, 8),
5621 ctx->super->md_page_size);
5622 max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len,
5623 ctx->super->used_cluster_mask_len);
5624 num_md_pages += max_used_cluster_mask_len;
5625 
5626 /* The used_blobids mask requires 1 bit per metadata page, rounded
5627 * up to the nearest page, plus a header.
5628 */
5629 ctx->super->used_blobid_mask_start = num_md_pages;
5630 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5631 spdk_divide_round_up(bs->md_len, 8),
5632 ctx->super->md_page_size);
5633 num_md_pages += ctx->super->used_blobid_mask_len;
5634 
5635 /* The metadata region size was chosen above */
5636 ctx->super->md_start = bs->md_start = num_md_pages;
5637 ctx->super->md_len = bs->md_len;
5638 num_md_pages += bs->md_len;
5639 
5640 num_md_lba = bs_page_to_lba(bs, num_md_pages);
5641 
5642 ctx->super->size = dev->blockcnt * dev->blocklen;
5643 
5644 ctx->super->crc = blob_md_page_calc_crc(ctx->super);
5645 
5646 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
5647 if (num_md_clusters > bs->total_clusters) {
5648 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available, "
5649 "please decrease the number of pages reserved for metadata "
5650 "or increase the cluster size.\n");
5651 spdk_free(ctx->super);
5652 spdk_bit_array_free(&ctx->used_clusters);
5653 free(ctx);
5654 bs_free(bs);
5655 cb_fn(cb_arg, NULL, -ENOMEM);
5656 return;
5657 }
5658 /* Claim all of the clusters used by the metadata */
5659 for (i = 0; i < num_md_clusters; i++) {
5660 spdk_bit_array_set(ctx->used_clusters, i);
5661 }
5662 
5663 bs->num_free_clusters -= num_md_clusters;
5664 bs->total_data_clusters = bs->num_free_clusters;
5665 
5666 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
5667 cpl.u.bs_handle.cb_fn = cb_fn;
5668 cpl.u.bs_handle.cb_arg = cb_arg;
5669 cpl.u.bs_handle.bs = bs;
5670 
5671 seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5672 if (!seq) {
5673 spdk_free(ctx->super);
5674 free(ctx);
5675 bs_free(bs);
5676 cb_fn(cb_arg, NULL, -ENOMEM);
5677 return;
5678 }
5679 
5680 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx);
5681 
5682 /* Clear metadata space */
5683 bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
5684 
5685 lba = num_md_lba;
5686 lba_count = ctx->bs->dev->blockcnt - lba;
5687 switch (opts.clear_method) {
5688 case BS_CLEAR_WITH_UNMAP:
5689 /* Trim data clusters */
5690 bs_batch_unmap_dev(batch, lba, lba_count);
5691 break;
5692 case BS_CLEAR_WITH_WRITE_ZEROES:
5693 /* Write_zeroes to data clusters */
5694 bs_batch_write_zeroes_dev(batch, lba,
lba_count); 5695 break; 5696 case BS_CLEAR_WITH_NONE: 5697 default: 5698 break; 5699 } 5700 5701 bs_batch_close(batch); 5702 } 5703 5704 /* END spdk_bs_init */ 5705 5706 /* START spdk_bs_destroy */ 5707 5708 static void 5709 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5710 { 5711 struct spdk_bs_load_ctx *ctx = cb_arg; 5712 struct spdk_blob_store *bs = ctx->bs; 5713 5714 /* 5715 * We need to defer calling bs_call_cpl() until after 5716 * dev destruction, so tuck these away for later use. 5717 */ 5718 bs->unload_err = bserrno; 5719 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5720 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5721 5722 bs_sequence_finish(seq, bserrno); 5723 5724 bs_free(bs); 5725 free(ctx); 5726 } 5727 5728 void 5729 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5730 void *cb_arg) 5731 { 5732 struct spdk_bs_cpl cpl; 5733 spdk_bs_sequence_t *seq; 5734 struct spdk_bs_load_ctx *ctx; 5735 5736 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5737 5738 if (!RB_EMPTY(&bs->open_blobs)) { 5739 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5740 cb_fn(cb_arg, -EBUSY); 5741 return; 5742 } 5743 5744 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5745 cpl.u.bs_basic.cb_fn = cb_fn; 5746 cpl.u.bs_basic.cb_arg = cb_arg; 5747 5748 ctx = calloc(1, sizeof(*ctx)); 5749 if (!ctx) { 5750 cb_fn(cb_arg, -ENOMEM); 5751 return; 5752 } 5753 5754 ctx->bs = bs; 5755 5756 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5757 if (!seq) { 5758 free(ctx); 5759 cb_fn(cb_arg, -ENOMEM); 5760 return; 5761 } 5762 5763 /* Write zeroes to the super block */ 5764 bs_sequence_write_zeroes_dev(seq, 5765 bs_page_to_lba(bs, 0), 5766 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5767 bs_destroy_trim_cpl, ctx); 5768 } 5769 5770 /* END spdk_bs_destroy */ 5771 5772 /* START spdk_bs_unload */ 5773 5774 static void 5775 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5776 { 5777 spdk_bs_sequence_t *seq = ctx->seq; 5778 5779 spdk_free(ctx->super); 5780 5781 /* 5782 * We need to defer calling bs_call_cpl() until after 5783 * dev destruction, so tuck these away for later use. 
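 * (The sequence's cpl type is set to NONE below, presumably so that finishing
 * the sequence cannot fire the saved completion before the dev is destroyed.)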
5784 */ 5785 ctx->bs->unload_err = bserrno; 5786 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5787 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5788 5789 bs_sequence_finish(seq, bserrno); 5790 5791 bs_free(ctx->bs); 5792 free(ctx); 5793 } 5794 5795 static void 5796 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5797 { 5798 struct spdk_bs_load_ctx *ctx = cb_arg; 5799 5800 bs_unload_finish(ctx, bserrno); 5801 } 5802 5803 static void 5804 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5805 { 5806 struct spdk_bs_load_ctx *ctx = cb_arg; 5807 5808 spdk_free(ctx->mask); 5809 5810 if (bserrno != 0) { 5811 bs_unload_finish(ctx, bserrno); 5812 return; 5813 } 5814 5815 ctx->super->clean = 1; 5816 5817 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5818 } 5819 5820 static void 5821 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5822 { 5823 struct spdk_bs_load_ctx *ctx = cb_arg; 5824 5825 spdk_free(ctx->mask); 5826 ctx->mask = NULL; 5827 5828 if (bserrno != 0) { 5829 bs_unload_finish(ctx, bserrno); 5830 return; 5831 } 5832 5833 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5834 } 5835 5836 static void 5837 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5838 { 5839 struct spdk_bs_load_ctx *ctx = cb_arg; 5840 5841 spdk_free(ctx->mask); 5842 ctx->mask = NULL; 5843 5844 if (bserrno != 0) { 5845 bs_unload_finish(ctx, bserrno); 5846 return; 5847 } 5848 5849 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5850 } 5851 5852 static void 5853 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5854 { 5855 struct spdk_bs_load_ctx *ctx = cb_arg; 5856 int rc; 5857 5858 if (bserrno != 0) { 5859 bs_unload_finish(ctx, bserrno); 5860 return; 5861 } 5862 5863 rc = bs_super_validate(ctx->super, ctx->bs); 5864 if (rc != 0) { 5865 bs_unload_finish(ctx, rc); 5866 return; 5867 } 5868 5869 bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5870 } 5871 5872 void 5873 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5874 { 5875 struct spdk_bs_cpl cpl; 5876 struct spdk_bs_load_ctx *ctx; 5877 5878 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5879 5880 /* 5881 * If external snapshot channels are being destroyed while the blobstore is unloaded, the 5882 * unload is deferred until after the channel destruction completes. 
5883 */ 5884 if (bs->esnap_channels_unloading != 0) { 5885 if (bs->esnap_unload_cb_fn != NULL) { 5886 SPDK_ERRLOG("Blobstore unload in progress\n"); 5887 cb_fn(cb_arg, -EBUSY); 5888 return; 5889 } 5890 SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32 5891 " esnap clones are unloading\n", bs->esnap_channels_unloading); 5892 bs->esnap_unload_cb_fn = cb_fn; 5893 bs->esnap_unload_cb_arg = cb_arg; 5894 return; 5895 } 5896 if (bs->esnap_unload_cb_fn != NULL) { 5897 SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n"); 5898 assert(bs->esnap_unload_cb_fn == cb_fn); 5899 assert(bs->esnap_unload_cb_arg == cb_arg); 5900 bs->esnap_unload_cb_fn = NULL; 5901 bs->esnap_unload_cb_arg = NULL; 5902 } 5903 5904 if (!RB_EMPTY(&bs->open_blobs)) { 5905 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5906 cb_fn(cb_arg, -EBUSY); 5907 return; 5908 } 5909 5910 ctx = calloc(1, sizeof(*ctx)); 5911 if (!ctx) { 5912 cb_fn(cb_arg, -ENOMEM); 5913 return; 5914 } 5915 5916 ctx->bs = bs; 5917 5918 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5919 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5920 if (!ctx->super) { 5921 free(ctx); 5922 cb_fn(cb_arg, -ENOMEM); 5923 return; 5924 } 5925 5926 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5927 cpl.u.bs_basic.cb_fn = cb_fn; 5928 cpl.u.bs_basic.cb_arg = cb_arg; 5929 5930 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5931 if (!ctx->seq) { 5932 spdk_free(ctx->super); 5933 free(ctx); 5934 cb_fn(cb_arg, -ENOMEM); 5935 return; 5936 } 5937 5938 /* Read super block */ 5939 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5940 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5941 bs_unload_read_super_cpl, ctx); 5942 } 5943 5944 /* END spdk_bs_unload */ 5945 5946 /* START spdk_bs_set_super */ 5947 5948 struct spdk_bs_set_super_ctx { 5949 struct spdk_blob_store *bs; 5950 struct spdk_bs_super_block *super; 5951 }; 5952 5953 static void 5954 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5955 { 5956 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5957 5958 if (bserrno != 0) { 5959 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5960 } 5961 5962 spdk_free(ctx->super); 5963 5964 bs_sequence_finish(seq, bserrno); 5965 5966 free(ctx); 5967 } 5968 5969 static void 5970 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5971 { 5972 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5973 int rc; 5974 5975 if (bserrno != 0) { 5976 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5977 spdk_free(ctx->super); 5978 bs_sequence_finish(seq, bserrno); 5979 free(ctx); 5980 return; 5981 } 5982 5983 rc = bs_super_validate(ctx->super, ctx->bs); 5984 if (rc != 0) { 5985 SPDK_ERRLOG("Not a valid super block\n"); 5986 spdk_free(ctx->super); 5987 bs_sequence_finish(seq, rc); 5988 free(ctx); 5989 return; 5990 } 5991 5992 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 5993 } 5994 5995 void 5996 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 5997 spdk_bs_op_complete cb_fn, void *cb_arg) 5998 { 5999 struct spdk_bs_cpl cpl; 6000 spdk_bs_sequence_t *seq; 6001 struct spdk_bs_set_super_ctx *ctx; 6002 6003 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 6004 6005 ctx = calloc(1, sizeof(*ctx)); 6006 if (!ctx) { 6007 cb_fn(cb_arg, -ENOMEM); 6008 return; 6009 } 6010 6011 ctx->bs = bs; 6012 6013 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 6014 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 6015 if (!ctx->super) { 6016 free(ctx); 6017 cb_fn(cb_arg, 
-ENOMEM); 6018 return; 6019 } 6020 6021 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 6022 cpl.u.bs_basic.cb_fn = cb_fn; 6023 cpl.u.bs_basic.cb_arg = cb_arg; 6024 6025 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 6026 if (!seq) { 6027 spdk_free(ctx->super); 6028 free(ctx); 6029 cb_fn(cb_arg, -ENOMEM); 6030 return; 6031 } 6032 6033 bs->super_blob = blobid; 6034 6035 /* Read super block */ 6036 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 6037 bs_byte_to_lba(bs, sizeof(*ctx->super)), 6038 bs_set_super_read_cpl, ctx); 6039 } 6040 6041 /* END spdk_bs_set_super */ 6042 6043 void 6044 spdk_bs_get_super(struct spdk_blob_store *bs, 6045 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6046 { 6047 if (bs->super_blob == SPDK_BLOBID_INVALID) { 6048 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 6049 } else { 6050 cb_fn(cb_arg, bs->super_blob, 0); 6051 } 6052 } 6053 6054 uint64_t 6055 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 6056 { 6057 return bs->cluster_sz; 6058 } 6059 6060 uint64_t 6061 spdk_bs_get_page_size(struct spdk_blob_store *bs) 6062 { 6063 return bs->md_page_size; 6064 } 6065 6066 uint64_t 6067 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 6068 { 6069 return bs->io_unit_size; 6070 } 6071 6072 uint64_t 6073 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 6074 { 6075 return bs->num_free_clusters; 6076 } 6077 6078 uint64_t 6079 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 6080 { 6081 return bs->total_data_clusters; 6082 } 6083 6084 static int 6085 bs_register_md_thread(struct spdk_blob_store *bs) 6086 { 6087 bs->md_channel = spdk_get_io_channel(bs); 6088 if (!bs->md_channel) { 6089 SPDK_ERRLOG("Failed to get IO channel.\n"); 6090 return -1; 6091 } 6092 6093 return 0; 6094 } 6095 6096 static int 6097 bs_unregister_md_thread(struct spdk_blob_store *bs) 6098 { 6099 spdk_put_io_channel(bs->md_channel); 6100 6101 return 0; 6102 } 6103 6104 spdk_blob_id 6105 spdk_blob_get_id(struct spdk_blob *blob) 6106 { 6107 assert(blob != NULL); 6108 6109 return blob->id; 6110 } 6111 6112 uint64_t 6113 spdk_blob_get_num_io_units(struct spdk_blob *blob) 6114 { 6115 assert(blob != NULL); 6116 6117 return bs_cluster_to_io_unit(blob->bs, blob->active.num_clusters); 6118 } 6119 6120 uint64_t 6121 spdk_blob_get_num_clusters(struct spdk_blob *blob) 6122 { 6123 assert(blob != NULL); 6124 6125 return blob->active.num_clusters; 6126 } 6127 6128 uint64_t 6129 spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob) 6130 { 6131 assert(blob != NULL); 6132 6133 return blob->active.num_allocated_clusters; 6134 } 6135 6136 static uint64_t 6137 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated) 6138 { 6139 uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob); 6140 6141 while (offset < blob_io_unit_num) { 6142 if (bs_io_unit_is_allocated(blob, offset) == is_allocated) { 6143 return offset; 6144 } 6145 6146 offset += bs_num_io_units_to_cluster_boundary(blob, offset); 6147 } 6148 6149 return UINT64_MAX; 6150 } 6151 6152 uint64_t 6153 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6154 { 6155 return blob_find_io_unit(blob, offset, true); 6156 } 6157 6158 uint64_t 6159 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6160 { 6161 return blob_find_io_unit(blob, offset, false); 6162 } 6163 6164 /* START spdk_bs_create_blob */ 6165 6166 static void 6167 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6168 { 6169 struct spdk_blob *blob = cb_arg; 6170 uint32_t 
page_idx = bs_blobid_to_page(blob->id); 6171 6172 if (bserrno != 0) { 6173 spdk_spin_lock(&blob->bs->used_lock); 6174 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 6175 bs_release_md_page(blob->bs, page_idx); 6176 spdk_spin_unlock(&blob->bs->used_lock); 6177 } 6178 6179 blob_free(blob); 6180 6181 bs_sequence_finish(seq, bserrno); 6182 } 6183 6184 static int 6185 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 6186 bool internal) 6187 { 6188 uint64_t i; 6189 size_t value_len = 0; 6190 int rc; 6191 const void *value = NULL; 6192 if (xattrs->count > 0 && xattrs->get_value == NULL) { 6193 return -EINVAL; 6194 } 6195 for (i = 0; i < xattrs->count; i++) { 6196 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 6197 if (value == NULL || value_len == 0) { 6198 return -EINVAL; 6199 } 6200 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 6201 if (rc < 0) { 6202 return rc; 6203 } 6204 } 6205 return 0; 6206 } 6207 6208 static void 6209 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 6210 { 6211 #define FIELD_OK(field) \ 6212 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 6213 6214 #define SET_FIELD(field) \ 6215 if (FIELD_OK(field)) { \ 6216 dst->field = src->field; \ 6217 } \ 6218 6219 SET_FIELD(num_clusters); 6220 SET_FIELD(thin_provision); 6221 SET_FIELD(clear_method); 6222 6223 if (FIELD_OK(xattrs)) { 6224 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 6225 } 6226 6227 SET_FIELD(use_extent_table); 6228 SET_FIELD(esnap_id); 6229 SET_FIELD(esnap_id_len); 6230 6231 dst->opts_size = src->opts_size; 6232 6233 /* You should not remove this statement, but need to update the assert statement 6234 * if you add a new field, and also add a corresponding SET_FIELD statement */ 6235 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size"); 6236 6237 #undef FIELD_OK 6238 #undef SET_FIELD 6239 } 6240 6241 static void 6242 bs_create_blob(struct spdk_blob_store *bs, 6243 const struct spdk_blob_opts *opts, 6244 const struct spdk_blob_xattr_opts *internal_xattrs, 6245 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6246 { 6247 struct spdk_blob *blob; 6248 uint32_t page_idx; 6249 struct spdk_bs_cpl cpl; 6250 struct spdk_blob_opts opts_local; 6251 struct spdk_blob_xattr_opts internal_xattrs_default; 6252 spdk_bs_sequence_t *seq; 6253 spdk_blob_id id; 6254 int rc; 6255 6256 assert(spdk_get_thread() == bs->md_thread); 6257 6258 spdk_spin_lock(&bs->used_lock); 6259 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 6260 if (page_idx == UINT32_MAX) { 6261 spdk_spin_unlock(&bs->used_lock); 6262 cb_fn(cb_arg, 0, -ENOMEM); 6263 return; 6264 } 6265 spdk_bit_array_set(bs->used_blobids, page_idx); 6266 bs_claim_md_page(bs, page_idx); 6267 spdk_spin_unlock(&bs->used_lock); 6268 6269 id = bs_page_to_blobid(page_idx); 6270 6271 SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx); 6272 6273 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 6274 if (opts) { 6275 blob_opts_copy(opts, &opts_local); 6276 } 6277 6278 blob = blob_alloc(bs, id); 6279 if (!blob) { 6280 rc = -ENOMEM; 6281 goto error; 6282 } 6283 6284 blob->use_extent_table = opts_local.use_extent_table; 6285 if (blob->use_extent_table) { 6286 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 6287 } 6288 6289 if (!internal_xattrs) { 6290 blob_xattrs_init(&internal_xattrs_default); 6291 internal_xattrs = &internal_xattrs_default; 6292 } 6293 6294 rc = 
blob_set_xattrs(blob, &opts_local.xattrs, false);
6295 if (rc < 0) {
6296 goto error;
6297 }
6298 
6299 rc = blob_set_xattrs(blob, internal_xattrs, true);
6300 if (rc < 0) {
6301 goto error;
6302 }
6303 
6304 if (opts_local.thin_provision) {
6305 blob_set_thin_provision(blob);
6306 }
6307 
6308 blob_set_clear_method(blob, opts_local.clear_method);
6309 
6310 if (opts_local.esnap_id != NULL) {
6311 if (opts_local.esnap_id_len > UINT16_MAX) {
6312 SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n",
6313 opts_local.esnap_id_len);
6314 rc = -EINVAL;
6315 goto error;
6316 
6317 }
6318 blob_set_thin_provision(blob);
6319 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6320 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID,
6321 opts_local.esnap_id, opts_local.esnap_id_len, true);
6322 if (rc != 0) {
6323 goto error;
6324 }
6325 }
6326 
6327 rc = blob_resize(blob, opts_local.num_clusters);
6328 if (rc < 0) {
6329 goto error;
6330 }
6331 cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6332 cpl.u.blobid.cb_fn = cb_fn;
6333 cpl.u.blobid.cb_arg = cb_arg;
6334 cpl.u.blobid.blobid = blob->id;
6335 
6336 seq = bs_sequence_start_bs(bs->md_channel, &cpl);
6337 if (!seq) {
6338 rc = -ENOMEM;
6339 goto error;
6340 }
6341 
6342 blob_persist(seq, blob, bs_create_blob_cpl, blob);
6343 return;
6344 
6345 error:
6346 SPDK_ERRLOG("Failed to create blob: %s, size in clusters: %lu\n",
6347 spdk_strerror(rc), opts_local.num_clusters);
6348 if (blob != NULL) {
6349 blob_free(blob);
6350 }
6351 spdk_spin_lock(&bs->used_lock);
6352 spdk_bit_array_clear(bs->used_blobids, page_idx);
6353 bs_release_md_page(bs, page_idx);
6354 spdk_spin_unlock(&bs->used_lock);
6355 cb_fn(cb_arg, 0, rc);
6356 }
6357 
6358 void
6359 spdk_bs_create_blob(struct spdk_blob_store *bs,
6360 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6361 {
6362 bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
6363 }
6364 
6365 void
6366 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
6367 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6368 {
6369 bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
6370 }
6371 
6372 /* END spdk_bs_create_blob */
6373 
6374 /* START blob_cleanup */
6375 
6376 struct spdk_clone_snapshot_ctx {
6377 struct spdk_bs_cpl cpl;
6378 int bserrno;
6379 bool frozen;
6380 
6381 struct spdk_io_channel *channel;
6382 
6383 /* Current cluster for inflate operation */
6384 uint64_t cluster;
6385 
6386 /* For inflation, force allocation of all unallocated clusters and remove
6387 * thin-provisioning. Otherwise only decouple the parent and keep the clone thin. */
6388 bool allocate_all;
6389 
6390 struct {
6391 spdk_blob_id id;
6392 struct spdk_blob *blob;
6393 bool md_ro;
6394 } original;
6395 struct {
6396 spdk_blob_id id;
6397 struct spdk_blob *blob;
6398 } new;
6399 
6400 /* xattrs specified for snapshot/clones only. They have no impact on
6401 * the original blob's xattrs.
*/ 6402 const struct spdk_blob_xattr_opts *xattrs; 6403 }; 6404 6405 static void 6406 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 6407 { 6408 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 6409 struct spdk_bs_cpl *cpl = &ctx->cpl; 6410 6411 if (bserrno != 0) { 6412 if (ctx->bserrno != 0) { 6413 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6414 } else { 6415 ctx->bserrno = bserrno; 6416 } 6417 } 6418 6419 switch (cpl->type) { 6420 case SPDK_BS_CPL_TYPE_BLOBID: 6421 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 6422 break; 6423 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 6424 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 6425 break; 6426 default: 6427 SPDK_UNREACHABLE(); 6428 break; 6429 } 6430 6431 free(ctx); 6432 } 6433 6434 static void 6435 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6436 { 6437 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6438 struct spdk_blob *origblob = ctx->original.blob; 6439 6440 if (bserrno != 0) { 6441 if (ctx->bserrno != 0) { 6442 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 6443 } else { 6444 ctx->bserrno = bserrno; 6445 } 6446 } 6447 6448 ctx->original.id = origblob->id; 6449 origblob->locked_operation_in_progress = false; 6450 6451 /* Revert md_ro to original state */ 6452 origblob->md_ro = ctx->original.md_ro; 6453 6454 spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx); 6455 } 6456 6457 static void 6458 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 6459 { 6460 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6461 struct spdk_blob *origblob = ctx->original.blob; 6462 6463 if (bserrno != 0) { 6464 if (ctx->bserrno != 0) { 6465 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6466 } else { 6467 ctx->bserrno = bserrno; 6468 } 6469 } 6470 6471 if (ctx->frozen) { 6472 /* Unfreeze any outstanding I/O */ 6473 blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx); 6474 } else { 6475 bs_snapshot_unfreeze_cpl(ctx, 0); 6476 } 6477 6478 } 6479 6480 static void 6481 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno) 6482 { 6483 struct spdk_blob *newblob = ctx->new.blob; 6484 6485 if (bserrno != 0) { 6486 if (ctx->bserrno != 0) { 6487 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6488 } else { 6489 ctx->bserrno = bserrno; 6490 } 6491 } 6492 6493 ctx->new.id = newblob->id; 6494 spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6495 } 6496 6497 /* END blob_cleanup */ 6498 6499 /* START spdk_bs_create_snapshot */ 6500 6501 static void 6502 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 6503 { 6504 uint64_t *cluster_temp; 6505 uint64_t num_allocated_clusters_temp; 6506 uint32_t *extent_page_temp; 6507 6508 cluster_temp = blob1->active.clusters; 6509 blob1->active.clusters = blob2->active.clusters; 6510 blob2->active.clusters = cluster_temp; 6511 6512 num_allocated_clusters_temp = blob1->active.num_allocated_clusters; 6513 blob1->active.num_allocated_clusters = blob2->active.num_allocated_clusters; 6514 blob2->active.num_allocated_clusters = num_allocated_clusters_temp; 6515 6516 extent_page_temp = blob1->active.extent_pages; 6517 blob1->active.extent_pages = blob2->active.extent_pages; 6518 blob2->active.extent_pages = extent_page_temp; 6519 } 6520 6521 /* Copies an internal xattr */ 6522 static int 6523 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name) 6524 { 6525 const void *val = NULL; 6526 
size_t len;
6527 int bserrno;
6528 
6529 bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true);
6530 if (bserrno != 0) {
6531 SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name);
6532 return bserrno;
6533 }
6534 
6535 bserrno = blob_set_xattr(toblob, name, val, len, true);
6536 if (bserrno != 0) {
6537 SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n",
6538 name, toblob->id);
6539 return bserrno;
6540 }
6541 return 0;
6542 }
6543 
6544 static void
6545 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
6546 {
6547 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6548 struct spdk_blob *origblob = ctx->original.blob;
6549 struct spdk_blob *newblob = ctx->new.blob;
6550 
6551 if (bserrno != 0) {
6552 bs_snapshot_swap_cluster_maps(newblob, origblob);
6553 if (blob_is_esnap_clone(newblob)) {
6554 bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6555 origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6556 }
6557 bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6558 return;
6559 }
6560 
6561 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
6562 bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
6563 if (bserrno != 0) {
6564 bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6565 return;
6566 }
6567 
6568 bs_blob_list_add(ctx->original.blob);
6569 
6570 spdk_blob_set_read_only(newblob);
6571 
6572 /* sync snapshot metadata */
6573 spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6574 }
6575 
6576 static void
6577 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
6578 {
6579 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6580 struct spdk_blob *origblob = ctx->original.blob;
6581 struct spdk_blob *newblob = ctx->new.blob;
6582 
6583 if (bserrno != 0) {
6584 /* return cluster map back to original */
6585 bs_snapshot_swap_cluster_maps(newblob, origblob);
6586 
6587 /* Newblob md sync failed. Valid clusters are only present in origblob.
6588 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred.
6589 * Newblob needs to be reverted to the thin-provisioned state it had at creation in order to close properly.
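 * The asserts below check that the cluster map and extent pages are indeed still all zeroes.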
*/ 6590 blob_set_thin_provision(newblob); 6591 assert(spdk_mem_all_zero(newblob->active.clusters, 6592 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6593 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6594 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6595 6596 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6597 return; 6598 } 6599 6600 /* Set internal xattr for snapshot id */ 6601 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 6602 if (bserrno != 0) { 6603 /* return cluster map back to original */ 6604 bs_snapshot_swap_cluster_maps(newblob, origblob); 6605 blob_set_thin_provision(newblob); 6606 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6607 return; 6608 } 6609 6610 /* Create new back_bs_dev for snapshot */ 6611 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 6612 if (origblob->back_bs_dev == NULL) { 6613 /* return cluster map back to original */ 6614 bs_snapshot_swap_cluster_maps(newblob, origblob); 6615 blob_set_thin_provision(newblob); 6616 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 6617 return; 6618 } 6619 6620 /* Remove the xattr that references an external snapshot */ 6621 if (blob_is_esnap_clone(origblob)) { 6622 origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6623 bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6624 if (bserrno != 0) { 6625 if (bserrno == -ENOENT) { 6626 SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID 6627 " xattr to remove\n", origblob->id); 6628 assert(false); 6629 } else { 6630 /* return cluster map back to original */ 6631 bs_snapshot_swap_cluster_maps(newblob, origblob); 6632 blob_set_thin_provision(newblob); 6633 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6634 return; 6635 } 6636 } 6637 } 6638 6639 bs_blob_list_remove(origblob); 6640 origblob->parent_id = newblob->id; 6641 /* set clone blob as thin provisioned */ 6642 blob_set_thin_provision(origblob); 6643 6644 bs_blob_list_add(newblob); 6645 6646 /* sync clone metadata */ 6647 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 6648 } 6649 6650 static void 6651 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 6652 { 6653 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6654 struct spdk_blob *origblob = ctx->original.blob; 6655 struct spdk_blob *newblob = ctx->new.blob; 6656 int bserrno; 6657 6658 if (rc != 0) { 6659 bs_clone_snapshot_newblob_cleanup(ctx, rc); 6660 return; 6661 } 6662 6663 ctx->frozen = true; 6664 6665 if (blob_is_esnap_clone(origblob)) { 6666 /* Clean up any channels associated with the original blob id because future IO will 6667 * perform IO using the snapshot blob_id. 
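 * IO on the original blob is already frozen at this point, so the channels
 * can be torn down without a completion callback.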
6668 */ 6669 blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL); 6670 } 6671 if (newblob->back_bs_dev) { 6672 blob_back_bs_destroy(newblob); 6673 } 6674 /* set new back_bs_dev for snapshot */ 6675 newblob->back_bs_dev = origblob->back_bs_dev; 6676 /* Set invalid flags from origblob */ 6677 newblob->invalid_flags = origblob->invalid_flags; 6678 6679 /* inherit parent from original blob if set */ 6680 newblob->parent_id = origblob->parent_id; 6681 switch (origblob->parent_id) { 6682 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 6683 bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6684 if (bserrno != 0) { 6685 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6686 return; 6687 } 6688 break; 6689 case SPDK_BLOBID_INVALID: 6690 break; 6691 default: 6692 /* Set internal xattr for snapshot id */ 6693 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6694 &origblob->parent_id, sizeof(spdk_blob_id), true); 6695 if (bserrno != 0) { 6696 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6697 return; 6698 } 6699 } 6700 6701 /* swap cluster maps */ 6702 bs_snapshot_swap_cluster_maps(newblob, origblob); 6703 6704 /* Set the clear method on the new blob to match the original. */ 6705 blob_set_clear_method(newblob, origblob->clear_method); 6706 6707 /* sync snapshot metadata */ 6708 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6709 } 6710 6711 static void 6712 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6713 { 6714 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6715 struct spdk_blob *origblob = ctx->original.blob; 6716 struct spdk_blob *newblob = _blob; 6717 6718 if (bserrno != 0) { 6719 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6720 return; 6721 } 6722 6723 ctx->new.blob = newblob; 6724 assert(spdk_blob_is_thin_provisioned(newblob)); 6725 assert(spdk_mem_all_zero(newblob->active.clusters, 6726 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6727 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6728 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6729 6730 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6731 } 6732 6733 static void 6734 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6735 { 6736 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6737 struct spdk_blob *origblob = ctx->original.blob; 6738 6739 if (bserrno != 0) { 6740 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6741 return; 6742 } 6743 6744 ctx->new.id = blobid; 6745 ctx->cpl.u.blobid.blobid = blobid; 6746 6747 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6748 } 6749 6750 6751 static void 6752 bs_xattr_snapshot(void *arg, const char *name, 6753 const void **value, size_t *value_len) 6754 { 6755 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6756 6757 struct spdk_blob *blob = (struct spdk_blob *)arg; 6758 *value = &blob->id; 6759 *value_len = sizeof(blob->id); 6760 } 6761 6762 static void 6763 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6764 { 6765 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6766 struct spdk_blob_opts opts; 6767 struct spdk_blob_xattr_opts internal_xattrs; 6768 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6769 6770 if (bserrno != 0) { 6771 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6772 return; 6773 } 6774 6775 
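	/* The original blob is now open; from here on, error paths must release it,
	 * so they finish through spdk_blob_close() with bs_clone_snapshot_cleanup_finish()
	 * as the completion. */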
ctx->original.blob = _blob; 6776 6777 if (_blob->data_ro || _blob->md_ro) { 6778 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id 0x%" 6779 PRIx64 "\n", _blob->id); 6780 ctx->bserrno = -EINVAL; 6781 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6782 return; 6783 } 6784 6785 if (_blob->locked_operation_in_progress) { 6786 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6787 ctx->bserrno = -EBUSY; 6788 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6789 return; 6790 } 6791 6792 _blob->locked_operation_in_progress = true; 6793 6794 spdk_blob_opts_init(&opts, sizeof(opts)); 6795 blob_xattrs_init(&internal_xattrs); 6796 6797 /* Change the size of new blob to the same as in original blob, 6798 * but do not allocate clusters */ 6799 opts.thin_provision = true; 6800 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6801 opts.use_extent_table = _blob->use_extent_table; 6802 6803 /* If there are any xattrs specified for snapshot, set them now */ 6804 if (ctx->xattrs) { 6805 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6806 } 6807 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6808 internal_xattrs.count = 1; 6809 internal_xattrs.ctx = _blob; 6810 internal_xattrs.names = xattrs_names; 6811 internal_xattrs.get_value = bs_xattr_snapshot; 6812 6813 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6814 bs_snapshot_newblob_create_cpl, ctx); 6815 } 6816 6817 void 6818 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 6819 const struct spdk_blob_xattr_opts *snapshot_xattrs, 6820 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6821 { 6822 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6823 6824 if (!ctx) { 6825 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6826 return; 6827 } 6828 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6829 ctx->cpl.u.blobid.cb_fn = cb_fn; 6830 ctx->cpl.u.blobid.cb_arg = cb_arg; 6831 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6832 ctx->bserrno = 0; 6833 ctx->frozen = false; 6834 ctx->original.id = blobid; 6835 ctx->xattrs = snapshot_xattrs; 6836 6837 spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx); 6838 } 6839 /* END spdk_bs_create_snapshot */ 6840 6841 /* START spdk_bs_create_clone */ 6842 6843 static void 6844 bs_xattr_clone(void *arg, const char *name, 6845 const void **value, size_t *value_len) 6846 { 6847 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 6848 6849 struct spdk_blob *blob = (struct spdk_blob *)arg; 6850 *value = &blob->id; 6851 *value_len = sizeof(blob->id); 6852 } 6853 6854 static void 6855 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6856 { 6857 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6858 struct spdk_blob *clone = _blob; 6859 6860 ctx->new.blob = clone; 6861 bs_blob_list_add(clone); 6862 6863 spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx); 6864 } 6865 6866 static void 6867 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6868 { 6869 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6870 6871 ctx->cpl.u.blobid.blobid = blobid; 6872 spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx); 6873 } 6874 6875 static void 6876 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6877 { 6878 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6879 struct 
spdk_blob_opts opts; 6880 struct spdk_blob_xattr_opts internal_xattrs; 6881 char *xattr_names[] = { BLOB_SNAPSHOT }; 6882 6883 if (bserrno != 0) { 6884 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6885 return; 6886 } 6887 6888 ctx->original.blob = _blob; 6889 ctx->original.md_ro = _blob->md_ro; 6890 6891 if (!_blob->data_ro || !_blob->md_ro) { 6892 SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n"); 6893 ctx->bserrno = -EINVAL; 6894 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6895 return; 6896 } 6897 6898 if (_blob->locked_operation_in_progress) { 6899 SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n"); 6900 ctx->bserrno = -EBUSY; 6901 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6902 return; 6903 } 6904 6905 _blob->locked_operation_in_progress = true; 6906 6907 spdk_blob_opts_init(&opts, sizeof(opts)); 6908 blob_xattrs_init(&internal_xattrs); 6909 6910 opts.thin_provision = true; 6911 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6912 opts.use_extent_table = _blob->use_extent_table; 6913 if (ctx->xattrs) { 6914 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6915 } 6916 6917 /* Set internal xattr BLOB_SNAPSHOT */ 6918 internal_xattrs.count = 1; 6919 internal_xattrs.ctx = _blob; 6920 internal_xattrs.names = xattr_names; 6921 internal_xattrs.get_value = bs_xattr_clone; 6922 6923 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6924 bs_clone_newblob_create_cpl, ctx); 6925 } 6926 6927 void 6928 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6929 const struct spdk_blob_xattr_opts *clone_xattrs, 6930 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6931 { 6932 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6933 6934 if (!ctx) { 6935 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6936 return; 6937 } 6938 6939 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6940 ctx->cpl.u.blobid.cb_fn = cb_fn; 6941 ctx->cpl.u.blobid.cb_arg = cb_arg; 6942 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6943 ctx->bserrno = 0; 6944 ctx->xattrs = clone_xattrs; 6945 ctx->original.id = blobid; 6946 6947 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6948 } 6949 6950 /* END spdk_bs_create_clone */ 6951 6952 /* START spdk_bs_inflate_blob */ 6953 6954 static void 6955 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6956 { 6957 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6958 struct spdk_blob *_blob = ctx->original.blob; 6959 6960 if (bserrno != 0) { 6961 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6962 return; 6963 } 6964 6965 /* Temporarily override md_ro flag for MD modification */ 6966 _blob->md_ro = false; 6967 6968 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6969 if (bserrno != 0) { 6970 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6971 return; 6972 } 6973 6974 assert(_parent != NULL); 6975 6976 bs_blob_list_remove(_blob); 6977 _blob->parent_id = _parent->id; 6978 6979 blob_back_bs_destroy(_blob); 6980 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6981 bs_blob_list_add(_blob); 6982 6983 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6984 } 6985 6986 static void 6987 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 6988 { 6989 struct spdk_blob *_blob = ctx->original.blob; 6990 struct spdk_blob *_parent; 6991 6992 if (ctx->allocate_all) { 6993 /* remove thin provisioning */ 6994 bs_blob_list_remove(_blob); 
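	/* Drop whichever parent reference exists: the external snapshot xattr for
	 * esnap clones, the regular snapshot xattr otherwise. */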
6995 if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6996 blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
6997 _blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
6998 } else {
6999 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
7000 }
7001 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
7002 blob_back_bs_destroy(_blob);
7003 _blob->parent_id = SPDK_BLOBID_INVALID;
7004 } else {
7005 /* For now, esnap clones always have allocate_all set. */
7006 assert(!blob_is_esnap_clone(_blob));
7007 
7008 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
7009 if (_parent->parent_id != SPDK_BLOBID_INVALID) {
7010 /* We must change the parent of the inflated blob */
7011 spdk_bs_open_blob(_blob->bs, _parent->parent_id,
7012 bs_inflate_blob_set_parent_cpl, ctx);
7013 return;
7014 }
7015 
7016 bs_blob_list_remove(_blob);
7017 _blob->parent_id = SPDK_BLOBID_INVALID;
7018 blob_back_bs_destroy(_blob);
7019 _blob->back_bs_dev = bs_create_zeroes_dev();
7020 }
7021 
7022 /* Temporarily override md_ro flag for MD modification */
7023 _blob->md_ro = false;
7024 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
7025 _blob->state = SPDK_BLOB_STATE_DIRTY;
7026 
7027 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
7028 }
7029 
7030 /* Check if cluster needs allocation */
7031 static inline bool
7032 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
7033 {
7034 struct spdk_blob_bs_dev *b;
7035 
7036 assert(blob != NULL);
7037 
7038 if (blob->active.clusters[cluster] != 0) {
7039 /* Cluster is already allocated */
7040 return false;
7041 }
7042 
7043 if (blob->parent_id == SPDK_BLOBID_INVALID) {
7044 /* Blob has no parent */
7045 return allocate_all;
7046 }
7047 
7048 if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
7049 return true;
7050 }
7051 
7052 b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
7053 return (allocate_all || b->blob->active.clusters[cluster] != 0);
7054 }
7055 
7056 static void
7057 bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
7058 {
7059 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
7060 struct spdk_blob *_blob = ctx->original.blob;
7061 struct spdk_bs_cpl cpl;
7062 spdk_bs_user_op_t *op;
7063 uint64_t offset;
7064 
7065 if (bserrno != 0) {
7066 bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
7067 return;
7068 }
7069 
7070 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
7071 if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
7072 break;
7073 }
7074 }
7075 
7076 if (ctx->cluster < _blob->active.num_clusters) {
7077 offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);
7078 
7079 /* The offset was captured above, so we may safely advance the cluster index before copying */
7080 ctx->cluster++;
7081 
7082 /* Use a dummy 0B read as a context for cluster copy */
7083 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7084 cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
7085 cpl.u.blob_basic.cb_arg = ctx;
7086 
7087 op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
7088 NULL, 0, offset, 0);
7089 if (!op) {
7090 bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
7091 return;
7092 }
7093 
7094 bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
7095 } else {
7096 bs_inflate_blob_done(ctx);
7097 }
7098 }
7099 
7100 static void
7101 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
7102 {
7103 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
7104 uint64_t clusters_needed;
7105 uint64_t i;
7106 
7107 if (bserrno != 0) {
7108 bs_clone_snapshot_cleanup_finish(ctx, bserrno);
7109 return;
7110 }
7111 
7112 ctx->original.blob = _blob;
7113 ctx->original.md_ro = _blob->md_ro;
7114 
7115 if (_blob->locked_operation_in_progress) {
7116 SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
7117 ctx->bserrno = -EBUSY;
7118 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
7119 return;
7120 }
7121 
7122 _blob->locked_operation_in_progress = true;
7123 
7124 switch (_blob->parent_id) {
7125 case SPDK_BLOBID_INVALID:
7126 if (!ctx->allocate_all) {
7127 /* This blob has no parent, so we cannot decouple it. */
7128 SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
7129 bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
7130 return;
7131 }
7132 break;
7133 case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
7134 /*
7135 * It would be better to rely on back_bs_dev->is_zeroes() to determine which
7136 * clusters require allocation. Until there is a blobstore consumer that
7137 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes(), it is not
7138 * worth the effort.
7139 */
7140 ctx->allocate_all = true;
7141 break;
7142 default:
7143 break;
7144 }
7145 
7146 if (spdk_blob_is_thin_provisioned(_blob) == false) {
7147 /* This is not a thin-provisioned blob. No need to inflate. */
7148 bs_clone_snapshot_origblob_cleanup(ctx, 0);
7149 return;
7150 }
7151 
7152 /* Do two passes - one to verify that we can obtain enough clusters
7153 * and another to actually claim them.
7154 */
7155 clusters_needed = 0;
7156 for (i = 0; i < _blob->active.num_clusters; i++) {
7157 if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
7158 clusters_needed++;
7159 }
7160 }
7161 
7162 if (clusters_needed > _blob->bs->num_free_clusters) {
7163 /* Not enough free clusters. Cannot satisfy the request.
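 * Fail now, before the second pass has claimed anything.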
*/ 7164 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 7165 return; 7166 } 7167 7168 ctx->cluster = 0; 7169 bs_inflate_blob_touch_next(ctx, 0); 7170 } 7171 7172 static void 7173 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7174 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 7175 { 7176 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 7177 7178 if (!ctx) { 7179 cb_fn(cb_arg, -ENOMEM); 7180 return; 7181 } 7182 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7183 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7184 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7185 ctx->bserrno = 0; 7186 ctx->original.id = blobid; 7187 ctx->channel = channel; 7188 ctx->allocate_all = allocate_all; 7189 7190 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 7191 } 7192 7193 void 7194 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7195 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7196 { 7197 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 7198 } 7199 7200 void 7201 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7202 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7203 { 7204 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 7205 } 7206 /* END spdk_bs_inflate_blob */ 7207 7208 /* START spdk_bs_blob_shallow_copy */ 7209 7210 struct shallow_copy_ctx { 7211 struct spdk_bs_cpl cpl; 7212 int bserrno; 7213 7214 /* Blob source for copy */ 7215 struct spdk_blob_store *bs; 7216 spdk_blob_id blobid; 7217 struct spdk_blob *blob; 7218 struct spdk_io_channel *blob_channel; 7219 7220 /* Destination device for copy */ 7221 struct spdk_bs_dev *ext_dev; 7222 struct spdk_io_channel *ext_channel; 7223 7224 /* Current cluster for copy operation */ 7225 uint64_t cluster; 7226 7227 /* Buffer for blob reading */ 7228 uint8_t *read_buff; 7229 7230 /* Struct for external device writing */ 7231 struct spdk_bs_dev_cb_args ext_args; 7232 7233 /* Actual number of copied clusters */ 7234 uint64_t copied_clusters_count; 7235 7236 /* Status callback for updates about the ongoing operation */ 7237 spdk_blob_shallow_copy_status status_cb; 7238 7239 /* Argument passed to function status_cb */ 7240 void *status_cb_arg; 7241 }; 7242 7243 static void 7244 bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno) 7245 { 7246 struct shallow_copy_ctx *ctx = cb_arg; 7247 struct spdk_bs_cpl *cpl = &ctx->cpl; 7248 7249 if (bserrno != 0) { 7250 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, cleanup error %d\n", ctx->blob->id, bserrno); 7251 ctx->bserrno = bserrno; 7252 } 7253 7254 ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel); 7255 spdk_free(ctx->read_buff); 7256 7257 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 7258 7259 free(ctx); 7260 } 7261 7262 static void 7263 bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, int bserrno) 7264 { 7265 struct shallow_copy_ctx *ctx = cb_arg; 7266 struct spdk_blob *_blob = ctx->blob; 7267 7268 if (bserrno != 0) { 7269 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, ext dev write error %d\n", ctx->blob->id, bserrno); 7270 ctx->bserrno = bserrno; 7271 _blob->locked_operation_in_progress = false; 7272 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7273 return; 7274 } 7275 7276 ctx->cluster++; 7277 if (ctx->status_cb) { 7278 ctx->copied_clusters_count++; 7279 ctx->status_cb(ctx->copied_clusters_count, ctx->status_cb_arg); 7280 } 7281 7282 
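	/* Advance to the next allocated cluster; once none remain, the blob is
	 * closed and the copy completes. */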
bs_shallow_copy_cluster_find_next(ctx);
7283 }
7284 
7285 static void
7286 bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno)
7287 {
7288 struct shallow_copy_ctx *ctx = cb_arg;
7289 struct spdk_bs_dev *ext_dev = ctx->ext_dev;
7290 struct spdk_blob *_blob = ctx->blob;
7291 
7292 if (bserrno != 0) {
7293 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob read error %d\n", ctx->blob->id, bserrno);
7294 ctx->bserrno = bserrno;
7295 _blob->locked_operation_in_progress = false;
7296 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7297 return;
7298 }
7299 
7300 ctx->ext_args.channel = ctx->ext_channel;
7301 ctx->ext_args.cb_fn = bs_shallow_copy_bdev_write_cpl;
7302 ctx->ext_args.cb_arg = ctx;
7303 
7304 ext_dev->write(ext_dev, ctx->ext_channel, ctx->read_buff,
7305 bs_cluster_to_lba(_blob->bs, ctx->cluster),
7306 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
7307 &ctx->ext_args);
7308 }
7309 
7310 static void
7311 bs_shallow_copy_cluster_find_next(void *cb_arg)
7312 {
7313 struct shallow_copy_ctx *ctx = cb_arg;
7314 struct spdk_blob *_blob = ctx->blob;
7315 
7316 while (ctx->cluster < _blob->active.num_clusters) {
7317 if (_blob->active.clusters[ctx->cluster] != 0) {
7318 break;
7319 }
7320 
7321 ctx->cluster++;
7322 }
7323 
7324 if (ctx->cluster < _blob->active.num_clusters) {
7325 blob_request_submit_op_single(ctx->blob_channel, _blob, ctx->read_buff,
7326 bs_cluster_to_lba(_blob->bs, ctx->cluster),
7327 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
7328 bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ);
7329 } else {
7330 _blob->locked_operation_in_progress = false;
7331 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7332 }
7333 }
7334 
7335 static void
7336 bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
7337 {
7338 struct shallow_copy_ctx *ctx = cb_arg;
7339 struct spdk_bs_dev *ext_dev = ctx->ext_dev;
7340 uint32_t blob_block_size;
7341 uint64_t blob_total_size;
7342 
7343 if (bserrno != 0) {
7344 SPDK_ERRLOG("Shallow copy blob open error %d\n", bserrno);
7345 ctx->bserrno = bserrno;
7346 bs_shallow_copy_cleanup_finish(ctx, 0);
7347 return;
7348 }
7349 
7350 if (!spdk_blob_is_read_only(_blob)) {
7351 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob must be read only\n", _blob->id);
7352 ctx->bserrno = -EPERM;
7353 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7354 return;
7355 }
7356 
7357 blob_block_size = _blob->bs->dev->blocklen;
7358 blob_total_size = spdk_blob_get_num_clusters(_blob) * spdk_bs_get_cluster_size(_blob->bs);
7359 
7360 if (blob_total_size > ext_dev->blockcnt * ext_dev->blocklen) {
7361 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device must be at least as large as the blob\n",
7362 _blob->id);
7363 ctx->bserrno = -EINVAL;
7364 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7365 return;
7366 }
7367 
7368 if (blob_block_size % ext_dev->blocklen != 0) {
7369 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device block size is not compatible with "
7370 "blobstore block size\n", _blob->id);
7371 ctx->bserrno = -EINVAL;
7372 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7373 return;
7374 }
7375 
7376 ctx->blob = _blob;
7377 
7378 if (_blob->locked_operation_in_progress) {
7379 SPDK_DEBUGLOG(blob, "blob 0x%" PRIx64 " shallow copy - another operation in progress\n", _blob->id);
7380 ctx->bserrno = -EBUSY;
7381 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7382 return;
7383 }
7384 
7385 _blob->locked_operation_in_progress = true;
7386 
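	/* Scan from the first cluster; only clusters allocated in this blob itself
	 * are copied, which is what makes the copy shallow. */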
7387 ctx->cluster = 0; 7388 bs_shallow_copy_cluster_find_next(ctx); 7389 } 7390 7391 int 7392 spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7393 spdk_blob_id blobid, struct spdk_bs_dev *ext_dev, 7394 spdk_blob_shallow_copy_status status_cb_fn, void *status_cb_arg, 7395 spdk_blob_op_complete cb_fn, void *cb_arg) 7396 { 7397 struct shallow_copy_ctx *ctx; 7398 struct spdk_io_channel *ext_channel; 7399 7400 ctx = calloc(1, sizeof(*ctx)); 7401 if (!ctx) { 7402 return -ENOMEM; 7403 } 7404 7405 ctx->bs = bs; 7406 ctx->blobid = blobid; 7407 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7408 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7409 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7410 ctx->bserrno = 0; 7411 ctx->blob_channel = channel; 7412 ctx->status_cb = status_cb_fn; 7413 ctx->status_cb_arg = status_cb_arg; 7414 ctx->read_buff = spdk_malloc(bs->cluster_sz, bs->dev->blocklen, NULL, 7415 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 7416 if (!ctx->read_buff) { 7417 free(ctx); 7418 return -ENOMEM; 7419 } 7420 7421 ext_channel = ext_dev->create_channel(ext_dev); 7422 if (!ext_channel) { 7423 spdk_free(ctx->read_buff); 7424 free(ctx); 7425 return -ENOMEM; 7426 } 7427 ctx->ext_dev = ext_dev; 7428 ctx->ext_channel = ext_channel; 7429 7430 spdk_bs_open_blob(ctx->bs, ctx->blobid, bs_shallow_copy_blob_open_cpl, ctx); 7431 7432 return 0; 7433 } 7434 /* END spdk_bs_blob_shallow_copy */ 7435 7436 /* START spdk_bs_blob_set_parent */ 7437 7438 struct set_parent_ctx { 7439 struct spdk_blob_store *bs; 7440 int bserrno; 7441 spdk_bs_op_complete cb_fn; 7442 void *cb_arg; 7443 7444 struct spdk_blob *blob; 7445 bool blob_md_ro; 7446 7447 struct blob_parent parent; 7448 }; 7449 7450 static void 7451 bs_set_parent_cleanup_finish(void *cb_arg, int bserrno) 7452 { 7453 struct set_parent_ctx *ctx = cb_arg; 7454 7455 assert(ctx != NULL); 7456 7457 if (bserrno != 0) { 7458 SPDK_ERRLOG("blob set parent finish error %d\n", bserrno); 7459 if (ctx->bserrno == 0) { 7460 ctx->bserrno = bserrno; 7461 } 7462 } 7463 7464 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7465 7466 free(ctx); 7467 } 7468 7469 static void 7470 bs_set_parent_close_snapshot(void *cb_arg, int bserrno) 7471 { 7472 struct set_parent_ctx *ctx = cb_arg; 7473 7474 if (ctx->bserrno != 0) { 7475 spdk_blob_close(ctx->parent.u.snapshot.blob, bs_set_parent_cleanup_finish, ctx); 7476 return; 7477 } 7478 7479 if (bserrno != 0) { 7480 SPDK_ERRLOG("blob close error %d\n", bserrno); 7481 ctx->bserrno = bserrno; 7482 } 7483 7484 bs_set_parent_cleanup_finish(ctx, ctx->bserrno); 7485 } 7486 7487 static void 7488 bs_set_parent_close_blob(void *cb_arg, int bserrno) 7489 { 7490 struct set_parent_ctx *ctx = cb_arg; 7491 struct spdk_blob *blob = ctx->blob; 7492 struct spdk_blob *snapshot = ctx->parent.u.snapshot.blob; 7493 7494 if (bserrno != 0 && ctx->bserrno == 0) { 7495 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7496 ctx->bserrno = bserrno; 7497 } 7498 7499 /* Revert md_ro to original state */ 7500 blob->md_ro = ctx->blob_md_ro; 7501 7502 blob->locked_operation_in_progress = false; 7503 snapshot->locked_operation_in_progress = false; 7504 7505 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7506 } 7507 7508 static void 7509 bs_set_parent_set_back_bs_dev_done(void *cb_arg, int bserrno) 7510 { 7511 struct set_parent_ctx *ctx = cb_arg; 7512 struct spdk_blob *blob = ctx->blob; 7513 7514 if (bserrno != 0) { 7515 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7516 ctx->bserrno = bserrno; 7517 bs_set_parent_close_blob(ctx, bserrno); 7518 
return;
7519 	}
7520 
7521 	spdk_blob_sync_md(blob, bs_set_parent_close_blob, ctx);
7522 }
7523 
7524 static int
7525 bs_set_parent_refs(struct spdk_blob *blob, struct blob_parent *parent)
7526 {
7527 	int rc;
7528 
7529 	bs_blob_list_remove(blob);
7530 
7531 	rc = blob_set_xattr(blob, BLOB_SNAPSHOT, &parent->u.snapshot.id, sizeof(spdk_blob_id), true);
7532 	if (rc != 0) {
7533 		SPDK_ERRLOG("error %d setting snapshot xattr\n", rc);
7534 		return rc;
7535 	}
7536 	blob->parent_id = parent->u.snapshot.id;
7537 
7538 	if (blob_is_esnap_clone(blob)) {
7539 		/* Remove the xattr that references the external snapshot */
7540 		blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
7541 		blob_remove_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
7542 	}
7543 
7544 	bs_blob_list_add(blob);
7545 
7546 	return 0;
7547 }
7548 
7549 static void
7550 bs_set_parent_snapshot_open_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
7551 {
7552 	struct set_parent_ctx *ctx = cb_arg;
7553 	struct spdk_blob *blob = ctx->blob;
7554 	struct spdk_bs_dev *back_bs_dev;
7555 
7556 	if (bserrno != 0) {
7557 		SPDK_ERRLOG("snapshot open error %d\n", bserrno);
7558 		ctx->bserrno = bserrno;
7559 		spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx);
7560 		return;
7561 	}
7562 
7563 	ctx->parent.u.snapshot.blob = snapshot;
7564 	ctx->parent.u.snapshot.id = snapshot->id;
7565 
7566 	if (!spdk_blob_is_snapshot(snapshot)) {
7567 		SPDK_ERRLOG("parent blob is not a snapshot\n");
7568 		ctx->bserrno = -EINVAL;
7569 		spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
7570 		return;
7571 	}
7572 
7573 	if (blob->active.num_clusters != snapshot->active.num_clusters) {
7574 		SPDK_ERRLOG("parent blob has a different number of clusters than the child\n");
7575 		ctx->bserrno = -EINVAL;
7576 		spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
7577 		return;
7578 	}
7579 
7580 	if (blob->locked_operation_in_progress || snapshot->locked_operation_in_progress) {
7581 		SPDK_ERRLOG("cannot set parent of blob, another operation in progress\n");
7582 		ctx->bserrno = -EBUSY;
7583 		spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
7584 		return;
7585 	}
7586 
7587 	blob->locked_operation_in_progress = true;
7588 	snapshot->locked_operation_in_progress = true;
7589 
7590 	/* Temporarily override md_ro flag for MD modification */
7591 	blob->md_ro = false;
7592 
7593 	back_bs_dev = bs_create_blob_bs_dev(snapshot);
7594 
7595 	blob_set_back_bs_dev(blob, back_bs_dev, bs_set_parent_refs, &ctx->parent,
7596 			     bs_set_parent_set_back_bs_dev_done,
7597 			     ctx);
7598 }
7599 
7600 static void
7601 bs_set_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
7602 {
7603 	struct set_parent_ctx *ctx = cb_arg;
7604 
7605 	if (bserrno != 0) {
7606 		SPDK_ERRLOG("blob open error %d\n", bserrno);
7607 		ctx->bserrno = bserrno;
7608 		bs_set_parent_cleanup_finish(ctx, 0);
7609 		return;
7610 	}
7611 
7612 	if (!spdk_blob_is_thin_provisioned(blob)) {
7613 		SPDK_ERRLOG("blob is not thin-provisioned\n");
7614 		ctx->bserrno = -EINVAL;
7615 		spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx);
7616 		return;
7617 	}
7618 
7619 	ctx->blob = blob;
7620 	ctx->blob_md_ro = blob->md_ro;
7621 
7622 	spdk_bs_open_blob(ctx->bs, ctx->parent.u.snapshot.id, bs_set_parent_snapshot_open_cpl, ctx);
7623 }
7624 
7625 void
7626 spdk_bs_blob_set_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id,
7627 			spdk_blob_id snapshot_id, spdk_blob_op_complete cb_fn, void *cb_arg)
7628 {
7629 	struct set_parent_ctx *ctx;
7630 
7631 	if (snapshot_id == SPDK_BLOBID_INVALID) {
7632 		SPDK_ERRLOG("snapshot id not valid\n");
7633 		cb_fn(cb_arg,
-EINVAL); 7634 return; 7635 } 7636 7637 if (blob_id == snapshot_id) { 7638 SPDK_ERRLOG("blob id and snapshot id cannot be the same\n"); 7639 cb_fn(cb_arg, -EINVAL); 7640 return; 7641 } 7642 7643 if (spdk_blob_get_parent_snapshot(bs, blob_id) == snapshot_id) { 7644 SPDK_NOTICELOG("snapshot is already the parent of blob\n"); 7645 cb_fn(cb_arg, -EEXIST); 7646 return; 7647 } 7648 7649 ctx = calloc(1, sizeof(*ctx)); 7650 if (!ctx) { 7651 cb_fn(cb_arg, -ENOMEM); 7652 return; 7653 } 7654 7655 ctx->bs = bs; 7656 ctx->parent.u.snapshot.id = snapshot_id; 7657 ctx->cb_fn = cb_fn; 7658 ctx->cb_arg = cb_arg; 7659 ctx->bserrno = 0; 7660 7661 spdk_bs_open_blob(bs, blob_id, bs_set_parent_blob_open_cpl, ctx); 7662 } 7663 /* END spdk_bs_blob_set_parent */ 7664 7665 /* START spdk_bs_blob_set_external_parent */ 7666 7667 static void 7668 bs_set_external_parent_cleanup_finish(void *cb_arg, int bserrno) 7669 { 7670 struct set_parent_ctx *ctx = cb_arg; 7671 7672 if (bserrno != 0) { 7673 SPDK_ERRLOG("blob set external parent finish error %d\n", bserrno); 7674 if (ctx->bserrno == 0) { 7675 ctx->bserrno = bserrno; 7676 } 7677 } 7678 7679 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7680 7681 free(ctx->parent.u.esnap.id); 7682 free(ctx); 7683 } 7684 7685 static void 7686 bs_set_external_parent_close_blob(void *cb_arg, int bserrno) 7687 { 7688 struct set_parent_ctx *ctx = cb_arg; 7689 struct spdk_blob *blob = ctx->blob; 7690 7691 if (bserrno != 0 && ctx->bserrno == 0) { 7692 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7693 ctx->bserrno = bserrno; 7694 } 7695 7696 /* Revert md_ro to original state */ 7697 blob->md_ro = ctx->blob_md_ro; 7698 7699 blob->locked_operation_in_progress = false; 7700 7701 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7702 } 7703 7704 static void 7705 bs_set_external_parent_unfrozen(void *cb_arg, int bserrno) 7706 { 7707 struct set_parent_ctx *ctx = cb_arg; 7708 struct spdk_blob *blob = ctx->blob; 7709 7710 if (bserrno != 0) { 7711 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7712 ctx->bserrno = bserrno; 7713 bs_set_external_parent_close_blob(ctx, bserrno); 7714 return; 7715 } 7716 7717 spdk_blob_sync_md(blob, bs_set_external_parent_close_blob, ctx); 7718 } 7719 7720 static int 7721 bs_set_external_parent_refs(struct spdk_blob *blob, struct blob_parent *parent) 7722 { 7723 int rc; 7724 7725 bs_blob_list_remove(blob); 7726 7727 if (spdk_blob_is_clone(blob)) { 7728 /* Remove the xattr that references the snapshot */ 7729 blob->parent_id = SPDK_BLOBID_INVALID; 7730 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 7731 } 7732 7733 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, parent->u.esnap.id, 7734 parent->u.esnap.id_len, true); 7735 if (rc != 0) { 7736 SPDK_ERRLOG("error %d setting external snapshot xattr\n", rc); 7737 return rc; 7738 } 7739 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 7740 blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 7741 7742 bs_blob_list_add(blob); 7743 7744 return 0; 7745 } 7746 7747 static void 7748 bs_set_external_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7749 { 7750 struct set_parent_ctx *ctx = cb_arg; 7751 const void *esnap_id; 7752 size_t esnap_id_len; 7753 int rc; 7754 7755 if (bserrno != 0) { 7756 SPDK_ERRLOG("blob open error %d\n", bserrno); 7757 ctx->bserrno = bserrno; 7758 bs_set_parent_cleanup_finish(ctx, 0); 7759 return; 7760 } 7761 7762 ctx->blob = blob; 7763 ctx->blob_md_ro = blob->md_ro; 7764 7765 rc = spdk_blob_get_esnap_id(blob, &esnap_id, &esnap_id_len); 7766 if (rc == 0 && 
esnap_id != NULL && esnap_id_len == ctx->parent.u.esnap.id_len && 7767 memcmp(esnap_id, ctx->parent.u.esnap.id, esnap_id_len) == 0) { 7768 SPDK_ERRLOG("external snapshot is already the parent of blob\n"); 7769 ctx->bserrno = -EEXIST; 7770 goto error; 7771 } 7772 7773 if (!spdk_blob_is_thin_provisioned(blob)) { 7774 SPDK_ERRLOG("blob is not thin-provisioned\n"); 7775 ctx->bserrno = -EINVAL; 7776 goto error; 7777 } 7778 7779 if (blob->locked_operation_in_progress) { 7780 SPDK_ERRLOG("cannot set external parent of blob, another operation in progress\n"); 7781 ctx->bserrno = -EBUSY; 7782 goto error; 7783 } 7784 7785 blob->locked_operation_in_progress = true; 7786 7787 /* Temporarily override md_ro flag for MD modification */ 7788 blob->md_ro = false; 7789 7790 blob_set_back_bs_dev(blob, ctx->parent.u.esnap.back_bs_dev, bs_set_external_parent_refs, 7791 &ctx->parent, bs_set_external_parent_unfrozen, ctx); 7792 return; 7793 7794 error: 7795 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7796 } 7797 7798 void 7799 spdk_bs_blob_set_external_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id, 7800 struct spdk_bs_dev *esnap_bs_dev, const void *esnap_id, 7801 uint32_t esnap_id_len, spdk_blob_op_complete cb_fn, void *cb_arg) 7802 { 7803 struct set_parent_ctx *ctx; 7804 uint64_t esnap_dev_size, cluster_sz; 7805 7806 if (sizeof(blob_id) == esnap_id_len && memcmp(&blob_id, esnap_id, sizeof(blob_id)) == 0) { 7807 SPDK_ERRLOG("blob id and external snapshot id cannot be the same\n"); 7808 cb_fn(cb_arg, -EINVAL); 7809 return; 7810 } 7811 7812 esnap_dev_size = esnap_bs_dev->blockcnt * esnap_bs_dev->blocklen; 7813 cluster_sz = spdk_bs_get_cluster_size(bs); 7814 if ((esnap_dev_size % cluster_sz) != 0) { 7815 SPDK_ERRLOG("Esnap device size %" PRIu64 " is not an integer multiple of " 7816 "cluster size %" PRIu64 "\n", esnap_dev_size, cluster_sz); 7817 cb_fn(cb_arg, -EINVAL); 7818 return; 7819 } 7820 7821 ctx = calloc(1, sizeof(*ctx)); 7822 if (!ctx) { 7823 cb_fn(cb_arg, -ENOMEM); 7824 return; 7825 } 7826 7827 ctx->parent.u.esnap.id = calloc(1, esnap_id_len); 7828 if (!ctx->parent.u.esnap.id) { 7829 free(ctx); 7830 cb_fn(cb_arg, -ENOMEM); 7831 return; 7832 } 7833 7834 ctx->bs = bs; 7835 ctx->parent.u.esnap.back_bs_dev = esnap_bs_dev; 7836 memcpy(ctx->parent.u.esnap.id, esnap_id, esnap_id_len); 7837 ctx->parent.u.esnap.id_len = esnap_id_len; 7838 ctx->cb_fn = cb_fn; 7839 ctx->cb_arg = cb_arg; 7840 ctx->bserrno = 0; 7841 7842 spdk_bs_open_blob(bs, blob_id, bs_set_external_parent_blob_open_cpl, ctx); 7843 } 7844 /* END spdk_bs_blob_set_external_parent */ 7845 7846 /* START spdk_blob_resize */ 7847 struct spdk_bs_resize_ctx { 7848 spdk_blob_op_complete cb_fn; 7849 void *cb_arg; 7850 struct spdk_blob *blob; 7851 uint64_t sz; 7852 int rc; 7853 }; 7854 7855 static void 7856 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 7857 { 7858 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7859 7860 if (rc != 0) { 7861 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 7862 } 7863 7864 if (ctx->rc != 0) { 7865 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 7866 rc = ctx->rc; 7867 } 7868 7869 ctx->blob->locked_operation_in_progress = false; 7870 7871 ctx->cb_fn(ctx->cb_arg, rc); 7872 free(ctx); 7873 } 7874 7875 static void 7876 bs_resize_freeze_cpl(void *cb_arg, int rc) 7877 { 7878 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7879 7880 if (rc != 0) { 7881 ctx->blob->locked_operation_in_progress = false; 7882 ctx->cb_fn(ctx->cb_arg, rc); 7883 free(ctx); 7884 
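		/* The freeze never succeeded, so no resize was attempted; the
		 * original rc has already been reported to the caller. */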
return; 7885 } 7886 7887 ctx->rc = blob_resize(ctx->blob, ctx->sz); 7888 7889 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 7890 } 7891 7892 void 7893 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 7894 { 7895 struct spdk_bs_resize_ctx *ctx; 7896 7897 blob_verify_md_op(blob); 7898 7899 SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz); 7900 7901 if (blob->md_ro) { 7902 cb_fn(cb_arg, -EPERM); 7903 return; 7904 } 7905 7906 if (sz == blob->active.num_clusters) { 7907 cb_fn(cb_arg, 0); 7908 return; 7909 } 7910 7911 if (blob->locked_operation_in_progress) { 7912 cb_fn(cb_arg, -EBUSY); 7913 return; 7914 } 7915 7916 ctx = calloc(1, sizeof(*ctx)); 7917 if (!ctx) { 7918 cb_fn(cb_arg, -ENOMEM); 7919 return; 7920 } 7921 7922 blob->locked_operation_in_progress = true; 7923 ctx->cb_fn = cb_fn; 7924 ctx->cb_arg = cb_arg; 7925 ctx->blob = blob; 7926 ctx->sz = sz; 7927 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 7928 } 7929 7930 /* END spdk_blob_resize */ 7931 7932 7933 /* START spdk_bs_delete_blob */ 7934 7935 static void 7936 bs_delete_close_cpl(void *cb_arg, int bserrno) 7937 { 7938 spdk_bs_sequence_t *seq = cb_arg; 7939 7940 bs_sequence_finish(seq, bserrno); 7941 } 7942 7943 static void 7944 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7945 { 7946 struct spdk_blob *blob = cb_arg; 7947 7948 if (bserrno != 0) { 7949 /* 7950 * We already removed this blob from the blobstore tailq, so 7951 * we need to free it here since this is the last reference 7952 * to it. 7953 */ 7954 blob_free(blob); 7955 bs_delete_close_cpl(seq, bserrno); 7956 return; 7957 } 7958 7959 /* 7960 * This will immediately decrement the ref_count and call 7961 * the completion routine since the metadata state is clean. 7962 * By calling spdk_blob_close, we reduce the number of call 7963 * points into code that touches the blob->open_ref count 7964 * and the blobstore's blob list. 
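	 * bs_delete_close_cpl() then finishes the delete sequence with the
	 * status of the close.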
7965 */ 7966 spdk_blob_close(blob, bs_delete_close_cpl, seq); 7967 } 7968 7969 struct delete_snapshot_ctx { 7970 struct spdk_blob_list *parent_snapshot_entry; 7971 struct spdk_blob *snapshot; 7972 struct spdk_blob_md_page *page; 7973 bool snapshot_md_ro; 7974 struct spdk_blob *clone; 7975 bool clone_md_ro; 7976 spdk_blob_op_with_handle_complete cb_fn; 7977 void *cb_arg; 7978 int bserrno; 7979 uint32_t next_extent_page; 7980 }; 7981 7982 static void 7983 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 7984 { 7985 struct delete_snapshot_ctx *ctx = cb_arg; 7986 7987 if (bserrno != 0) { 7988 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 7989 } 7990 7991 assert(ctx != NULL); 7992 7993 if (bserrno != 0 && ctx->bserrno == 0) { 7994 ctx->bserrno = bserrno; 7995 } 7996 7997 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 7998 spdk_free(ctx->page); 7999 free(ctx); 8000 } 8001 8002 static void 8003 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 8004 { 8005 struct delete_snapshot_ctx *ctx = cb_arg; 8006 8007 if (bserrno != 0) { 8008 ctx->bserrno = bserrno; 8009 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 8010 } 8011 8012 if (ctx->bserrno != 0) { 8013 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 8014 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 8015 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 8016 } 8017 8018 ctx->snapshot->locked_operation_in_progress = false; 8019 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8020 8021 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 8022 } 8023 8024 static void 8025 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 8026 { 8027 struct delete_snapshot_ctx *ctx = cb_arg; 8028 8029 ctx->clone->locked_operation_in_progress = false; 8030 ctx->clone->md_ro = ctx->clone_md_ro; 8031 8032 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8033 } 8034 8035 static void 8036 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 8037 { 8038 struct delete_snapshot_ctx *ctx = cb_arg; 8039 8040 if (bserrno) { 8041 ctx->bserrno = bserrno; 8042 delete_snapshot_cleanup_clone(ctx, 0); 8043 return; 8044 } 8045 8046 ctx->clone->locked_operation_in_progress = false; 8047 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 8048 } 8049 8050 static void 8051 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 8052 { 8053 struct delete_snapshot_ctx *ctx = cb_arg; 8054 struct spdk_blob_list *parent_snapshot_entry = NULL; 8055 struct spdk_blob_list *snapshot_entry = NULL; 8056 struct spdk_blob_list *clone_entry = NULL; 8057 struct spdk_blob_list *snapshot_clone_entry = NULL; 8058 8059 if (bserrno) { 8060 SPDK_ERRLOG("Failed to sync MD on blob\n"); 8061 ctx->bserrno = bserrno; 8062 delete_snapshot_cleanup_clone(ctx, 0); 8063 return; 8064 } 8065 8066 /* Get snapshot entry for the snapshot we want to remove */ 8067 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 8068 8069 assert(snapshot_entry != NULL); 8070 8071 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 8072 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8073 assert(clone_entry != NULL); 8074 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 8075 snapshot_entry->clone_count--; 8076 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 8077 8078 switch (ctx->snapshot->parent_id) { 8079 case SPDK_BLOBID_INVALID: 8080 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 8081 /* No parent snapshot - just remove clone 
entry */ 8082 free(clone_entry); 8083 break; 8084 default: 8085 /* This snapshot is at the same time a clone of another snapshot - we need to 8086 * update parent snapshot (remove current clone, add new one inherited from 8087 * the snapshot that is being removed) */ 8088 8089 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8090 * snapshot that we are removing */ 8091 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 8092 &snapshot_clone_entry); 8093 8094 /* Switch clone entry in parent snapshot */ 8095 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 8096 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 8097 free(snapshot_clone_entry); 8098 } 8099 8100 /* Restore md_ro flags */ 8101 ctx->clone->md_ro = ctx->clone_md_ro; 8102 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8103 8104 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 8105 } 8106 8107 static void 8108 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 8109 { 8110 struct delete_snapshot_ctx *ctx = cb_arg; 8111 uint64_t i; 8112 8113 ctx->snapshot->md_ro = false; 8114 8115 if (bserrno) { 8116 SPDK_ERRLOG("Failed to sync MD on clone\n"); 8117 ctx->bserrno = bserrno; 8118 8119 /* Restore snapshot to previous state */ 8120 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8121 if (bserrno != 0) { 8122 delete_snapshot_cleanup_clone(ctx, bserrno); 8123 return; 8124 } 8125 8126 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8127 return; 8128 } 8129 8130 /* Clear cluster map entries for snapshot */ 8131 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8132 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 8133 if (ctx->snapshot->active.clusters[i] != 0) { 8134 ctx->snapshot->active.num_allocated_clusters--; 8135 } 8136 ctx->snapshot->active.clusters[i] = 0; 8137 } 8138 } 8139 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 8140 i < ctx->clone->active.num_extent_pages; i++) { 8141 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 8142 ctx->snapshot->active.extent_pages[i] = 0; 8143 } 8144 } 8145 8146 blob_set_thin_provision(ctx->snapshot); 8147 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 8148 8149 if (ctx->parent_snapshot_entry != NULL) { 8150 ctx->snapshot->back_bs_dev = NULL; 8151 } 8152 8153 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 8154 } 8155 8156 static void 8157 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 8158 { 8159 int bserrno; 8160 8161 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 8162 blob_back_bs_destroy(ctx->clone); 8163 8164 /* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... 
*/ 8165 if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 8166 bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot, 8167 BLOB_EXTERNAL_SNAPSHOT_ID); 8168 if (bserrno != 0) { 8169 ctx->bserrno = bserrno; 8170 8171 /* Restore snapshot to previous state */ 8172 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8173 if (bserrno != 0) { 8174 delete_snapshot_cleanup_clone(ctx, bserrno); 8175 return; 8176 } 8177 8178 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8179 return; 8180 } 8181 ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 8182 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8183 /* Do not delete the external snapshot along with this snapshot */ 8184 ctx->snapshot->back_bs_dev = NULL; 8185 ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 8186 } else if (ctx->parent_snapshot_entry != NULL) { 8187 /* ...to parent snapshot */ 8188 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 8189 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8190 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 8191 sizeof(spdk_blob_id), 8192 true); 8193 } else { 8194 /* ...to blobid invalid and zeroes dev */ 8195 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 8196 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 8197 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 8198 } 8199 8200 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 8201 } 8202 8203 static void 8204 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 8205 { 8206 struct delete_snapshot_ctx *ctx = cb_arg; 8207 uint32_t *extent_page; 8208 uint64_t i; 8209 8210 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 8211 i < ctx->clone->active.num_extent_pages; i++) { 8212 if (ctx->snapshot->active.extent_pages[i] == 0) { 8213 /* No extent page to use from snapshot */ 8214 continue; 8215 } 8216 8217 extent_page = &ctx->clone->active.extent_pages[i]; 8218 if (*extent_page == 0) { 8219 /* Copy extent page from snapshot when clone did not have a matching one */ 8220 *extent_page = ctx->snapshot->active.extent_pages[i]; 8221 continue; 8222 } 8223 8224 /* Clone and snapshot both contain partially filled matching extent pages. 8225 * Update the clone extent page in place with cluster map containing the mix of both. 
*/ 8226 ctx->next_extent_page = i + 1; 8227 memset(ctx->page, 0, SPDK_BS_PAGE_SIZE); 8228 8229 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page, 8230 delete_snapshot_update_extent_pages, ctx); 8231 return; 8232 } 8233 delete_snapshot_update_extent_pages_cpl(ctx); 8234 } 8235 8236 static void 8237 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 8238 { 8239 struct delete_snapshot_ctx *ctx = cb_arg; 8240 uint64_t i; 8241 8242 /* Temporarily override md_ro flag for clone for MD modification */ 8243 ctx->clone_md_ro = ctx->clone->md_ro; 8244 ctx->clone->md_ro = false; 8245 8246 if (bserrno) { 8247 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 8248 ctx->bserrno = bserrno; 8249 delete_snapshot_cleanup_clone(ctx, 0); 8250 return; 8251 } 8252 8253 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 8254 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8255 if (ctx->clone->active.clusters[i] == 0) { 8256 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 8257 if (ctx->clone->active.clusters[i] != 0) { 8258 ctx->clone->active.num_allocated_clusters++; 8259 } 8260 } 8261 } 8262 ctx->next_extent_page = 0; 8263 delete_snapshot_update_extent_pages(ctx, 0); 8264 } 8265 8266 static void 8267 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno) 8268 { 8269 struct delete_snapshot_ctx *ctx = cb_arg; 8270 8271 if (bserrno != 0) { 8272 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n", 8273 blob->id, bserrno); 8274 /* That error should not stop us from syncing metadata. */ 8275 } 8276 8277 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8278 } 8279 8280 static void 8281 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 8282 { 8283 struct delete_snapshot_ctx *ctx = cb_arg; 8284 8285 if (bserrno) { 8286 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 8287 ctx->bserrno = bserrno; 8288 delete_snapshot_cleanup_clone(ctx, 0); 8289 return; 8290 } 8291 8292 /* Temporarily override md_ro flag for snapshot for MD modification */ 8293 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 8294 ctx->snapshot->md_ro = false; 8295 8296 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 8297 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 8298 sizeof(spdk_blob_id), true); 8299 if (ctx->bserrno != 0) { 8300 delete_snapshot_cleanup_clone(ctx, 0); 8301 return; 8302 } 8303 8304 if (blob_is_esnap_clone(ctx->snapshot)) { 8305 blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false, 8306 delete_snapshot_esnap_channels_destroyed_cb, 8307 ctx); 8308 return; 8309 } 8310 8311 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8312 } 8313 8314 static void 8315 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 8316 { 8317 struct delete_snapshot_ctx *ctx = cb_arg; 8318 8319 if (bserrno) { 8320 SPDK_ERRLOG("Failed to open clone\n"); 8321 ctx->bserrno = bserrno; 8322 delete_snapshot_cleanup_snapshot(ctx, 0); 8323 return; 8324 } 8325 8326 ctx->clone = clone; 8327 8328 if (clone->locked_operation_in_progress) { 8329 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 8330 ctx->bserrno = -EBUSY; 8331 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8332 return; 8333 } 8334 8335 
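	/* Claim the clone for this delete operation before freezing its I/O. */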
clone->locked_operation_in_progress = true; 8336 8337 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 8338 } 8339 8340 static void 8341 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 8342 { 8343 struct spdk_blob_list *snapshot_entry = NULL; 8344 struct spdk_blob_list *clone_entry = NULL; 8345 struct spdk_blob_list *snapshot_clone_entry = NULL; 8346 8347 /* Get snapshot entry for the snapshot we want to remove */ 8348 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 8349 8350 assert(snapshot_entry != NULL); 8351 8352 /* Get clone of the snapshot (at this point there can be only one clone) */ 8353 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8354 assert(snapshot_entry->clone_count == 1); 8355 assert(clone_entry != NULL); 8356 8357 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8358 * snapshot that we are removing */ 8359 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 8360 &snapshot_clone_entry); 8361 8362 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 8363 } 8364 8365 static void 8366 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 8367 { 8368 spdk_bs_sequence_t *seq = cb_arg; 8369 struct spdk_blob_list *snapshot_entry = NULL; 8370 uint32_t page_num; 8371 8372 if (bserrno) { 8373 SPDK_ERRLOG("Failed to remove blob\n"); 8374 bs_sequence_finish(seq, bserrno); 8375 return; 8376 } 8377 8378 /* Remove snapshot from the list */ 8379 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8380 if (snapshot_entry != NULL) { 8381 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 8382 free(snapshot_entry); 8383 } 8384 8385 page_num = bs_blobid_to_page(blob->id); 8386 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 8387 blob->state = SPDK_BLOB_STATE_DIRTY; 8388 blob->active.num_pages = 0; 8389 blob_resize(blob, 0); 8390 8391 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 8392 } 8393 8394 static int 8395 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 8396 { 8397 struct spdk_blob_list *snapshot_entry = NULL; 8398 struct spdk_blob_list *clone_entry = NULL; 8399 struct spdk_blob *clone = NULL; 8400 bool has_one_clone = false; 8401 8402 /* Check if this is a snapshot with clones */ 8403 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8404 if (snapshot_entry != NULL) { 8405 if (snapshot_entry->clone_count > 1) { 8406 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 8407 return -EBUSY; 8408 } else if (snapshot_entry->clone_count == 1) { 8409 has_one_clone = true; 8410 } 8411 } 8412 8413 /* Check if someone has this blob open (besides this delete context): 8414 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 8415 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 8416 * and that is ok, because we will update it accordingly */ 8417 if (blob->open_ref <= 2 && has_one_clone) { 8418 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8419 assert(clone_entry != NULL); 8420 clone = blob_lookup(blob->bs, clone_entry->id); 8421 8422 if (blob->open_ref == 2 && clone == NULL) { 8423 /* Clone is closed and someone else opened this blob */ 8424 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8425 return -EBUSY; 8426 } 8427 8428 *update_clone = true; 8429 return 0; 8430 } 8431 8432 if (blob->open_ref > 1) { 8433 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8434 return 
-EBUSY; 8435 } 8436 8437 assert(has_one_clone == false); 8438 *update_clone = false; 8439 return 0; 8440 } 8441 8442 static void 8443 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 8444 { 8445 spdk_bs_sequence_t *seq = cb_arg; 8446 8447 bs_sequence_finish(seq, -ENOMEM); 8448 } 8449 8450 static void 8451 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 8452 { 8453 spdk_bs_sequence_t *seq = cb_arg; 8454 struct delete_snapshot_ctx *ctx; 8455 bool update_clone = false; 8456 8457 if (bserrno != 0) { 8458 bs_sequence_finish(seq, bserrno); 8459 return; 8460 } 8461 8462 blob_verify_md_op(blob); 8463 8464 ctx = calloc(1, sizeof(*ctx)); 8465 if (ctx == NULL) { 8466 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 8467 return; 8468 } 8469 8470 ctx->snapshot = blob; 8471 ctx->cb_fn = bs_delete_blob_finish; 8472 ctx->cb_arg = seq; 8473 8474 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 8475 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 8476 if (ctx->bserrno) { 8477 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8478 return; 8479 } 8480 8481 if (blob->locked_operation_in_progress) { 8482 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 8483 ctx->bserrno = -EBUSY; 8484 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8485 return; 8486 } 8487 8488 blob->locked_operation_in_progress = true; 8489 8490 /* 8491 * Remove the blob from the blob_store list now, to ensure it does not 8492 * get returned after this point by blob_lookup(). 8493 */ 8494 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 8495 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 8496 8497 if (update_clone) { 8498 ctx->page = spdk_zmalloc(blob->bs->md_page_size, 0, NULL, SPDK_ENV_NUMA_ID_ANY, 8499 SPDK_MALLOC_DMA); 8500 if (!ctx->page) { 8501 ctx->bserrno = -ENOMEM; 8502 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8503 return; 8504 } 8505 /* This blob is a snapshot with active clone - update clone first */ 8506 update_clone_on_snapshot_deletion(blob, ctx); 8507 } else { 8508 /* This blob does not have any clones - just remove it */ 8509 bs_blob_list_remove(blob); 8510 bs_delete_blob_finish(seq, blob, 0); 8511 free(ctx); 8512 } 8513 } 8514 8515 void 8516 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8517 spdk_blob_op_complete cb_fn, void *cb_arg) 8518 { 8519 struct spdk_bs_cpl cpl; 8520 spdk_bs_sequence_t *seq; 8521 8522 SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid); 8523 8524 assert(spdk_get_thread() == bs->md_thread); 8525 8526 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8527 cpl.u.blob_basic.cb_fn = cb_fn; 8528 cpl.u.blob_basic.cb_arg = cb_arg; 8529 8530 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8531 if (!seq) { 8532 cb_fn(cb_arg, -ENOMEM); 8533 return; 8534 } 8535 8536 spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq); 8537 } 8538 8539 /* END spdk_bs_delete_blob */ 8540 8541 /* START spdk_bs_open_blob */ 8542 8543 static void 8544 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8545 { 8546 struct spdk_blob *blob = cb_arg; 8547 struct spdk_blob *existing; 8548 8549 if (bserrno != 0) { 8550 blob_free(blob); 8551 seq->cpl.u.blob_handle.blob = NULL; 8552 bs_sequence_finish(seq, bserrno); 8553 return; 8554 } 8555 8556 existing = blob_lookup(blob->bs, blob->id); 8557 if (existing) { 8558 blob_free(blob); 8559 existing->open_ref++; 8560 seq->cpl.u.blob_handle.blob = existing; 8561 bs_sequence_finish(seq, 0); 8562 return; 8563 } 
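	/* First open of a freshly loaded blob: take the initial reference and
	 * publish it so subsequent lookups and opens find this instance. */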
8564 8565 blob->open_ref++; 8566 8567 spdk_bit_array_set(blob->bs->open_blobids, blob->id); 8568 RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob); 8569 8570 bs_sequence_finish(seq, bserrno); 8571 } 8572 8573 static inline void 8574 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst) 8575 { 8576 #define FIELD_OK(field) \ 8577 offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size 8578 8579 #define SET_FIELD(field) \ 8580 if (FIELD_OK(field)) { \ 8581 dst->field = src->field; \ 8582 } \ 8583 8584 SET_FIELD(clear_method); 8585 SET_FIELD(esnap_ctx); 8586 8587 dst->opts_size = src->opts_size; 8588 8589 /* You should not remove this statement, but need to update the assert statement 8590 * if you add a new field, and also add a corresponding SET_FIELD statement */ 8591 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size"); 8592 8593 #undef FIELD_OK 8594 #undef SET_FIELD 8595 } 8596 8597 static void 8598 bs_open_blob(struct spdk_blob_store *bs, 8599 spdk_blob_id blobid, 8600 struct spdk_blob_open_opts *opts, 8601 spdk_blob_op_with_handle_complete cb_fn, 8602 void *cb_arg) 8603 { 8604 struct spdk_blob *blob; 8605 struct spdk_bs_cpl cpl; 8606 struct spdk_blob_open_opts opts_local; 8607 spdk_bs_sequence_t *seq; 8608 uint32_t page_num; 8609 8610 SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid); 8611 assert(spdk_get_thread() == bs->md_thread); 8612 8613 page_num = bs_blobid_to_page(blobid); 8614 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 8615 /* Invalid blobid */ 8616 cb_fn(cb_arg, NULL, -ENOENT); 8617 return; 8618 } 8619 8620 blob = blob_lookup(bs, blobid); 8621 if (blob) { 8622 blob->open_ref++; 8623 cb_fn(cb_arg, blob, 0); 8624 return; 8625 } 8626 8627 blob = blob_alloc(bs, blobid); 8628 if (!blob) { 8629 cb_fn(cb_arg, NULL, -ENOMEM); 8630 return; 8631 } 8632 8633 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 8634 if (opts) { 8635 blob_open_opts_copy(opts, &opts_local); 8636 } 8637 8638 blob->clear_method = opts_local.clear_method; 8639 8640 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 8641 cpl.u.blob_handle.cb_fn = cb_fn; 8642 cpl.u.blob_handle.cb_arg = cb_arg; 8643 cpl.u.blob_handle.blob = blob; 8644 cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx; 8645 8646 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8647 if (!seq) { 8648 blob_free(blob); 8649 cb_fn(cb_arg, NULL, -ENOMEM); 8650 return; 8651 } 8652 8653 blob_load(seq, blob, bs_open_blob_cpl, blob); 8654 } 8655 8656 void 8657 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8658 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8659 { 8660 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 8661 } 8662 8663 void 8664 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 8665 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8666 { 8667 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 8668 } 8669 8670 /* END spdk_bs_open_blob */ 8671 8672 /* START spdk_blob_set_read_only */ 8673 int 8674 spdk_blob_set_read_only(struct spdk_blob *blob) 8675 { 8676 blob_verify_md_op(blob); 8677 8678 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 8679 8680 blob->state = SPDK_BLOB_STATE_DIRTY; 8681 return 0; 8682 } 8683 /* END spdk_blob_set_read_only */ 8684 8685 /* START spdk_blob_sync_md */ 8686 8687 static void 8688 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8689 { 8690 struct spdk_blob *blob = cb_arg; 8691 8692 if (bserrno == 
0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 8693 blob->data_ro = true; 8694 blob->md_ro = true; 8695 } 8696 8697 bs_sequence_finish(seq, bserrno); 8698 } 8699 8700 static void 8701 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8702 { 8703 struct spdk_bs_cpl cpl; 8704 spdk_bs_sequence_t *seq; 8705 8706 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8707 cpl.u.blob_basic.cb_fn = cb_fn; 8708 cpl.u.blob_basic.cb_arg = cb_arg; 8709 8710 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8711 if (!seq) { 8712 cb_fn(cb_arg, -ENOMEM); 8713 return; 8714 } 8715 8716 blob_persist(seq, blob, blob_sync_md_cpl, blob); 8717 } 8718 8719 void 8720 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8721 { 8722 blob_verify_md_op(blob); 8723 8724 SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id); 8725 8726 if (blob->md_ro) { 8727 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 8728 cb_fn(cb_arg, 0); 8729 return; 8730 } 8731 8732 blob_sync_md(blob, cb_fn, cb_arg); 8733 } 8734 8735 /* END spdk_blob_sync_md */ 8736 8737 struct spdk_blob_cluster_op_ctx { 8738 struct spdk_thread *thread; 8739 struct spdk_blob *blob; 8740 uint32_t cluster_num; /* cluster index in blob */ 8741 uint32_t cluster; /* cluster on disk */ 8742 uint32_t extent_page; /* extent page on disk */ 8743 struct spdk_blob_md_page *page; /* preallocated extent page */ 8744 int rc; 8745 spdk_blob_op_complete cb_fn; 8746 void *cb_arg; 8747 }; 8748 8749 static void 8750 blob_op_cluster_msg_cpl(void *arg) 8751 { 8752 struct spdk_blob_cluster_op_ctx *ctx = arg; 8753 8754 ctx->cb_fn(ctx->cb_arg, ctx->rc); 8755 free(ctx); 8756 } 8757 8758 static void 8759 blob_op_cluster_msg_cb(void *arg, int bserrno) 8760 { 8761 struct spdk_blob_cluster_op_ctx *ctx = arg; 8762 8763 ctx->rc = bserrno; 8764 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8765 } 8766 8767 static void 8768 blob_insert_new_ep_cb(void *arg, int bserrno) 8769 { 8770 struct spdk_blob_cluster_op_ctx *ctx = arg; 8771 uint32_t *extent_page; 8772 8773 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8774 *extent_page = ctx->extent_page; 8775 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8776 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8777 } 8778 8779 struct spdk_blob_write_extent_page_ctx { 8780 struct spdk_blob_store *bs; 8781 8782 uint32_t extent; 8783 struct spdk_blob_md_page *page; 8784 }; 8785 8786 static void 8787 blob_free_cluster_msg_cb(void *arg, int bserrno) 8788 { 8789 struct spdk_blob_cluster_op_ctx *ctx = arg; 8790 8791 spdk_spin_lock(&ctx->blob->bs->used_lock); 8792 bs_release_cluster(ctx->blob->bs, ctx->cluster); 8793 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8794 8795 ctx->rc = bserrno; 8796 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8797 } 8798 8799 static void 8800 blob_free_cluster_update_ep_cb(void *arg, int bserrno) 8801 { 8802 struct spdk_blob_cluster_op_ctx *ctx = arg; 8803 8804 if (bserrno != 0 || ctx->blob->bs->clean == 0) { 8805 blob_free_cluster_msg_cb(ctx, bserrno); 8806 return; 8807 } 8808 8809 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8810 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8811 } 8812 8813 static void 8814 blob_free_cluster_free_ep_cb(void *arg, int bserrno) 8815 { 8816 struct spdk_blob_cluster_op_ctx *ctx = arg; 8817 8818 spdk_spin_lock(&ctx->blob->bs->used_lock); 8819 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8820 bs_release_md_page(ctx->blob->bs, 
ctx->extent_page); 8821 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8822 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8823 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8824 } 8825 8826 static void 8827 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8828 { 8829 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8830 8831 free(ctx); 8832 bs_sequence_finish(seq, bserrno); 8833 } 8834 8835 static void 8836 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8837 { 8838 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8839 8840 if (bserrno != 0) { 8841 blob_persist_extent_page_cpl(seq, ctx, bserrno); 8842 return; 8843 } 8844 bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent), 8845 bs_byte_to_lba(ctx->bs, ctx->bs->md_page_size), 8846 blob_persist_extent_page_cpl, ctx); 8847 } 8848 8849 static void 8850 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 8851 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 8852 { 8853 struct spdk_blob_write_extent_page_ctx *ctx; 8854 spdk_bs_sequence_t *seq; 8855 struct spdk_bs_cpl cpl; 8856 8857 ctx = calloc(1, sizeof(*ctx)); 8858 if (!ctx) { 8859 cb_fn(cb_arg, -ENOMEM); 8860 return; 8861 } 8862 ctx->bs = blob->bs; 8863 ctx->extent = extent; 8864 ctx->page = page; 8865 8866 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8867 cpl.u.blob_basic.cb_fn = cb_fn; 8868 cpl.u.blob_basic.cb_arg = cb_arg; 8869 8870 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8871 if (!seq) { 8872 free(ctx); 8873 cb_fn(cb_arg, -ENOMEM); 8874 return; 8875 } 8876 8877 assert(page); 8878 page->next = SPDK_INVALID_MD_PAGE; 8879 page->id = blob->id; 8880 page->sequence_num = 0; 8881 8882 blob_serialize_extent_page(blob, cluster_num, page); 8883 8884 page->crc = blob_md_page_calc_crc(page); 8885 8886 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true); 8887 8888 bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx); 8889 } 8890 8891 static void 8892 blob_insert_cluster_msg(void *arg) 8893 { 8894 struct spdk_blob_cluster_op_ctx *ctx = arg; 8895 uint32_t *extent_page; 8896 8897 ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 8898 if (ctx->rc != 0) { 8899 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8900 return; 8901 } 8902 8903 if (ctx->blob->use_extent_table == false) { 8904 /* Extent table is not used, proceed with sync of md that will only use extents_rle. */ 8905 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8906 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8907 return; 8908 } 8909 8910 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8911 if (*extent_page == 0) { 8912 /* Extent page requires allocation. 8913 * It was already claimed in the used_md_pages map and placed in ctx. */ 8914 assert(ctx->extent_page != 0); 8915 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8916 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 8917 blob_insert_new_ep_cb, ctx); 8918 } else { 8919 /* It is possible for original thread to allocate extent page for 8920 * different cluster in the same extent page. In such case proceed with 8921 * updating the existing extent page, but release the additional one. 
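		 * The extra page is returned to the used_md_pages bitmap under used_lock.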
*/
8922 		if (ctx->extent_page != 0) {
8923 			spdk_spin_lock(&ctx->blob->bs->used_lock);
8924 			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8925 			bs_release_md_page(ctx->blob->bs, ctx->extent_page);
8926 			spdk_spin_unlock(&ctx->blob->bs->used_lock);
8927 			ctx->extent_page = 0;
8928 		}
8929 		/* Extent page already allocated.
8930 		 * Every cluster allocation requires just an update of a single extent page. */
8931 		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
8932 				       blob_op_cluster_msg_cb, ctx);
8933 	}
8934 }
8935 
8936 static void
8937 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
8938 				 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page,
8939 				 spdk_blob_op_complete cb_fn, void *cb_arg)
8940 {
8941 	struct spdk_blob_cluster_op_ctx *ctx;
8942 
8943 	ctx = calloc(1, sizeof(*ctx));
8944 	if (ctx == NULL) {
8945 		cb_fn(cb_arg, -ENOMEM);
8946 		return;
8947 	}
8948 
8949 	ctx->thread = spdk_get_thread();
8950 	ctx->blob = blob;
8951 	ctx->cluster_num = cluster_num;
8952 	ctx->cluster = cluster;
8953 	ctx->extent_page = extent_page;
8954 	ctx->page = page;
8955 	ctx->cb_fn = cb_fn;
8956 	ctx->cb_arg = cb_arg;
8957 
8958 	spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx);
8959 }
8960 
8961 static void
8962 blob_free_cluster_msg(void *arg)
8963 {
8964 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8965 	uint32_t *extent_page;
8966 	uint32_t start_cluster_idx;
8967 	bool free_extent_page = true;
8968 	size_t i;
8969 
8970 	ctx->cluster = bs_lba_to_cluster(ctx->blob->bs, ctx->blob->active.clusters[ctx->cluster_num]);
8971 
8972 	/* There were concurrent unmaps to the same cluster; only release the cluster on the first one */
8973 	if (ctx->cluster == 0) {
8974 		blob_op_cluster_msg_cb(ctx, 0);
8975 		return;
8976 	}
8977 
8978 	ctx->blob->active.clusters[ctx->cluster_num] = 0;
8979 	if (ctx->cluster != 0) {
8980 		ctx->blob->active.num_allocated_clusters--;
8981 	}
8982 
8983 	if (ctx->blob->use_extent_table == false) {
8984 		/* Extent table is not used, proceed with sync of md that will only use extents_rle.
*/
8985 		spdk_spin_lock(&ctx->blob->bs->used_lock);
8986 		bs_release_cluster(ctx->blob->bs, ctx->cluster);
8987 		spdk_spin_unlock(&ctx->blob->bs->used_lock);
8988 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8989 		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8990 		return;
8991 	}
8992 
8993 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8994 
8995 	/* There shouldn't be parallel release operations on the same cluster */
8996 	assert(*extent_page == ctx->extent_page);
8997 
8998 	start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
8999 	for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) {
9000 		if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) {
9001 			free_extent_page = false;
9002 			break;
9003 		}
9004 	}
9005 
9006 	if (free_extent_page) {
9007 		assert(ctx->extent_page != 0);
9008 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
9009 		ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0;
9010 		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
9011 				       blob_free_cluster_free_ep_cb, ctx);
9012 	} else {
9013 		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
9014 				       blob_free_cluster_update_ep_cb, ctx);
9015 	}
9016 }
9017 
9018 
9019 static void
9020 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page,
9021 			       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
9022 {
9023 	struct spdk_blob_cluster_op_ctx *ctx;
9024 
9025 	ctx = calloc(1, sizeof(*ctx));
9026 	if (ctx == NULL) {
9027 		cb_fn(cb_arg, -ENOMEM);
9028 		return;
9029 	}
9030 
9031 	ctx->thread = spdk_get_thread();
9032 	ctx->blob = blob;
9033 	ctx->cluster_num = cluster_num;
9034 	ctx->extent_page = extent_page;
9035 	ctx->page = page;
9036 	ctx->cb_fn = cb_fn;
9037 	ctx->cb_arg = cb_arg;
9038 
9039 	spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx);
9040 }
9041 
9042 /* START spdk_blob_close */
9043 
9044 static void
9045 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9046 {
9047 	struct spdk_blob *blob = cb_arg;
9048 
9049 	if (bserrno == 0) {
9050 		blob->open_ref--;
9051 		if (blob->open_ref == 0) {
9052 			/*
9053 			 * Blobs with active.num_pages == 0 are deleted blobs.
9054 			 * These blobs are removed from the blob_store list
9055 			 * when the deletion process starts - so don't try to
9056 			 * remove them again.
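			 * (Deleted blobs reach this completion from bs_delete_persist_cpl
			 * via spdk_blob_close.)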
9057 */ 9058 if (blob->active.num_pages > 0) { 9059 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 9060 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 9061 } 9062 blob_free(blob); 9063 } 9064 } 9065 9066 bs_sequence_finish(seq, bserrno); 9067 } 9068 9069 static void 9070 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno) 9071 { 9072 spdk_bs_sequence_t *seq = cb_arg; 9073 9074 if (bserrno != 0) { 9075 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n", 9076 blob->id, bserrno); 9077 bs_sequence_finish(seq, bserrno); 9078 return; 9079 } 9080 9081 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n", 9082 blob->id, spdk_thread_get_name(spdk_get_thread())); 9083 9084 /* Sync metadata */ 9085 blob_persist(seq, blob, blob_close_cpl, blob); 9086 } 9087 9088 void 9089 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 9090 { 9091 struct spdk_bs_cpl cpl; 9092 spdk_bs_sequence_t *seq; 9093 9094 blob_verify_md_op(blob); 9095 9096 SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id); 9097 9098 if (blob->open_ref == 0) { 9099 cb_fn(cb_arg, -EBADF); 9100 return; 9101 } 9102 9103 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 9104 cpl.u.blob_basic.cb_fn = cb_fn; 9105 cpl.u.blob_basic.cb_arg = cb_arg; 9106 9107 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 9108 if (!seq) { 9109 cb_fn(cb_arg, -ENOMEM); 9110 return; 9111 } 9112 9113 if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) { 9114 blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq); 9115 return; 9116 } 9117 9118 /* Sync metadata */ 9119 blob_persist(seq, blob, blob_close_cpl, blob); 9120 } 9121 9122 /* END spdk_blob_close */ 9123 9124 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 9125 { 9126 return spdk_get_io_channel(bs); 9127 } 9128 9129 void 9130 spdk_bs_free_io_channel(struct spdk_io_channel *channel) 9131 { 9132 blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel)); 9133 spdk_put_io_channel(channel); 9134 } 9135 9136 void 9137 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 9138 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9139 { 9140 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9141 SPDK_BLOB_UNMAP); 9142 } 9143 9144 void 9145 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 9146 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9147 { 9148 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9149 SPDK_BLOB_WRITE_ZEROES); 9150 } 9151 9152 void 9153 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 9154 void *payload, uint64_t offset, uint64_t length, 9155 spdk_blob_op_complete cb_fn, void *cb_arg) 9156 { 9157 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9158 SPDK_BLOB_WRITE); 9159 } 9160 9161 void 9162 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 9163 void *payload, uint64_t offset, uint64_t length, 9164 spdk_blob_op_complete cb_fn, void *cb_arg) 9165 { 9166 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9167 SPDK_BLOB_READ); 9168 } 9169 9170 void 9171 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 9172 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9173 spdk_blob_op_complete cb_fn, void *cb_arg) 
9174 { 9175 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 9176 } 9177 9178 void 9179 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 9180 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9181 spdk_blob_op_complete cb_fn, void *cb_arg) 9182 { 9183 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 9184 } 9185 9186 void 9187 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9188 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9189 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9190 { 9191 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 9192 io_opts); 9193 } 9194 9195 void 9196 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9197 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9198 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9199 { 9200 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 9201 io_opts); 9202 } 9203 9204 struct spdk_bs_iter_ctx { 9205 int64_t page_num; 9206 struct spdk_blob_store *bs; 9207 9208 spdk_blob_op_with_handle_complete cb_fn; 9209 void *cb_arg; 9210 }; 9211 9212 static void 9213 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 9214 { 9215 struct spdk_bs_iter_ctx *ctx = cb_arg; 9216 struct spdk_blob_store *bs = ctx->bs; 9217 spdk_blob_id id; 9218 9219 if (bserrno == 0) { 9220 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 9221 free(ctx); 9222 return; 9223 } 9224 9225 ctx->page_num++; 9226 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 9227 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 9228 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 9229 free(ctx); 9230 return; 9231 } 9232 9233 id = bs_page_to_blobid(ctx->page_num); 9234 9235 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 9236 } 9237 9238 void 9239 spdk_bs_iter_first(struct spdk_blob_store *bs, 9240 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9241 { 9242 struct spdk_bs_iter_ctx *ctx; 9243 9244 ctx = calloc(1, sizeof(*ctx)); 9245 if (!ctx) { 9246 cb_fn(cb_arg, NULL, -ENOMEM); 9247 return; 9248 } 9249 9250 ctx->page_num = -1; 9251 ctx->bs = bs; 9252 ctx->cb_fn = cb_fn; 9253 ctx->cb_arg = cb_arg; 9254 9255 bs_iter_cpl(ctx, NULL, -1); 9256 } 9257 9258 static void 9259 bs_iter_close_cpl(void *cb_arg, int bserrno) 9260 { 9261 struct spdk_bs_iter_ctx *ctx = cb_arg; 9262 9263 bs_iter_cpl(ctx, NULL, -1); 9264 } 9265 9266 void 9267 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 9268 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9269 { 9270 struct spdk_bs_iter_ctx *ctx; 9271 9272 assert(blob != NULL); 9273 9274 ctx = calloc(1, sizeof(*ctx)); 9275 if (!ctx) { 9276 cb_fn(cb_arg, NULL, -ENOMEM); 9277 return; 9278 } 9279 9280 ctx->page_num = bs_blobid_to_page(blob->id); 9281 ctx->bs = bs; 9282 ctx->cb_fn = cb_fn; 9283 ctx->cb_arg = cb_arg; 9284 9285 /* Close the existing blob */ 9286 spdk_blob_close(blob, bs_iter_close_cpl, ctx); 9287 } 9288 9289 static int 9290 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 9291 uint16_t value_len, bool internal) 9292 { 9293 struct spdk_xattr_tailq *xattrs; 9294 struct spdk_xattr *xattr; 9295 size_t desc_size; 9296 void *tmp; 9297 9298 blob_verify_md_op(blob); 9299 9300 if 
(blob->md_ro) {
9301 		return -EPERM;
9302 	}
9303 
9304 	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
9305 	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
9306 		SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page (max %zu)\n", name,
9307 			      desc_size, SPDK_BS_MAX_DESC_SIZE);
9308 		return -ENOMEM;
9309 	}
9310 
9311 	if (internal) {
9312 		xattrs = &blob->xattrs_internal;
9313 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
9314 	} else {
9315 		xattrs = &blob->xattrs;
9316 	}
9317 
9318 	TAILQ_FOREACH(xattr, xattrs, link) {
9319 		if (!strcmp(name, xattr->name)) {
9320 			tmp = malloc(value_len);
9321 			if (!tmp) {
9322 				return -ENOMEM;
9323 			}
9324 
9325 			free(xattr->value);
9326 			xattr->value_len = value_len;
9327 			xattr->value = tmp;
9328 			memcpy(xattr->value, value, value_len);
9329 
9330 			blob->state = SPDK_BLOB_STATE_DIRTY;
9331 
9332 			return 0;
9333 		}
9334 	}
9335 
9336 	xattr = calloc(1, sizeof(*xattr));
9337 	if (!xattr) {
9338 		return -ENOMEM;
9339 	}
9340 
9341 	xattr->name = strdup(name);
9342 	if (!xattr->name) {
9343 		free(xattr);
9344 		return -ENOMEM;
9345 	}
9346 
9347 	xattr->value_len = value_len;
9348 	xattr->value = malloc(value_len);
9349 	if (!xattr->value) {
9350 		free(xattr->name);
9351 		free(xattr);
9352 		return -ENOMEM;
9353 	}
9354 	memcpy(xattr->value, value, value_len);
9355 	TAILQ_INSERT_TAIL(xattrs, xattr, link);
9356 
9357 	blob->state = SPDK_BLOB_STATE_DIRTY;
9358 
9359 	return 0;
9360 }
9361 
9362 int
9363 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
9364 		    uint16_t value_len)
9365 {
9366 	return blob_set_xattr(blob, name, value, value_len, false);
9367 }
9368 
9369 static int
9370 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
9371 {
9372 	struct spdk_xattr_tailq *xattrs;
9373 	struct spdk_xattr *xattr;
9374 
9375 	blob_verify_md_op(blob);
9376 
9377 	if (blob->md_ro) {
9378 		return -EPERM;
9379 	}
9380 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
9381 
9382 	TAILQ_FOREACH(xattr, xattrs, link) {
9383 		if (!strcmp(name, xattr->name)) {
9384 			TAILQ_REMOVE(xattrs, xattr, link);
9385 			free(xattr->value);
9386 			free(xattr->name);
9387 			free(xattr);
9388 
9389 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
9390 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
9391 			}
9392 			blob->state = SPDK_BLOB_STATE_DIRTY;
9393 
9394 			return 0;
9395 		}
9396 	}
9397 
9398 	return -ENOENT;
9399 }
9400 
9401 int
9402 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
9403 {
9404 	return blob_remove_xattr(blob, name, false);
9405 }
9406 
9407 static int
9408 blob_get_xattr_value(struct spdk_blob *blob, const char *name,
9409 		     const void **value, size_t *value_len, bool internal)
9410 {
9411 	struct spdk_xattr *xattr;
9412 	struct spdk_xattr_tailq *xattrs;
9413 
9414 	xattrs = internal ?
&blob->xattrs_internal : &blob->xattrs; 9415 9416 TAILQ_FOREACH(xattr, xattrs, link) { 9417 if (!strcmp(name, xattr->name)) { 9418 *value = xattr->value; 9419 *value_len = xattr->value_len; 9420 return 0; 9421 } 9422 } 9423 return -ENOENT; 9424 } 9425 9426 int 9427 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 9428 const void **value, size_t *value_len) 9429 { 9430 blob_verify_md_op(blob); 9431 9432 return blob_get_xattr_value(blob, name, value, value_len, false); 9433 } 9434 9435 struct spdk_xattr_names { 9436 uint32_t count; 9437 const char *names[0]; 9438 }; 9439 9440 static int 9441 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 9442 { 9443 struct spdk_xattr *xattr; 9444 int count = 0; 9445 9446 TAILQ_FOREACH(xattr, xattrs, link) { 9447 count++; 9448 } 9449 9450 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 9451 if (*names == NULL) { 9452 return -ENOMEM; 9453 } 9454 9455 TAILQ_FOREACH(xattr, xattrs, link) { 9456 (*names)->names[(*names)->count++] = xattr->name; 9457 } 9458 9459 return 0; 9460 } 9461 9462 int 9463 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 9464 { 9465 blob_verify_md_op(blob); 9466 9467 return blob_get_xattr_names(&blob->xattrs, names); 9468 } 9469 9470 uint32_t 9471 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 9472 { 9473 assert(names != NULL); 9474 9475 return names->count; 9476 } 9477 9478 const char * 9479 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 9480 { 9481 if (index >= names->count) { 9482 return NULL; 9483 } 9484 9485 return names->names[index]; 9486 } 9487 9488 void 9489 spdk_xattr_names_free(struct spdk_xattr_names *names) 9490 { 9491 free(names); 9492 } 9493 9494 struct spdk_bs_type 9495 spdk_bs_get_bstype(struct spdk_blob_store *bs) 9496 { 9497 return bs->bstype; 9498 } 9499 9500 void 9501 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 9502 { 9503 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 9504 } 9505 9506 bool 9507 spdk_blob_is_read_only(struct spdk_blob *blob) 9508 { 9509 assert(blob != NULL); 9510 return (blob->data_ro || blob->md_ro); 9511 } 9512 9513 bool 9514 spdk_blob_is_snapshot(struct spdk_blob *blob) 9515 { 9516 struct spdk_blob_list *snapshot_entry; 9517 9518 assert(blob != NULL); 9519 9520 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 9521 if (snapshot_entry == NULL) { 9522 return false; 9523 } 9524 9525 return true; 9526 } 9527 9528 bool 9529 spdk_blob_is_clone(struct spdk_blob *blob) 9530 { 9531 assert(blob != NULL); 9532 9533 if (blob->parent_id != SPDK_BLOBID_INVALID && 9534 blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 9535 assert(spdk_blob_is_thin_provisioned(blob)); 9536 return true; 9537 } 9538 9539 return false; 9540 } 9541 9542 bool 9543 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 9544 { 9545 assert(blob != NULL); 9546 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 9547 } 9548 9549 bool 9550 spdk_blob_is_esnap_clone(const struct spdk_blob *blob) 9551 { 9552 return blob_is_esnap_clone(blob); 9553 } 9554 9555 static void 9556 blob_update_clear_method(struct spdk_blob *blob) 9557 { 9558 enum blob_clear_method stored_cm; 9559 9560 assert(blob != NULL); 9561 9562 /* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored 9563 * in metadata previously. If something other than the default was 9564 * specified, ignore the stored value and use what was passed in.
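 * For example, a blob created with BLOB_CLEAR_WITH_UNMAP keeps using unmap when it is
 * later opened with BLOB_CLEAR_WITH_DEFAULT, while explicitly passing
 * BLOB_CLEAR_WITH_WRITE_ZEROES overrides the stored method (with a warning).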
9565 */ 9566 stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT); 9567 9568 if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) { 9569 blob->clear_method = stored_cm; 9570 } else if (blob->clear_method != stored_cm) { 9571 SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n", 9572 blob->clear_method, stored_cm); 9573 } 9574 } 9575 9576 spdk_blob_id 9577 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id) 9578 { 9579 struct spdk_blob_list *snapshot_entry = NULL; 9580 struct spdk_blob_list *clone_entry = NULL; 9581 9582 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 9583 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 9584 if (clone_entry->id == blob_id) { 9585 return snapshot_entry->id; 9586 } 9587 } 9588 } 9589 9590 return SPDK_BLOBID_INVALID; 9591 } 9592 9593 int 9594 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, 9595 size_t *count) 9596 { 9597 struct spdk_blob_list *snapshot_entry, *clone_entry; 9598 size_t n; 9599 9600 snapshot_entry = bs_get_snapshot_entry(bs, blobid); 9601 if (snapshot_entry == NULL) { 9602 *count = 0; 9603 return 0; 9604 } 9605 9606 if (ids == NULL || *count < snapshot_entry->clone_count) { 9607 *count = snapshot_entry->clone_count; 9608 return -ENOMEM; 9609 } 9610 *count = snapshot_entry->clone_count; 9611 9612 n = 0; 9613 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 9614 ids[n++] = clone_entry->id; 9615 } 9616 9617 return 0; 9618 } 9619 9620 static void 9621 bs_load_grow_continue(struct spdk_bs_load_ctx *ctx) 9622 { 9623 int rc; 9624 9625 if (ctx->super->size == 0) { 9626 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9627 } 9628 9629 if (ctx->super->io_unit_size == 0) { 9630 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 9631 } 9632 if (ctx->super->md_page_size == 0) { 9633 ctx->super->md_page_size = SPDK_BS_PAGE_SIZE; 9634 } 9635 9636 /* Parse the super block */ 9637 ctx->bs->clean = 1; 9638 ctx->bs->cluster_sz = ctx->super->cluster_size; 9639 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 9640 ctx->bs->md_page_size = ctx->super->md_page_size; 9641 ctx->bs->io_unit_size = ctx->super->io_unit_size; 9642 bs_init_per_cluster_fields(ctx->bs); 9643 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 9644 if (rc < 0) { 9645 bs_load_ctx_fail(ctx, -ENOMEM); 9646 return; 9647 } 9648 ctx->bs->md_start = ctx->super->md_start; 9649 ctx->bs->md_len = ctx->super->md_len; 9650 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 9651 if (rc < 0) { 9652 bs_load_ctx_fail(ctx, -ENOMEM); 9653 return; 9654 } 9655 9656 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 9657 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 9658 ctx->bs->super_blob = ctx->super->super_blob; 9659 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 9660 9661 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) { 9662 SPDK_ERRLOG("Cannot grow an unclean blobstore; please load it normally to clean it.\n"); 9663 bs_load_ctx_fail(ctx, -EIO); 9664 return; 9665 } else { 9666 bs_load_read_used_pages(ctx); 9667 } 9668 } 9669 9670 static void 9671 bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9672 { 9673 struct spdk_bs_load_ctx *ctx = cb_arg; 9674 9675 if (bserrno != 0) { 9676 bs_load_ctx_fail(ctx, bserrno); 9677 return; 9678 } 9679
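/* The grown super block has been persisted; resume the regular load path, which
 * re-reads the used-page masks against the new size. */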
bs_load_grow_continue(ctx); 9680 } 9681 9682 static void 9683 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9684 { 9685 struct spdk_bs_load_ctx *ctx = cb_arg; 9686 9687 if (bserrno != 0) { 9688 bs_load_ctx_fail(ctx, bserrno); 9689 return; 9690 } 9691 9692 spdk_free(ctx->mask); 9693 9694 bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 9695 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 9696 bs_load_grow_super_write_cpl, ctx); 9697 } 9698 9699 static void 9700 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9701 { 9702 struct spdk_bs_load_ctx *ctx = cb_arg; 9703 uint64_t lba, lba_count; 9704 uint64_t dev_size; 9705 uint64_t total_clusters; 9706 9707 if (bserrno != 0) { 9708 bs_load_ctx_fail(ctx, bserrno); 9709 return; 9710 } 9711 9712 /* The type must be correct */ 9713 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 9714 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 9715 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 9716 struct spdk_blob_md_page) * 8)); 9717 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9718 total_clusters = dev_size / ctx->super->cluster_size; 9719 ctx->mask->length = total_clusters; 9720 9721 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 9722 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 9723 bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count, 9724 bs_load_grow_used_clusters_write_cpl, ctx); 9725 } 9726 9727 static void 9728 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx) 9729 { 9730 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 9731 uint64_t lba, lba_count, mask_size; 9732 9733 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9734 total_clusters = dev_size / ctx->super->cluster_size; 9735 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 9736 spdk_divide_round_up(total_clusters, 8), 9737 ctx->super->md_page_size); 9738 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 9739 /* Not necessary to grow, or no space to grow */ 9740 if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) { 9741 SPDK_DEBUGLOG(blob, "No grow\n"); 9742 bs_load_grow_continue(ctx); 9743 return; 9744 } 9745 9746 SPDK_DEBUGLOG(blob, "Resize blobstore\n"); 9747 9748 ctx->super->size = dev_size; 9749 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 9750 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 9751 9752 mask_size = used_cluster_mask_len * ctx->super->md_page_size; 9753 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 9754 SPDK_MALLOC_DMA); 9755 if (!ctx->mask) { 9756 bs_load_ctx_fail(ctx, -ENOMEM); 9757 return; 9758 } 9759 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 9760 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 9761 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 9762 bs_load_grow_used_clusters_read_cpl, ctx); 9763 } 9764 9765 static void 9766 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9767 { 9768 struct spdk_bs_load_ctx *ctx = cb_arg; 9769 int rc; 9770 9771 rc = bs_super_validate(ctx->super, ctx->bs); 9772 if (rc != 0) { 9773 bs_load_ctx_fail(ctx, rc); 9774 return; 9775 } 9776 9777 bs_load_try_to_grow(ctx); 9778 } 9779 9780 struct
spdk_bs_grow_ctx { 9781 struct spdk_blob_store *bs; 9782 struct spdk_bs_super_block *super; 9783 9784 struct spdk_bit_pool *new_used_clusters; 9785 struct spdk_bs_md_mask *new_used_clusters_mask; 9786 9787 spdk_bs_sequence_t *seq; 9788 }; 9789 9790 static void 9791 bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno) 9792 { 9793 if (bserrno != 0) { 9794 spdk_bit_pool_free(&ctx->new_used_clusters); 9795 } 9796 9797 bs_sequence_finish(ctx->seq, bserrno); 9798 free(ctx->new_used_clusters_mask); 9799 spdk_free(ctx->super); 9800 free(ctx); 9801 } 9802 9803 static void 9804 bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9805 { 9806 struct spdk_bs_grow_ctx *ctx = cb_arg; 9807 struct spdk_blob_store *bs = ctx->bs; 9808 uint64_t total_clusters; 9809 9810 if (bserrno != 0) { 9811 bs_grow_live_done(ctx, bserrno); 9812 return; 9813 } 9814 9815 /* 9816 * Blobstore is not clean until unload; for now only the super block is up to date. 9817 * This is similar to the state right after blobstore init, before bs_write_used_md() 9818 * has executed. 9819 * When cleanly unloaded, the used md pages will be written out. 9820 * In case of unclean shutdown, loading the blobstore will go through the recovery path, 9821 * correctly filling out used_clusters at the new size and writing it out. 9822 */ 9823 bs->clean = 0; 9824 9825 /* Reverting the super->size past this point is complex, so avoid any error paths 9826 * that would require doing so. */ 9827 spdk_spin_lock(&bs->used_lock); 9828 9829 total_clusters = ctx->super->size / ctx->super->cluster_size; 9830 9831 assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters)); 9832 spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask); 9833 9834 assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters)); 9835 spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask); 9836 9837 spdk_bit_pool_free(&bs->used_clusters); 9838 bs->used_clusters = ctx->new_used_clusters; 9839 9840 bs->total_clusters = total_clusters; 9841 bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up( 9842 bs->md_start + bs->md_len, bs->pages_per_cluster); 9843 9844 bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters); 9845 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 9846 spdk_spin_unlock(&bs->used_lock); 9847 9848 bs_grow_live_done(ctx, 0); 9849 } 9850 9851 static void 9852 bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9853 { 9854 struct spdk_bs_grow_ctx *ctx = cb_arg; 9855 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 9856 int rc; 9857 9858 if (bserrno != 0) { 9859 bs_grow_live_done(ctx, bserrno); 9860 return; 9861 } 9862 9863 rc = bs_super_validate(ctx->super, ctx->bs); 9864 if (rc != 0) { 9865 bs_grow_live_done(ctx, rc); 9866 return; 9867 } 9868 9869 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9870 total_clusters = dev_size / ctx->super->cluster_size; 9871 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 9872 spdk_divide_round_up(total_clusters, 8), 9873 ctx->super->md_page_size); 9874 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 9875 /* Only check dev_size here, since it can change while total_clusters stays the same.
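 * If the device grew since the super block was written, dev_size exceeds super->size;
 * the shrink and no-space cases are rejected explicitly below.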
*/ 9876 if (dev_size == ctx->super->size) { 9877 SPDK_DEBUGLOG(blob, "No need to grow blobstore\n"); 9878 bs_grow_live_done(ctx, 0); 9879 return; 9880 } 9881 /* 9882 * Blobstore cannot be shrunk, so check before if: 9883 * - new size of the device is smaller than size in super_block 9884 * - new total number of clusters is smaller than used_clusters bit_pool 9885 * - there is enough space in metadata for used_cluster_mask to be written out 9886 */ 9887 if (dev_size < ctx->super->size || 9888 total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) || 9889 used_cluster_mask_len > max_used_cluster_mask) { 9890 SPDK_DEBUGLOG(blob, "No space to grow blobstore\n"); 9891 bs_grow_live_done(ctx, -ENOSPC); 9892 return; 9893 } 9894 9895 SPDK_DEBUGLOG(blob, "Resizing blobstore\n"); 9896 9897 ctx->new_used_clusters_mask = calloc(1, total_clusters); 9898 if (!ctx->new_used_clusters_mask) { 9899 bs_grow_live_done(ctx, -ENOMEM); 9900 return; 9901 } 9902 ctx->new_used_clusters = spdk_bit_pool_create(total_clusters); 9903 if (!ctx->new_used_clusters) { 9904 bs_grow_live_done(ctx, -ENOMEM); 9905 return; 9906 } 9907 9908 ctx->super->clean = 0; 9909 ctx->super->size = dev_size; 9910 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 9911 bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx); 9912 } 9913 9914 void 9915 spdk_bs_grow_live(struct spdk_blob_store *bs, 9916 spdk_bs_op_complete cb_fn, void *cb_arg) 9917 { 9918 struct spdk_bs_cpl cpl; 9919 struct spdk_bs_grow_ctx *ctx; 9920 9921 assert(spdk_get_thread() == bs->md_thread); 9922 9923 SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev); 9924 9925 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 9926 cpl.u.bs_basic.cb_fn = cb_fn; 9927 cpl.u.bs_basic.cb_arg = cb_arg; 9928 9929 ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx)); 9930 if (!ctx) { 9931 cb_fn(cb_arg, -ENOMEM); 9932 return; 9933 } 9934 ctx->bs = bs; 9935 9936 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 9937 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 9938 if (!ctx->super) { 9939 free(ctx); 9940 cb_fn(cb_arg, -ENOMEM); 9941 return; 9942 } 9943 9944 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 9945 if (!ctx->seq) { 9946 spdk_free(ctx->super); 9947 free(ctx); 9948 cb_fn(cb_arg, -ENOMEM); 9949 return; 9950 } 9951 9952 /* Read the super block */ 9953 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 9954 bs_byte_to_lba(bs, sizeof(*ctx->super)), 9955 bs_grow_live_load_super_cpl, ctx); 9956 } 9957 9958 void 9959 spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 9960 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 9961 { 9962 struct spdk_blob_store *bs; 9963 struct spdk_bs_cpl cpl; 9964 struct spdk_bs_load_ctx *ctx; 9965 struct spdk_bs_opts opts = {}; 9966 int err; 9967 9968 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 9969 9970 if ((dev->phys_blocklen % dev->blocklen) != 0) { 9971 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 9972 dev->destroy(dev); 9973 cb_fn(cb_arg, NULL, -EINVAL); 9974 return; 9975 } 9976 9977 spdk_bs_opts_init(&opts, sizeof(opts)); 9978 if (o) { 9979 if (bs_opts_copy(o, &opts)) { 9980 return; 9981 } 9982 } 9983 9984 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 9985 dev->destroy(dev); 9986 cb_fn(cb_arg, NULL, -EINVAL); 9987 return; 9988 } 9989 9990 err = bs_alloc(dev, &opts, &bs, &ctx); 9991 if (err) { 9992 dev->destroy(dev); 9993 cb_fn(cb_arg, NULL, err); 9994 return; 9995 } 9996 9997 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 9998 
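/* The completion hands the blobstore handle back to the caller once the
 * grow-and-load sequence finishes. */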
cpl.u.bs_handle.cb_fn = cb_fn; 9999 cpl.u.bs_handle.cb_arg = cb_arg; 10000 cpl.u.bs_handle.bs = bs; 10001 10002 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 10003 if (!ctx->seq) { 10004 spdk_free(ctx->super); 10005 free(ctx); 10006 bs_free(bs); 10007 cb_fn(cb_arg, NULL, -ENOMEM); 10008 return; 10009 } 10010 10011 /* Read the super block */ 10012 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 10013 bs_byte_to_lba(bs, sizeof(*ctx->super)), 10014 bs_grow_load_super_cpl, ctx); 10015 } 10016 10017 int 10018 spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len) 10019 { 10020 if (!blob_is_esnap_clone(blob)) { 10021 return -EINVAL; 10022 } 10023 10024 return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true); 10025 } 10026 10027 struct spdk_io_channel * 10028 blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob) 10029 { 10030 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(ch); 10031 struct spdk_bs_dev *bs_dev = blob->back_bs_dev; 10032 struct blob_esnap_channel find = {}; 10033 struct blob_esnap_channel *esnap_channel, *existing; 10034 10035 find.blob_id = blob->id; 10036 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 10037 if (spdk_likely(esnap_channel != NULL)) { 10038 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n", 10039 blob->id, spdk_thread_get_name(spdk_get_thread())); 10040 return esnap_channel->channel; 10041 } 10042 10043 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n", 10044 blob->id, spdk_thread_get_name(spdk_get_thread())); 10045 10046 esnap_channel = calloc(1, sizeof(*esnap_channel)); 10047 if (esnap_channel == NULL) { 10048 SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n", 10049 find.blob_id); 10050 return NULL; 10051 } 10052 esnap_channel->channel = bs_dev->create_channel(bs_dev); 10053 if (esnap_channel->channel == NULL) { 10054 SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id); 10055 free(esnap_channel); 10056 return NULL; 10057 } 10058 esnap_channel->blob_id = find.blob_id; 10059 existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 10060 if (spdk_unlikely(existing != NULL)) { 10061 /* 10062 * This should be unreachable: all modifications to this tree happen on this thread. 10063 */ 10064 SPDK_ERRLOG("blob 0x%" PRIx64 " lost race to allocate a channel\n", find.blob_id); 10065 assert(false); 10066 10067 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 10068 free(esnap_channel); 10069 10070 return existing->channel; 10071 } 10072 10073 return esnap_channel->channel; 10074 } 10075 10076 static int 10077 blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2) 10078 { 10079 return (c1->blob_id < c2->blob_id ?
-1 : c1->blob_id > c2->blob_id); 10080 } 10081 10082 struct blob_esnap_destroy_ctx { 10083 spdk_blob_op_with_handle_complete cb_fn; 10084 void *cb_arg; 10085 struct spdk_blob *blob; 10086 struct spdk_bs_dev *back_bs_dev; 10087 bool abort_io; 10088 }; 10089 10090 static void 10091 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status) 10092 { 10093 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 10094 struct spdk_blob *blob = ctx->blob; 10095 struct spdk_blob_store *bs = blob->bs; 10096 10097 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n", 10098 blob->id); 10099 10100 if (ctx->cb_fn != NULL) { 10101 ctx->cb_fn(ctx->cb_arg, blob, status); 10102 } 10103 free(ctx); 10104 10105 bs->esnap_channels_unloading--; 10106 if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) { 10107 spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg); 10108 } 10109 } 10110 10111 static void 10112 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i) 10113 { 10114 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 10115 struct spdk_blob *blob = ctx->blob; 10116 struct spdk_bs_dev *bs_dev = ctx->back_bs_dev; 10117 struct spdk_io_channel *channel = spdk_io_channel_iter_get_channel(i); 10118 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(channel); 10119 struct blob_esnap_channel *esnap_channel; 10120 struct blob_esnap_channel find = {}; 10121 10122 assert(spdk_get_thread() == spdk_io_channel_get_thread(channel)); 10123 10124 find.blob_id = blob->id; 10125 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 10126 if (esnap_channel != NULL) { 10127 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n", 10128 blob->id, spdk_thread_get_name(spdk_get_thread())); 10129 RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 10130 10131 if (ctx->abort_io) { 10132 spdk_bs_user_op_t *op, *tmp; 10133 10134 TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) { 10135 if (op->back_channel == esnap_channel->channel) { 10136 TAILQ_REMOVE(&bs_channel->queued_io, op, link); 10137 bs_user_op_abort(op, -EIO); 10138 } 10139 } 10140 } 10141 10142 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 10143 free(esnap_channel); 10144 } 10145 10146 spdk_for_each_channel_continue(i, 0); 10147 } 10148 10149 /* 10150 * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be 10151 * used when closing an esnap clone blob and after decoupling from the parent. 
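 * The teardown runs via spdk_for_each_channel(), visiting each thread that owns a
 * bs_channel and completing back on the thread that started it.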
10152 */ 10153 static void 10154 blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io, 10155 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 10156 { 10157 struct blob_esnap_destroy_ctx *ctx; 10158 10159 if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) { 10160 if (cb_fn != NULL) { 10161 cb_fn(cb_arg, blob, 0); 10162 } 10163 return; 10164 } 10165 10166 ctx = calloc(1, sizeof(*ctx)); 10167 if (ctx == NULL) { 10168 if (cb_fn != NULL) { 10169 cb_fn(cb_arg, blob, -ENOMEM); 10170 } 10171 return; 10172 } 10173 ctx->cb_fn = cb_fn; 10174 ctx->cb_arg = cb_arg; 10175 ctx->blob = blob; 10176 ctx->back_bs_dev = blob->back_bs_dev; 10177 ctx->abort_io = abort_io; 10178 10179 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n", 10180 blob->id); 10181 10182 blob->bs->esnap_channels_unloading++; 10183 spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx, 10184 blob_esnap_destroy_channels_done); 10185 } 10186 10187 /* 10188 * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a 10189 * bs_channel is destroyed. 10190 */ 10191 static void 10192 blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch) 10193 { 10194 struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp; 10195 10196 assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch))); 10197 10198 SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n", 10199 spdk_thread_get_name(spdk_get_thread())); 10200 RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels, 10201 esnap_channel_tmp) { 10202 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 10203 ": destroying one channel in thread %s\n", 10204 esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread())); 10205 RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel); 10206 spdk_put_io_channel(esnap_channel->channel); 10207 free(esnap_channel); 10208 } 10209 SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n", 10210 spdk_thread_get_name(spdk_get_thread())); 10211 } 10212 10213 static void 10214 blob_set_back_bs_dev_done(void *_ctx, int bserrno) 10215 { 10216 struct set_bs_dev_ctx *ctx = _ctx; 10217 10218 if (bserrno != 0) { 10219 /* Even though the unfreeze failed, the update may have succeeded.
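 * Only log the unfreeze status here; the completion below reports ctx->bserrno,
 * which holds the first error recorded in this sequence.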
*/ 10220 SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id, 10221 bserrno); 10222 } 10223 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 10224 free(ctx); 10225 } 10226 10227 static void 10228 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno) 10229 { 10230 struct set_bs_dev_ctx *ctx = _ctx; 10231 int rc; 10232 10233 if (bserrno != 0) { 10234 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n", 10235 blob->id, bserrno); 10236 ctx->bserrno = bserrno; 10237 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10238 return; 10239 } 10240 10241 if (blob->back_bs_dev != NULL) { 10242 blob_unref_back_bs_dev(blob); 10243 } 10244 10245 if (ctx->parent_refs_cb_fn) { 10246 rc = ctx->parent_refs_cb_fn(blob, ctx->parent_refs_cb_arg); 10247 if (rc != 0) { 10248 ctx->bserrno = rc; 10249 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10250 return; 10251 } 10252 } 10253 10254 SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id); 10255 blob->back_bs_dev = ctx->back_bs_dev; 10256 ctx->bserrno = 0; 10257 10258 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10259 } 10260 10261 static void 10262 blob_set_back_bs_dev_frozen(void *_ctx, int bserrno) 10263 { 10264 struct set_bs_dev_ctx *ctx = _ctx; 10265 struct spdk_blob *blob = ctx->blob; 10266 10267 if (bserrno != 0) { 10268 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id, 10269 bserrno); 10270 ctx->cb_fn(ctx->cb_arg, bserrno); 10271 free(ctx); 10272 return; 10273 } 10274 10275 /* 10276 * This does not prevent future reads from the esnap device because any future IO will 10277 * lazily create a new esnap IO channel. 10278 */ 10279 blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx); 10280 } 10281 10282 void 10283 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev, 10284 spdk_blob_op_complete cb_fn, void *cb_arg) 10285 { 10286 if (!blob_is_esnap_clone(blob)) { 10287 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 10288 cb_fn(cb_arg, -EINVAL); 10289 return; 10290 } 10291 10292 blob_set_back_bs_dev(blob, back_bs_dev, NULL, NULL, cb_fn, cb_arg); 10293 } 10294 10295 struct spdk_bs_dev * 10296 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob) 10297 { 10298 if (!blob_is_esnap_clone(blob)) { 10299 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 10300 return NULL; 10301 } 10302 10303 return blob->back_bs_dev; 10304 } 10305 10306 bool 10307 spdk_blob_is_degraded(const struct spdk_blob *blob) 10308 { 10309 if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) { 10310 return true; 10311 } 10312 if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) { 10313 return false; 10314 } 10315 10316 return blob->back_bs_dev->is_degraded(blob->back_bs_dev); 10317 } 10318 10319 SPDK_LOG_REGISTER_COMPONENT(blob) 10320 SPDK_LOG_REGISTER_COMPONENT(blob_esnap) 10321 10322 static void 10323 blob_trace(void) 10324 { 10325 struct spdk_trace_tpoint_opts opts[] = { 10326 { 10327 "BLOB_REQ_SET_START", TRACE_BLOB_REQ_SET_START, 10328 OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 1, 10329 { 10330 { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 } 10331 } 10332 }, 10333 { 10334 "BLOB_REQ_SET_COMPLETE", TRACE_BLOB_REQ_SET_COMPLETE, 10335 OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 0, 10336 { 10337 { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 } 10338 } 10339 }, 10340 }; 10341 10342 spdk_trace_register_object(OBJECT_BLOB_CB_ARG, 'a'); 
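/* Describe the tracepoints and tie them to bdev IO start/done so blob requests can be
 * correlated with the bdev IOs they generate. */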
10343 spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts)); 10344 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_START, OBJECT_BLOB_CB_ARG, 1); 10345 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_DONE, OBJECT_BLOB_CB_ARG, 0); 10346 } 10347 SPDK_TRACE_REGISTER_FN(blob_trace, "blob", TRACE_GROUP_BLOB) 10348
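/*
 * Usage sketch for the iterator and xattr APIs defined above. This is an
 * illustrative, non-compiled example: `g_bs` is assumed to come from a prior
 * spdk_bs_load(), the callback runs on the blobstore's md_thread, and the
 * names iter_cb and g_bs are hypothetical.
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *		const void *val;
 *		size_t val_len;
 *
 *		if (bserrno == -ENOENT) {
 *			return;	// Walked past the last allocated blobid; iteration is done.
 *		}
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("blob iteration failed: %d\n", bserrno);
 *			return;
 *		}
 *
 *		printf("blob 0x%" PRIx64 "\n", spdk_blob_get_id(blob));
 *		if (spdk_blob_get_xattr_value(blob, "name", &val, &val_len) == 0) {
 *			printf("  xattr 'name' is %zu bytes\n", val_len);
 *		}
 *
 *		// spdk_bs_iter_next() closes `blob` before opening its successor,
 *		// so `blob` must not be used after this call.
 *		spdk_bs_iter_next(bs, blob, iter_cb, bs);
 *	}
 *
 *	spdk_bs_iter_first(g_bs, iter_cb, g_bs);
 */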