1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2017 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 9 #include "spdk/blob.h" 10 #include "spdk/crc32.h" 11 #include "spdk/env.h" 12 #include "spdk/queue.h" 13 #include "spdk/thread.h" 14 #include "spdk/bit_array.h" 15 #include "spdk/bit_pool.h" 16 #include "spdk/likely.h" 17 #include "spdk/util.h" 18 #include "spdk/string.h" 19 #include "spdk/trace.h" 20 21 #include "spdk_internal/assert.h" 22 #include "spdk_internal/trace_defs.h" 23 #include "spdk/log.h" 24 25 #include "blobstore.h" 26 27 #define BLOB_CRC32C_INITIAL 0xffffffffUL 28 29 static int bs_register_md_thread(struct spdk_blob_store *bs); 30 static int bs_unregister_md_thread(struct spdk_blob_store *bs); 31 static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno); 32 static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 33 uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page, 34 spdk_blob_op_complete cb_fn, void *cb_arg); 35 static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 36 uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg); 37 38 static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 39 uint16_t value_len, bool internal); 40 static int blob_get_xattr_value(struct spdk_blob *blob, const char *name, 41 const void **value, size_t *value_len, bool internal); 42 static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal); 43 44 static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 45 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg); 46 static void blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg); 47 48 static void bs_shallow_copy_cluster_find_next(void *cb_arg); 49 50 /* 51 * External snapshots require a channel per thread per esnap bdev. The tree 52 * is populated lazily as blob IOs are handled by the back_bs_dev. When this 53 * channel is destroyed, all the channels in the tree are destroyed. 54 */ 55 56 struct blob_esnap_channel { 57 RB_ENTRY(blob_esnap_channel) node; 58 spdk_blob_id blob_id; 59 struct spdk_io_channel *channel; 60 }; 61 62 static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2); 63 static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io, 64 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg); 65 static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch); 66 static void blob_set_back_bs_dev_frozen(void *_ctx, int bserrno); 67 RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare) 68 69 static inline bool 70 blob_is_esnap_clone(const struct spdk_blob *blob) 71 { 72 assert(blob != NULL); 73 return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT); 74 } 75 76 static int 77 blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2) 78 { 79 assert(blob1 != NULL && blob2 != NULL); 80 return (blob1->id < blob2->id ? 
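/* Editorial sketch (illustrative, not upstream code): this three-way comparator
 * returns negative, zero, or positive and feeds RB_GENERATE_STATIC(spdk_blob_tree, ...)
 * just below, so a lookup can use a stack key, e.g.:
 *   struct spdk_blob find; find.id = blobid;
 *   blob = RB_FIND(spdk_blob_tree, &bs->open_blobs, &find);
 * (assuming the tree head is bs->open_blobs as declared in blobstore.h) */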
-1 : blob1->id > blob2->id); 81 } 82 83 RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp); 84 85 static void 86 blob_verify_md_op(struct spdk_blob *blob) 87 { 88 assert(blob != NULL); 89 assert(spdk_get_thread() == blob->bs->md_thread); 90 assert(blob->state != SPDK_BLOB_STATE_LOADING); 91 } 92 93 static struct spdk_blob_list * 94 bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid) 95 { 96 struct spdk_blob_list *snapshot_entry = NULL; 97 98 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 99 if (snapshot_entry->id == blobid) { 100 break; 101 } 102 } 103 104 return snapshot_entry; 105 } 106 107 static void 108 bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page) 109 { 110 assert(spdk_spin_held(&bs->used_lock)); 111 assert(page < spdk_bit_array_capacity(bs->used_md_pages)); 112 assert(spdk_bit_array_get(bs->used_md_pages, page) == false); 113 114 spdk_bit_array_set(bs->used_md_pages, page); 115 } 116 117 static void 118 bs_release_md_page(struct spdk_blob_store *bs, uint32_t page) 119 { 120 assert(spdk_spin_held(&bs->used_lock)); 121 assert(page < spdk_bit_array_capacity(bs->used_md_pages)); 122 assert(spdk_bit_array_get(bs->used_md_pages, page) == true); 123 124 spdk_bit_array_clear(bs->used_md_pages, page); 125 } 126 127 static uint32_t 128 bs_claim_cluster(struct spdk_blob_store *bs) 129 { 130 uint32_t cluster_num; 131 132 assert(spdk_spin_held(&bs->used_lock)); 133 134 cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters); 135 if (cluster_num == UINT32_MAX) { 136 return UINT32_MAX; 137 } 138 139 SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num); 140 bs->num_free_clusters--; 141 142 return cluster_num; 143 } 144 145 static void 146 bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num) 147 { 148 assert(spdk_spin_held(&bs->used_lock)); 149 assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters)); 150 assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true); 151 assert(bs->num_free_clusters < bs->total_clusters); 152 153 SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num); 154 155 spdk_bit_pool_free_bit(bs->used_clusters, cluster_num); 156 bs->num_free_clusters++; 157 } 158 159 static int 160 blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster) 161 { 162 uint64_t *cluster_lba = &blob->active.clusters[cluster_num]; 163 164 blob_verify_md_op(blob); 165 166 if (*cluster_lba != 0) { 167 return -EEXIST; 168 } 169 170 *cluster_lba = bs_cluster_to_lba(blob->bs, cluster); 171 blob->active.num_allocated_clusters++; 172 173 return 0; 174 } 175 176 static int 177 bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num, 178 uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map) 179 { 180 uint32_t *extent_page = 0; 181 182 assert(spdk_spin_held(&blob->bs->used_lock)); 183 184 *cluster = bs_claim_cluster(blob->bs); 185 if (*cluster == UINT32_MAX) { 186 /* No more free clusters. Cannot satisfy the request */ 187 return -ENOSPC; 188 } 189 190 if (blob->use_extent_table) { 191 extent_page = bs_cluster_to_extent_page(blob, cluster_num); 192 if (*extent_page == 0) { 193 /* Extent page shall never occupy md_page 0, so start the search from 1 */ 194 if (*lowest_free_md_page == 0) { 195 *lowest_free_md_page = 1; 196 } 197 /* No extent_page is allocated for the cluster */ 198 *lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, 199 *lowest_free_md_page); 200 if (*lowest_free_md_page == UINT32_MAX) { 201 /* No more free md pages.
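(The md page pool, bs->used_md_pages, is tracked separately from bs->used_clusters, so metadata exhaustion can occur even while data clusters remain free.)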
Cannot satisfy the request */ 202 bs_release_cluster(blob->bs, *cluster); 203 return -ENOSPC; 204 } 205 bs_claim_md_page(blob->bs, *lowest_free_md_page); 206 } 207 } 208 209 SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster, 210 blob->id); 211 212 if (update_map) { 213 blob_insert_cluster(blob, cluster_num, *cluster); 214 if (blob->use_extent_table && *extent_page == 0) { 215 *extent_page = *lowest_free_md_page; 216 } 217 } 218 219 return 0; 220 } 221 222 static void 223 blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs) 224 { 225 xattrs->count = 0; 226 xattrs->names = NULL; 227 xattrs->ctx = NULL; 228 xattrs->get_value = NULL; 229 } 230 231 void 232 spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size) 233 { 234 if (!opts) { 235 SPDK_ERRLOG("opts should not be NULL\n"); 236 return; 237 } 238 239 if (!opts_size) { 240 SPDK_ERRLOG("opts_size should not be zero\n"); 241 return; 242 } 243 244 memset(opts, 0, opts_size); 245 opts->opts_size = opts_size; 246 247 #define FIELD_OK(field) \ 248 offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size 249 250 #define SET_FIELD(field, value) \ 251 if (FIELD_OK(field)) { \ 252 opts->field = value; \ 253 } \ 254 255 SET_FIELD(num_clusters, 0); 256 SET_FIELD(thin_provision, false); 257 SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT); 258 259 if (FIELD_OK(xattrs)) { 260 blob_xattrs_init(&opts->xattrs); 261 } 262 263 SET_FIELD(use_extent_table, true); 264 265 #undef FIELD_OK 266 #undef SET_FIELD 267 } 268 269 void 270 spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size) 271 { 272 if (!opts) { 273 SPDK_ERRLOG("opts should not be NULL\n"); 274 return; 275 } 276 277 if (!opts_size) { 278 SPDK_ERRLOG("opts_size should not be zero\n"); 279 return; 280 } 281 282 memset(opts, 0, opts_size); 283 opts->opts_size = opts_size; 284 285 #define FIELD_OK(field) \ 286 offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size 287 288 #define SET_FIELD(field, value) \ 289 if (FIELD_OK(field)) { \ 290 opts->field = value; \ 291 } \ 292 293 SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT); 294 295 #undef FIELD_OK 296 #undef SET_FIELD 297 } 298 299 static struct spdk_blob * 300 blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id) 301 { 302 struct spdk_blob *blob; 303 304 blob = calloc(1, sizeof(*blob)); 305 if (!blob) { 306 return NULL; 307 } 308 309 blob->id = id; 310 blob->bs = bs; 311 312 blob->parent_id = SPDK_BLOBID_INVALID; 313 314 blob->state = SPDK_BLOB_STATE_DIRTY; 315 blob->extent_rle_found = false; 316 blob->extent_table_found = false; 317 blob->active.num_pages = 1; 318 blob->active.pages = calloc(1, sizeof(*blob->active.pages)); 319 if (!blob->active.pages) { 320 free(blob); 321 return NULL; 322 } 323 324 blob->active.pages[0] = bs_blobid_to_page(id); 325 326 TAILQ_INIT(&blob->xattrs); 327 TAILQ_INIT(&blob->xattrs_internal); 328 TAILQ_INIT(&blob->pending_persists); 329 TAILQ_INIT(&blob->persists_to_complete); 330 331 return blob; 332 } 333 334 static void 335 xattrs_free(struct spdk_xattr_tailq *xattrs) 336 { 337 struct spdk_xattr *xattr, *xattr_tmp; 338 339 TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) { 340 TAILQ_REMOVE(xattrs, xattr, link); 341 free(xattr->name); 342 free(xattr->value); 343 free(xattr); 344 } 345 } 346 347 static void 348 blob_unref_back_bs_dev(struct spdk_blob *blob) 349 { 350 blob->back_bs_dev->destroy(blob->back_bs_dev); 351 blob->back_bs_dev = NULL; 352 } 353 354 static void 355 blob_free(struct spdk_blob
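/* Usage sketch for the versioned-opts pattern above (illustrative; create_cb
 * and cb_arg are hypothetical). Callers pass sizeof(opts) so newer fields are
 * only touched when the caller's struct is large enough to contain them:
 *   struct spdk_blob_opts opts;
 *   spdk_blob_opts_init(&opts, sizeof(opts));
 *   opts.num_clusters = 8;
 *   opts.thin_provision = true;
 *   spdk_bs_create_blob_ext(bs, &opts, create_cb, cb_arg);
 */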
*blob) 356 { 357 assert(blob != NULL); 358 assert(TAILQ_EMPTY(&blob->pending_persists)); 359 assert(TAILQ_EMPTY(&blob->persists_to_complete)); 360 361 free(blob->active.extent_pages); 362 free(blob->clean.extent_pages); 363 free(blob->active.clusters); 364 free(blob->clean.clusters); 365 free(blob->active.pages); 366 free(blob->clean.pages); 367 368 xattrs_free(&blob->xattrs); 369 xattrs_free(&blob->xattrs_internal); 370 371 if (blob->back_bs_dev) { 372 blob_unref_back_bs_dev(blob); 373 } 374 375 free(blob); 376 } 377 378 static void 379 blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno) 380 { 381 struct spdk_bs_dev *bs_dev = ctx; 382 383 if (bserrno != 0) { 384 /* 385 * This is probably due to a memory allocation failure when creating the 386 * blob_esnap_destroy_ctx before iterating threads. 387 */ 388 SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n", 389 blob->id, bserrno); 390 assert(false); 391 } 392 393 if (bs_dev == NULL) { 394 /* 395 * This check exists to make scanbuild happy. 396 * 397 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while 398 * the blobstore is being loaded. It could also be NULL if there was an error 399 * opening the esnap device. In each of these cases, no channels could have been 400 * created because back_bs_dev->create_channel() would have led to a NULL pointer 401 * deref. 402 */ 403 assert(false); 404 return; 405 } 406 407 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id); 408 bs_dev->destroy(bs_dev); 409 } 410 411 static void 412 blob_back_bs_destroy(struct spdk_blob *blob) 413 { 414 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n", 415 blob->id); 416 417 blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done, 418 blob->back_bs_dev); 419 blob->back_bs_dev = NULL; 420 } 421 422 struct blob_parent { 423 union { 424 struct { 425 spdk_blob_id id; 426 struct spdk_blob *blob; 427 } snapshot; 428 429 struct { 430 void *id; 431 uint32_t id_len; 432 struct spdk_bs_dev *back_bs_dev; 433 } esnap; 434 } u; 435 }; 436 437 typedef int (*set_parent_refs_cb)(struct spdk_blob *blob, struct blob_parent *parent); 438 439 struct set_bs_dev_ctx { 440 struct spdk_blob *blob; 441 struct spdk_bs_dev *back_bs_dev; 442 443 /* 444 * This callback is used during a set parent operation to change the references 445 * to the parent of the blob. 
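 * For a snapshot parent this would update e.g. the internal BLOB_SNAPSHOT
 * xattr and the clone bookkeeping; for an external snapshot, the
 * BLOB_EXTERNAL_SNAPSHOT_ID xattr (inferred from how these xattrs are used
 * elsewhere in this file).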
446 */ 447 set_parent_refs_cb parent_refs_cb_fn; 448 struct blob_parent *parent_refs_cb_arg; 449 450 spdk_blob_op_complete cb_fn; 451 void *cb_arg; 452 int bserrno; 453 }; 454 455 static void 456 blob_set_back_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev, 457 set_parent_refs_cb parent_refs_cb_fn, struct blob_parent *parent_refs_cb_arg, 458 spdk_blob_op_complete cb_fn, void *cb_arg) 459 { 460 struct set_bs_dev_ctx *ctx; 461 462 ctx = calloc(1, sizeof(*ctx)); 463 if (ctx == NULL) { 464 SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n", 465 blob->id); 466 cb_fn(cb_arg, -ENOMEM); 467 return; 468 } 469 470 ctx->parent_refs_cb_fn = parent_refs_cb_fn; 471 ctx->parent_refs_cb_arg = parent_refs_cb_arg; 472 ctx->cb_fn = cb_fn; 473 ctx->cb_arg = cb_arg; 474 ctx->back_bs_dev = back_bs_dev; 475 ctx->blob = blob; 476 477 blob_freeze_io(blob, blob_set_back_bs_dev_frozen, ctx); 478 } 479 480 struct freeze_io_ctx { 481 struct spdk_bs_cpl cpl; 482 struct spdk_blob *blob; 483 }; 484 485 static void 486 blob_io_sync(struct spdk_io_channel_iter *i) 487 { 488 spdk_for_each_channel_continue(i, 0); 489 } 490 491 static void 492 blob_execute_queued_io(struct spdk_io_channel_iter *i) 493 { 494 struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i); 495 struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch); 496 struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 497 struct spdk_bs_request_set *set; 498 struct spdk_bs_user_op_args *args; 499 spdk_bs_user_op_t *op, *tmp; 500 501 TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) { 502 set = (struct spdk_bs_request_set *)op; 503 args = &set->u.user_op; 504 505 if (args->blob == ctx->blob) { 506 TAILQ_REMOVE(&ch->queued_io, op, link); 507 bs_user_op_execute(op); 508 } 509 } 510 511 spdk_for_each_channel_continue(i, 0); 512 } 513 514 static void 515 blob_io_cpl(struct spdk_io_channel_iter *i, int status) 516 { 517 struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 518 519 ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0); 520 521 free(ctx); 522 } 523 524 static void 525 blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 526 { 527 struct freeze_io_ctx *ctx; 528 529 blob_verify_md_op(blob); 530 531 ctx = calloc(1, sizeof(*ctx)); 532 if (!ctx) { 533 cb_fn(cb_arg, -ENOMEM); 534 return; 535 } 536 537 ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 538 ctx->cpl.u.blob_basic.cb_fn = cb_fn; 539 ctx->cpl.u.blob_basic.cb_arg = cb_arg; 540 ctx->blob = blob; 541 542 /* Freeze I/O on blob */ 543 blob->frozen_refcnt++; 544 545 spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl); 546 } 547 548 static void 549 blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 550 { 551 struct freeze_io_ctx *ctx; 552 553 blob_verify_md_op(blob); 554 555 ctx = calloc(1, sizeof(*ctx)); 556 if (!ctx) { 557 cb_fn(cb_arg, -ENOMEM); 558 return; 559 } 560 561 ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 562 ctx->cpl.u.blob_basic.cb_fn = cb_fn; 563 ctx->cpl.u.blob_basic.cb_arg = cb_arg; 564 ctx->blob = blob; 565 566 assert(blob->frozen_refcnt > 0); 567 568 blob->frozen_refcnt--; 569 570 spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl); 571 } 572 573 static int 574 blob_mark_clean(struct spdk_blob *blob) 575 { 576 uint32_t *extent_pages = NULL; 577 uint64_t *clusters = NULL; 578 uint32_t *pages = NULL; 579 580 assert(blob != NULL); 581 582 if (blob->active.num_extent_pages) { 583 assert(blob->active.extent_pages); 584 extent_pages = 
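/* Deep-copy the active arrays: the fresh copies become the new 'active'
 * arrays while the originals are handed over to 'clean' below, so 'clean'
 * always mirrors what was last persisted to disk. */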
calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages)); 585 if (!extent_pages) { 586 return -ENOMEM; 587 } 588 memcpy(extent_pages, blob->active.extent_pages, 589 blob->active.num_extent_pages * sizeof(*extent_pages)); 590 } 591 592 if (blob->active.num_clusters) { 593 assert(blob->active.clusters); 594 clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters)); 595 if (!clusters) { 596 free(extent_pages); 597 return -ENOMEM; 598 } 599 memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters)); 600 } 601 602 if (blob->active.num_pages) { 603 assert(blob->active.pages); 604 pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages)); 605 if (!pages) { 606 free(extent_pages); 607 free(clusters); 608 return -ENOMEM; 609 } 610 memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 611 } 612 613 free(blob->clean.extent_pages); 614 free(blob->clean.clusters); 615 free(blob->clean.pages); 616 617 blob->clean.num_extent_pages = blob->active.num_extent_pages; 618 blob->clean.extent_pages = blob->active.extent_pages; 619 blob->clean.num_clusters = blob->active.num_clusters; 620 blob->clean.clusters = blob->active.clusters; 621 blob->clean.num_allocated_clusters = blob->active.num_allocated_clusters; 622 blob->clean.num_pages = blob->active.num_pages; 623 blob->clean.pages = blob->active.pages; 624 625 blob->active.extent_pages = extent_pages; 626 blob->active.clusters = clusters; 627 blob->active.pages = pages; 628 629 /* If the metadata was dirtied again while the metadata was being written to disk, 630 * we do not want to revert the DIRTY state back to CLEAN here. 631 */ 632 if (blob->state == SPDK_BLOB_STATE_LOADING) { 633 blob->state = SPDK_BLOB_STATE_CLEAN; 634 } 635 636 return 0; 637 } 638 639 static int 640 blob_deserialize_xattr(struct spdk_blob *blob, 641 struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal) 642 { 643 struct spdk_xattr *xattr; 644 645 if (desc_xattr->length != sizeof(desc_xattr->name_length) + 646 sizeof(desc_xattr->value_length) + 647 desc_xattr->name_length + desc_xattr->value_length) { 648 return -EINVAL; 649 } 650 651 xattr = calloc(1, sizeof(*xattr)); 652 if (xattr == NULL) { 653 return -ENOMEM; 654 } 655 656 xattr->name = malloc(desc_xattr->name_length + 1); 657 if (xattr->name == NULL) { 658 free(xattr); 659 return -ENOMEM; 660 } 661 662 xattr->value = malloc(desc_xattr->value_length); 663 if (xattr->value == NULL) { 664 free(xattr->name); 665 free(xattr); 666 return -ENOMEM; 667 } 668 669 memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length); 670 xattr->name[desc_xattr->name_length] = '\0'; 671 xattr->value_len = desc_xattr->value_length; 672 memcpy(xattr->value, 673 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 674 desc_xattr->value_length); 675 676 TAILQ_INSERT_TAIL(internal ? 
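/* Internal xattrs (e.g. BLOB_SNAPSHOT, BLOB_EXTERNAL_SNAPSHOT_ID) are kept on
 * a separate list so they are never exposed through the public xattr API. */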
&blob->xattrs_internal : &blob->xattrs, xattr, link); 677 678 return 0; 679 } 680 681 682 static int 683 blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob) 684 { 685 struct spdk_blob_md_descriptor *desc; 686 size_t cur_desc = 0; 687 void *tmp; 688 689 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 690 while (cur_desc < sizeof(page->descriptors)) { 691 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 692 if (desc->length == 0) { 693 /* If padding and length are 0, this terminates the page */ 694 break; 695 } 696 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 697 struct spdk_blob_md_descriptor_flags *desc_flags; 698 699 desc_flags = (struct spdk_blob_md_descriptor_flags *)desc; 700 701 if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) { 702 return -EINVAL; 703 } 704 705 if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) != 706 SPDK_BLOB_INVALID_FLAGS_MASK) { 707 return -EINVAL; 708 } 709 710 if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) != 711 SPDK_BLOB_DATA_RO_FLAGS_MASK) { 712 blob->data_ro = true; 713 blob->md_ro = true; 714 } 715 716 if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) != 717 SPDK_BLOB_MD_RO_FLAGS_MASK) { 718 blob->md_ro = true; 719 } 720 721 if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 722 blob->data_ro = true; 723 blob->md_ro = true; 724 } 725 726 blob->invalid_flags = desc_flags->invalid_flags; 727 blob->data_ro_flags = desc_flags->data_ro_flags; 728 blob->md_ro_flags = desc_flags->md_ro_flags; 729 730 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 731 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 732 unsigned int i, j; 733 unsigned int cluster_count = blob->active.num_clusters; 734 735 if (blob->extent_table_found) { 736 /* Extent Table already present in the md, 737 * both descriptors should never be at the same time. 
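 * (A blob is serialized either as EXTENT_TABLE + EXTENT_PAGE descriptors or
 * as EXTENT_RLE, chosen by use_extent_table at serialization time; seeing
 * both would make the cluster accounting ambiguous.)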
*/ 738 return -EINVAL; 739 } 740 blob->extent_rle_found = true; 741 742 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 743 744 if (desc_extent_rle->length == 0 || 745 (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) { 746 return -EINVAL; 747 } 748 749 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 750 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 751 if (desc_extent_rle->extents[i].cluster_idx != 0) { 752 if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, 753 desc_extent_rle->extents[i].cluster_idx + j)) { 754 return -EINVAL; 755 } 756 } 757 cluster_count++; 758 } 759 } 760 761 if (cluster_count == 0) { 762 return -EINVAL; 763 } 764 tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters)); 765 if (tmp == NULL) { 766 return -ENOMEM; 767 } 768 blob->active.clusters = tmp; 769 blob->active.cluster_array_size = cluster_count; 770 771 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 772 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 773 if (desc_extent_rle->extents[i].cluster_idx != 0) { 774 blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs, 775 desc_extent_rle->extents[i].cluster_idx + j); 776 blob->active.num_allocated_clusters++; 777 } else if (spdk_blob_is_thin_provisioned(blob)) { 778 blob->active.clusters[blob->active.num_clusters++] = 0; 779 } else { 780 return -EINVAL; 781 } 782 } 783 } 784 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 785 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 786 uint32_t num_extent_pages = blob->active.num_extent_pages; 787 uint32_t i, j; 788 size_t extent_pages_length; 789 790 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 791 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 792 793 if (blob->extent_rle_found) { 794 /* This means that Extent RLE is present in MD, 795 * both should never be at the same time. */ 796 return -EINVAL; 797 } else if (blob->extent_table_found && 798 desc_extent_table->num_clusters != blob->remaining_clusters_in_et) { 799 /* Number of clusters in this ET does not match number 800 * from previously read EXTENT_TABLE. */ 801 return -EINVAL; 802 } 803 804 if (desc_extent_table->length == 0 || 805 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 806 return -EINVAL; 807 } 808 809 blob->extent_table_found = true; 810 811 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 812 num_extent_pages += desc_extent_table->extent_page[i].num_pages; 813 } 814 815 if (num_extent_pages > 0) { 816 tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t)); 817 if (tmp == NULL) { 818 return -ENOMEM; 819 } 820 blob->active.extent_pages = tmp; 821 } 822 blob->active.extent_pages_array_size = num_extent_pages; 823 824 blob->remaining_clusters_in_et = desc_extent_table->num_clusters; 825 826 /* Extent table entries contain md page numbers for extent pages. 827 * Zeroes represent unallocated extent pages, those are run-length-encoded. 
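 * For example, an entry with page_idx == 0 and num_pages == 3 stands for
 * three consecutive unallocated extent pages, while each allocated page is
 * recorded individually with num_pages == 1.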
828 */ 829 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 830 if (desc_extent_table->extent_page[i].page_idx != 0) { 831 assert(desc_extent_table->extent_page[i].num_pages == 1); 832 blob->active.extent_pages[blob->active.num_extent_pages++] = 833 desc_extent_table->extent_page[i].page_idx; 834 } else if (spdk_blob_is_thin_provisioned(blob)) { 835 for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) { 836 blob->active.extent_pages[blob->active.num_extent_pages++] = 0; 837 } 838 } else { 839 return -EINVAL; 840 } 841 } 842 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 843 struct spdk_blob_md_descriptor_extent_page *desc_extent; 844 unsigned int i; 845 unsigned int cluster_count = 0; 846 size_t cluster_idx_length; 847 848 if (blob->extent_rle_found) { 849 /* This means that Extent RLE is present in MD, 850 * both should never be at the same time. */ 851 return -EINVAL; 852 } 853 854 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 855 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 856 857 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 858 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 859 return -EINVAL; 860 } 861 862 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 863 if (desc_extent->cluster_idx[i] != 0) { 864 if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) { 865 return -EINVAL; 866 } 867 } 868 cluster_count++; 869 } 870 871 if (cluster_count == 0) { 872 return -EINVAL; 873 } 874 875 /* When reading extent pages sequentially starting cluster idx should match 876 * current size of a blob. 877 * If changed to batch reading, this check shall be removed. */ 878 if (desc_extent->start_cluster_idx != blob->active.num_clusters) { 879 return -EINVAL; 880 } 881 882 tmp = realloc(blob->active.clusters, 883 (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters)); 884 if (tmp == NULL) { 885 return -ENOMEM; 886 } 887 blob->active.clusters = tmp; 888 blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters); 889 890 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 891 if (desc_extent->cluster_idx[i] != 0) { 892 blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs, 893 desc_extent->cluster_idx[i]); 894 blob->active.num_allocated_clusters++; 895 } else if (spdk_blob_is_thin_provisioned(blob)) { 896 blob->active.clusters[blob->active.num_clusters++] = 0; 897 } else { 898 return -EINVAL; 899 } 900 } 901 assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters); 902 assert(blob->remaining_clusters_in_et >= cluster_count); 903 blob->remaining_clusters_in_et -= cluster_count; 904 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 905 int rc; 906 907 rc = blob_deserialize_xattr(blob, 908 (struct spdk_blob_md_descriptor_xattr *) desc, false); 909 if (rc != 0) { 910 return rc; 911 } 912 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 913 int rc; 914 915 rc = blob_deserialize_xattr(blob, 916 (struct spdk_blob_md_descriptor_xattr *) desc, true); 917 if (rc != 0) { 918 return rc; 919 } 920 } else { 921 /* Unrecognized descriptor type. Do not fail - just continue to the 922 * next descriptor. 
If this descriptor is associated with some feature 923 * defined in a newer version of blobstore, that version of blobstore 924 * should create and set an associated feature flag to specify if this 925 * blob can be loaded or not. 926 */ 927 } 928 929 /* Advance to the next descriptor */ 930 cur_desc += sizeof(*desc) + desc->length; 931 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 932 break; 933 } 934 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 935 } 936 937 return 0; 938 } 939 940 static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page); 941 942 static int 943 blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob) 944 { 945 assert(blob != NULL); 946 assert(blob->state == SPDK_BLOB_STATE_LOADING); 947 948 if (bs_load_cur_extent_page_valid(extent_page) == false) { 949 return -ENOENT; 950 } 951 952 return blob_parse_page(extent_page, blob); 953 } 954 955 static int 956 blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count, 957 struct spdk_blob *blob) 958 { 959 const struct spdk_blob_md_page *page; 960 uint32_t i; 961 int rc; 962 void *tmp; 963 964 assert(page_count > 0); 965 assert(pages[0].sequence_num == 0); 966 assert(blob != NULL); 967 assert(blob->state == SPDK_BLOB_STATE_LOADING); 968 assert(blob->active.clusters == NULL); 969 970 /* The blobid provided doesn't match what's in the MD, this can 971 * happen for example if a bogus blobid is passed in through open. 972 */ 973 if (blob->id != pages[0].id) { 974 SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata " 975 "(0x%" PRIx64 ")\n", blob->id, pages[0].id); 976 return -ENOENT; 977 } 978 979 tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages)); 980 if (!tmp) { 981 return -ENOMEM; 982 } 983 blob->active.pages = tmp; 984 985 blob->active.pages[0] = pages[0].id; 986 987 for (i = 1; i < page_count; i++) { 988 assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next)); 989 blob->active.pages[i] = pages[i - 1].next; 990 } 991 blob->active.num_pages = page_count; 992 993 for (i = 0; i < page_count; i++) { 994 page = &pages[i]; 995 996 assert(page->id == blob->id); 997 assert(page->sequence_num == i); 998 999 rc = blob_parse_page(page, blob); 1000 if (rc != 0) { 1001 return rc; 1002 } 1003 } 1004 1005 return 0; 1006 } 1007 1008 static int 1009 blob_serialize_add_page(const struct spdk_blob *blob, 1010 struct spdk_blob_md_page **pages, 1011 uint32_t *page_count, 1012 struct spdk_blob_md_page **last_page) 1013 { 1014 struct spdk_blob_md_page *page, *tmp_pages; 1015 1016 assert(pages != NULL); 1017 assert(page_count != NULL); 1018 1019 *last_page = NULL; 1020 if (*page_count == 0) { 1021 assert(*pages == NULL); 1022 *pages = spdk_malloc(blob->bs->md_page_size, 0, 1023 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 1024 if (*pages == NULL) { 1025 return -ENOMEM; 1026 } 1027 *page_count = 1; 1028 } else { 1029 assert(*pages != NULL); 1030 tmp_pages = spdk_realloc(*pages, blob->bs->md_page_size * (*page_count + 1), 0); 1031 if (tmp_pages == NULL) { 1032 return -ENOMEM; 1033 } 1034 (*page_count)++; 1035 *pages = tmp_pages; 1036 } 1037 1038 page = &(*pages)[*page_count - 1]; 1039 memset(page, 0, sizeof(*page)); 1040 page->id = blob->id; 1041 page->sequence_num = *page_count - 1; 1042 page->next = SPDK_INVALID_MD_PAGE; 1043 *last_page = page; 1044 1045 return 0; 1046 } 1047 1048 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor. 
1049 * Update required_sz on both success and failure. 1050 * 1051 */ 1052 static int 1053 blob_serialize_xattr(const struct spdk_xattr *xattr, 1054 uint8_t *buf, size_t buf_sz, 1055 size_t *required_sz, bool internal) 1056 { 1057 struct spdk_blob_md_descriptor_xattr *desc; 1058 1059 *required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) + 1060 strlen(xattr->name) + 1061 xattr->value_len; 1062 1063 if (buf_sz < *required_sz) { 1064 return -1; 1065 } 1066 1067 desc = (struct spdk_blob_md_descriptor_xattr *)buf; 1068 1069 desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR; 1070 desc->length = sizeof(desc->name_length) + 1071 sizeof(desc->value_length) + 1072 strlen(xattr->name) + 1073 xattr->value_len; 1074 desc->name_length = strlen(xattr->name); 1075 desc->value_length = xattr->value_len; 1076 1077 memcpy(desc->name, xattr->name, desc->name_length); 1078 memcpy((void *)((uintptr_t)desc->name + desc->name_length), 1079 xattr->value, 1080 desc->value_length); 1081 1082 return 0; 1083 } 1084 1085 static void 1086 blob_serialize_extent_table_entry(const struct spdk_blob *blob, 1087 uint64_t start_ep, uint64_t *next_ep, 1088 uint8_t **buf, size_t *remaining_sz) 1089 { 1090 struct spdk_blob_md_descriptor_extent_table *desc; 1091 size_t cur_sz; 1092 uint64_t i, et_idx; 1093 uint32_t extent_page, ep_len; 1094 1095 /* The buffer must have room for at least num_clusters entry */ 1096 cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters); 1097 if (*remaining_sz < cur_sz) { 1098 *next_ep = start_ep; 1099 return; 1100 } 1101 1102 desc = (struct spdk_blob_md_descriptor_extent_table *)*buf; 1103 desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE; 1104 1105 desc->num_clusters = blob->active.num_clusters; 1106 1107 ep_len = 1; 1108 et_idx = 0; 1109 for (i = start_ep; i < blob->active.num_extent_pages; i++) { 1110 if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) { 1111 /* If we ran out of buffer space, return */ 1112 break; 1113 } 1114 1115 extent_page = blob->active.extent_pages[i]; 1116 /* Verify that next extent_page is unallocated */ 1117 if (extent_page == 0 && 1118 (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) { 1119 ep_len++; 1120 continue; 1121 } 1122 desc->extent_page[et_idx].page_idx = extent_page; 1123 desc->extent_page[et_idx].num_pages = ep_len; 1124 et_idx++; 1125 1126 ep_len = 1; 1127 cur_sz += sizeof(desc->extent_page[et_idx]); 1128 } 1129 *next_ep = i; 1130 1131 desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx; 1132 *remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length; 1133 *buf += sizeof(struct spdk_blob_md_descriptor) + desc->length; 1134 } 1135 1136 static int 1137 blob_serialize_extent_table(const struct spdk_blob *blob, 1138 struct spdk_blob_md_page **pages, 1139 struct spdk_blob_md_page *cur_page, 1140 uint32_t *page_count, uint8_t **buf, 1141 size_t *remaining_sz) 1142 { 1143 uint64_t last_extent_page; 1144 int rc; 1145 1146 last_extent_page = 0; 1147 /* At least single extent table entry has to be always persisted. 1148 * Such case occurs with num_extent_pages == 0. 
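 * That is why the loop below iterates while last_extent_page <=
 * num_extent_pages rather than <: a zero-length blob still emits one
 * descriptor carrying only num_clusters.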
*/ 1149 while (last_extent_page <= blob->active.num_extent_pages) { 1150 blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf, 1151 remaining_sz); 1152 1153 if (last_extent_page == blob->active.num_extent_pages) { 1154 break; 1155 } 1156 1157 rc = blob_serialize_add_page(blob, pages, page_count, &cur_page); 1158 if (rc < 0) { 1159 return rc; 1160 } 1161 1162 *buf = (uint8_t *)cur_page->descriptors; 1163 *remaining_sz = sizeof(cur_page->descriptors); 1164 } 1165 1166 return 0; 1167 } 1168 1169 static void 1170 blob_serialize_extent_rle(const struct spdk_blob *blob, 1171 uint64_t start_cluster, uint64_t *next_cluster, 1172 uint8_t **buf, size_t *buf_sz) 1173 { 1174 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 1175 size_t cur_sz; 1176 uint64_t i, extent_idx; 1177 uint64_t lba, lba_per_cluster, lba_count; 1178 1179 /* The buffer must have room for at least one extent */ 1180 cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]); 1181 if (*buf_sz < cur_sz) { 1182 *next_cluster = start_cluster; 1183 return; 1184 } 1185 1186 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf; 1187 desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE; 1188 1189 lba_per_cluster = bs_cluster_to_lba(blob->bs, 1); 1190 /* Assert for scan-build false positive */ 1191 assert(lba_per_cluster > 0); 1192 1193 lba = blob->active.clusters[start_cluster]; 1194 lba_count = lba_per_cluster; 1195 extent_idx = 0; 1196 for (i = start_cluster + 1; i < blob->active.num_clusters; i++) { 1197 if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) { 1198 /* Run-length encode sequential non-zero LBA */ 1199 lba_count += lba_per_cluster; 1200 continue; 1201 } else if (lba == 0 && blob->active.clusters[i] == 0) { 1202 /* Run-length encode unallocated clusters */ 1203 lba_count += lba_per_cluster; 1204 continue; 1205 } 1206 desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 1207 desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster; 1208 extent_idx++; 1209 1210 cur_sz += sizeof(desc_extent_rle->extents[extent_idx]); 1211 1212 if (*buf_sz < cur_sz) { 1213 /* If we ran out of buffer space, return */ 1214 *next_cluster = i; 1215 break; 1216 } 1217 1218 lba = blob->active.clusters[i]; 1219 lba_count = lba_per_cluster; 1220 } 1221 1222 if (*buf_sz >= cur_sz) { 1223 desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 1224 desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster; 1225 extent_idx++; 1226 1227 *next_cluster = blob->active.num_clusters; 1228 } 1229 1230 desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx; 1231 *buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length; 1232 *buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length; 1233 } 1234 1235 static int 1236 blob_serialize_extents_rle(const struct spdk_blob *blob, 1237 struct spdk_blob_md_page **pages, 1238 struct spdk_blob_md_page *cur_page, 1239 uint32_t *page_count, uint8_t **buf, 1240 size_t *remaining_sz) 1241 { 1242 uint64_t last_cluster; 1243 int rc; 1244 1245 last_cluster = 0; 1246 while (last_cluster < blob->active.num_clusters) { 1247 blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz); 1248 1249 if (last_cluster == blob->active.num_clusters) { 1250 break; 1251 } 1252 1253 rc = blob_serialize_add_page(blob, pages, page_count, &cur_page); 1254 if (rc < 0) { 1255 return rc; 1256 } 1257 1258 *buf = 
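/* A fresh page was chained on; point the descriptor cursor at the start of
 * its descriptor area and reset the remaining size. */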
(uint8_t *)cur_page->descriptors; 1259 *remaining_sz = sizeof(cur_page->descriptors); 1260 } 1261 1262 return 0; 1263 } 1264 1265 static void 1266 blob_serialize_extent_page(const struct spdk_blob *blob, 1267 uint64_t cluster, struct spdk_blob_md_page *page) 1268 { 1269 struct spdk_blob_md_descriptor_extent_page *desc_extent; 1270 uint64_t i, extent_idx; 1271 uint64_t lba, lba_per_cluster; 1272 uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP; 1273 1274 desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors; 1275 desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE; 1276 1277 lba_per_cluster = bs_cluster_to_lba(blob->bs, 1); 1278 1279 desc_extent->start_cluster_idx = start_cluster_idx; 1280 extent_idx = 0; 1281 for (i = start_cluster_idx; i < blob->active.num_clusters; i++) { 1282 lba = blob->active.clusters[i]; 1283 desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster; 1284 if (extent_idx >= SPDK_EXTENTS_PER_EP) { 1285 break; 1286 } 1287 } 1288 desc_extent->length = sizeof(desc_extent->start_cluster_idx) + 1289 sizeof(desc_extent->cluster_idx[0]) * extent_idx; 1290 } 1291 1292 static void 1293 blob_serialize_flags(const struct spdk_blob *blob, 1294 uint8_t *buf, size_t *buf_sz) 1295 { 1296 struct spdk_blob_md_descriptor_flags *desc; 1297 1298 /* 1299 * Flags get serialized first, so we should always have room for the flags 1300 * descriptor. 1301 */ 1302 assert(*buf_sz >= sizeof(*desc)); 1303 1304 desc = (struct spdk_blob_md_descriptor_flags *)buf; 1305 desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS; 1306 desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor); 1307 desc->invalid_flags = blob->invalid_flags; 1308 desc->data_ro_flags = blob->data_ro_flags; 1309 desc->md_ro_flags = blob->md_ro_flags; 1310 1311 *buf_sz -= sizeof(*desc); 1312 } 1313 1314 static int 1315 blob_serialize_xattrs(const struct spdk_blob *blob, 1316 const struct spdk_xattr_tailq *xattrs, bool internal, 1317 struct spdk_blob_md_page **pages, 1318 struct spdk_blob_md_page *cur_page, 1319 uint32_t *page_count, uint8_t **buf, 1320 size_t *remaining_sz) 1321 { 1322 const struct spdk_xattr *xattr; 1323 int rc; 1324 1325 TAILQ_FOREACH(xattr, xattrs, link) { 1326 size_t required_sz = 0; 1327 1328 rc = blob_serialize_xattr(xattr, 1329 *buf, *remaining_sz, 1330 &required_sz, internal); 1331 if (rc < 0) { 1332 /* Need to add a new page to the chain */ 1333 rc = blob_serialize_add_page(blob, pages, page_count, 1334 &cur_page); 1335 if (rc < 0) { 1336 spdk_free(*pages); 1337 *pages = NULL; 1338 *page_count = 0; 1339 return rc; 1340 } 1341 1342 *buf = (uint8_t *)cur_page->descriptors; 1343 *remaining_sz = sizeof(cur_page->descriptors); 1344 1345 /* Try again */ 1346 required_sz = 0; 1347 rc = blob_serialize_xattr(xattr, 1348 *buf, *remaining_sz, 1349 &required_sz, internal); 1350 1351 if (rc < 0) { 1352 spdk_free(*pages); 1353 *pages = NULL; 1354 *page_count = 0; 1355 return rc; 1356 } 1357 } 1358 1359 *remaining_sz -= required_sz; 1360 *buf += required_sz; 1361 } 1362 1363 return 0; 1364 } 1365 1366 static int 1367 blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages, 1368 uint32_t *page_count) 1369 { 1370 struct spdk_blob_md_page *cur_page; 1371 int rc; 1372 uint8_t *buf; 1373 size_t remaining_sz; 1374 1375 assert(pages != NULL); 1376 assert(page_count != NULL); 1377 assert(blob != NULL); 1378 assert(blob->state == SPDK_BLOB_STATE_DIRTY); 1379 1380 *pages = NULL; 1381 *page_count = 0; 1382 1383 /* A blob always has at least 
1 page, even if it has no descriptors */ 1384 rc = blob_serialize_add_page(blob, pages, page_count, &cur_page); 1385 if (rc < 0) { 1386 return rc; 1387 } 1388 1389 buf = (uint8_t *)cur_page->descriptors; 1390 remaining_sz = sizeof(cur_page->descriptors); 1391 1392 /* Serialize flags */ 1393 blob_serialize_flags(blob, buf, &remaining_sz); 1394 buf += sizeof(struct spdk_blob_md_descriptor_flags); 1395 1396 /* Serialize xattrs */ 1397 rc = blob_serialize_xattrs(blob, &blob->xattrs, false, 1398 pages, cur_page, page_count, &buf, &remaining_sz); 1399 if (rc < 0) { 1400 return rc; 1401 } 1402 1403 /* Serialize internal xattrs */ 1404 rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true, 1405 pages, cur_page, page_count, &buf, &remaining_sz); 1406 if (rc < 0) { 1407 return rc; 1408 } 1409 1410 if (blob->use_extent_table) { 1411 /* Serialize extent table */ 1412 rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz); 1413 } else { 1414 /* Serialize extents */ 1415 rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz); 1416 } 1417 1418 return rc; 1419 } 1420 1421 struct spdk_blob_load_ctx { 1422 struct spdk_blob *blob; 1423 1424 struct spdk_blob_md_page *pages; 1425 uint32_t num_pages; 1426 uint32_t next_extent_page; 1427 spdk_bs_sequence_t *seq; 1428 1429 spdk_bs_sequence_cpl cb_fn; 1430 void *cb_arg; 1431 }; 1432 1433 static uint32_t 1434 blob_md_page_calc_crc(void *page) 1435 { 1436 uint32_t crc; 1437 1438 crc = BLOB_CRC32C_INITIAL; 1439 crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc); 1440 crc ^= BLOB_CRC32C_INITIAL; 1441 1442 return crc; 1443 1444 } 1445 1446 static void 1447 blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno) 1448 { 1449 struct spdk_blob *blob = ctx->blob; 1450 1451 if (bserrno == 0) { 1452 blob_mark_clean(blob); 1453 } 1454 1455 ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno); 1456 1457 /* Free the memory */ 1458 spdk_free(ctx->pages); 1459 free(ctx); 1460 } 1461 1462 static void 1463 blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno) 1464 { 1465 struct spdk_blob_load_ctx *ctx = cb_arg; 1466 struct spdk_blob *blob = ctx->blob; 1467 1468 if (bserrno == 0) { 1469 blob->back_bs_dev = bs_create_blob_bs_dev(snapshot); 1470 if (blob->back_bs_dev == NULL) { 1471 bserrno = -ENOMEM; 1472 } 1473 } 1474 if (bserrno != 0) { 1475 SPDK_ERRLOG("Snapshot fail\n"); 1476 } 1477 1478 blob_load_final(ctx, bserrno); 1479 } 1480 1481 static void blob_update_clear_method(struct spdk_blob *blob); 1482 1483 static int 1484 blob_load_esnap(struct spdk_blob *blob, void *blob_ctx) 1485 { 1486 struct spdk_blob_store *bs = blob->bs; 1487 struct spdk_bs_dev *bs_dev = NULL; 1488 const void *esnap_id = NULL; 1489 size_t id_len = 0; 1490 int rc; 1491 1492 if (bs->esnap_bs_dev_create == NULL) { 1493 SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened " 1494 "without support for esnap clones\n", blob->id); 1495 return -ENOTSUP; 1496 } 1497 assert(blob->back_bs_dev == NULL); 1498 1499 rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true); 1500 if (rc != 0) { 1501 SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id); 1502 return -EINVAL; 1503 } 1504 assert(id_len > 0 && id_len < UINT32_MAX); 1505 1506 SPDK_INFOLOG(blob, "Creating external snapshot device\n"); 1507 1508 rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len, 1509 &bs_dev); 1510 if (rc != 0) { 1511 
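/* Illustrative sketch (not upstream code) of a creator a consumer might
 * register as esnap_bs_dev_create; my_event_cb is hypothetical and the esnap
 * ID is assumed to be a NUL-terminated bdev name:
 *   static int
 *   my_esnap_create(void *bs_ctx, void *blob_ctx, struct spdk_blob *blob,
 *                   const void *id, uint32_t id_len, struct spdk_bs_dev **dev)
 *   {
 *       return spdk_bdev_create_bs_dev_ext((const char *)id, my_event_cb,
 *                                          NULL, dev);
 *   }
 */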
SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev " 1512 "with error %d\n", blob->id, rc); 1513 return rc; 1514 } 1515 1516 /* 1517 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot. 1518 * This especially might happen during spdk_bs_load() iteration. 1519 */ 1520 if (bs_dev != NULL) { 1521 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id); 1522 if ((bs->io_unit_size % bs_dev->blocklen) != 0) { 1523 SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u " 1524 "is not compatible with blobstore block size %u\n", 1525 blob->id, bs_dev->blocklen, bs->io_unit_size); 1526 bs_dev->destroy(bs_dev); 1527 return -EINVAL; 1528 } 1529 } 1530 1531 blob->back_bs_dev = bs_dev; 1532 blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 1533 1534 return 0; 1535 } 1536 1537 static void 1538 blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg) 1539 { 1540 struct spdk_blob_load_ctx *ctx = cb_arg; 1541 struct spdk_blob *blob = ctx->blob; 1542 const void *value; 1543 size_t len; 1544 int rc; 1545 1546 if (blob_is_esnap_clone(blob)) { 1547 rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx); 1548 blob_load_final(ctx, rc); 1549 return; 1550 } 1551 1552 if (spdk_blob_is_thin_provisioned(blob)) { 1553 rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true); 1554 if (rc == 0) { 1555 if (len != sizeof(spdk_blob_id)) { 1556 blob_load_final(ctx, -EINVAL); 1557 return; 1558 } 1559 /* open snapshot blob and continue in the callback function */ 1560 blob->parent_id = *(spdk_blob_id *)value; 1561 spdk_bs_open_blob(blob->bs, blob->parent_id, 1562 blob_load_snapshot_cpl, ctx); 1563 return; 1564 } else { 1565 /* add zeroes_dev for thin provisioned blob */ 1566 blob->back_bs_dev = bs_create_zeroes_dev(); 1567 } 1568 } else { 1569 /* standard blob */ 1570 blob->back_bs_dev = NULL; 1571 } 1572 blob_load_final(ctx, 0); 1573 } 1574 1575 static void 1576 blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1577 { 1578 struct spdk_blob_load_ctx *ctx = cb_arg; 1579 struct spdk_blob *blob = ctx->blob; 1580 struct spdk_blob_md_page *page; 1581 uint64_t i; 1582 uint32_t crc; 1583 uint64_t lba; 1584 void *tmp; 1585 uint64_t sz; 1586 1587 if (bserrno) { 1588 SPDK_ERRLOG("Extent page read failed: %d\n", bserrno); 1589 blob_load_final(ctx, bserrno); 1590 return; 1591 } 1592 1593 if (ctx->pages == NULL) { 1594 /* First iteration of this function, allocate buffer for single EXTENT_PAGE */ 1595 ctx->pages = spdk_zmalloc(blob->bs->md_page_size, 0, 1596 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 1597 if (!ctx->pages) { 1598 blob_load_final(ctx, -ENOMEM); 1599 return; 1600 } 1601 ctx->num_pages = 1; 1602 ctx->next_extent_page = 0; 1603 } else { 1604 page = &ctx->pages[0]; 1605 crc = blob_md_page_calc_crc(page); 1606 if (crc != page->crc) { 1607 blob_load_final(ctx, -EINVAL); 1608 return; 1609 } 1610 1611 if (page->next != SPDK_INVALID_MD_PAGE) { 1612 blob_load_final(ctx, -EINVAL); 1613 return; 1614 } 1615 1616 bserrno = blob_parse_extent_page(page, blob); 1617 if (bserrno) { 1618 blob_load_final(ctx, bserrno); 1619 return; 1620 } 1621 } 1622 1623 for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) { 1624 if (blob->active.extent_pages[i] != 0) { 1625 /* Extent page was allocated, read and parse it. 
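 * Pages are read one at a time into ctx->pages[0]; this function is its own
 * completion callback, so it re-enters until every allocated extent page has
 * been consumed.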
*/ 1626 lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]); 1627 ctx->next_extent_page = i + 1; 1628 1629 bs_sequence_read_dev(seq, &ctx->pages[0], lba, 1630 bs_byte_to_lba(blob->bs, blob->bs->md_page_size), 1631 blob_load_cpl_extents_cpl, ctx); 1632 return; 1633 } else { 1634 /* Thin provisioned blobs can point to unallocated extent pages. 1635 * In this case blob size should be increased by up to the amount left in remaining_clusters_in_et. */ 1636 1637 sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP); 1638 blob->active.num_clusters += sz; 1639 blob->remaining_clusters_in_et -= sz; 1640 1641 assert(spdk_blob_is_thin_provisioned(blob)); 1642 assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0); 1643 1644 tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters)); 1645 if (tmp == NULL) { 1646 blob_load_final(ctx, -ENOMEM); 1647 return; 1648 } 1649 memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0, 1650 sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size)); 1651 blob->active.clusters = tmp; 1652 blob->active.cluster_array_size = blob->active.num_clusters; 1653 } 1654 } 1655 1656 blob_load_backing_dev(seq, ctx); 1657 } 1658 1659 static void 1660 blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1661 { 1662 struct spdk_blob_load_ctx *ctx = cb_arg; 1663 struct spdk_blob *blob = ctx->blob; 1664 struct spdk_blob_md_page *page; 1665 int rc; 1666 uint32_t crc; 1667 uint32_t current_page; 1668 1669 if (ctx->num_pages == 1) { 1670 current_page = bs_blobid_to_page(blob->id); 1671 } else { 1672 assert(ctx->num_pages != 0); 1673 page = &ctx->pages[ctx->num_pages - 2]; 1674 current_page = page->next; 1675 } 1676 1677 if (bserrno) { 1678 SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n", 1679 current_page, blob->id, bserrno); 1680 blob_load_final(ctx, bserrno); 1681 return; 1682 } 1683 1684 page = &ctx->pages[ctx->num_pages - 1]; 1685 crc = blob_md_page_calc_crc(page); 1686 if (crc != page->crc) { 1687 SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n", 1688 current_page, blob->id); 1689 blob_load_final(ctx, -EINVAL); 1690 return; 1691 } 1692 1693 if (page->next != SPDK_INVALID_MD_PAGE) { 1694 struct spdk_blob_md_page *tmp_pages; 1695 uint32_t next_page = page->next; 1696 uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page); 1697 1698 /* Read the next page */ 1699 tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0); 1700 if (tmp_pages == NULL) { 1701 blob_load_final(ctx, -ENOMEM); 1702 return; 1703 } 1704 ctx->num_pages++; 1705 ctx->pages = tmp_pages; 1706 1707 bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1], 1708 next_lba, 1709 bs_byte_to_lba(blob->bs, sizeof(*page)), 1710 blob_load_cpl, ctx); 1711 return; 1712 } 1713 1714 /* Parse the pages */ 1715 rc = blob_parse(ctx->pages, ctx->num_pages, blob); 1716 if (rc) { 1717 blob_load_final(ctx, rc); 1718 return; 1719 } 1720 1721 if (blob->extent_table_found == true) { 1722 /* If EXTENT_TABLE was found, that means support for it should be enabled. */ 1723 assert(blob->extent_rle_found == false); 1724 blob->use_extent_table = true; 1725 } else { 1726 /* If EXTENT_RLE or no extent_* descriptor was found disable support 1727 * for extent table. No extent_* descriptors means that blob has length of 0 1728 * and no extent_rle descriptors were persisted for it. 
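 * Such blobs will therefore continue to be serialized via the EXTENT_RLE path.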
1729 * EXTENT_TABLE if used, is always present in metadata regardless of length. */ 1730 blob->use_extent_table = false; 1731 } 1732 1733 /* Check the clear_method stored in metadata vs what may have been passed 1734 * via spdk_bs_open_blob_ext() and update accordingly. 1735 */ 1736 blob_update_clear_method(blob); 1737 1738 spdk_free(ctx->pages); 1739 ctx->pages = NULL; 1740 1741 if (blob->extent_table_found) { 1742 blob_load_cpl_extents_cpl(seq, ctx, 0); 1743 } else { 1744 blob_load_backing_dev(seq, ctx); 1745 } 1746 } 1747 1748 /* Load a blob from disk given a blobid */ 1749 static void 1750 blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 1751 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 1752 { 1753 struct spdk_blob_load_ctx *ctx; 1754 struct spdk_blob_store *bs; 1755 uint32_t page_num; 1756 uint64_t lba; 1757 1758 blob_verify_md_op(blob); 1759 1760 bs = blob->bs; 1761 1762 ctx = calloc(1, sizeof(*ctx)); 1763 if (!ctx) { 1764 cb_fn(seq, cb_arg, -ENOMEM); 1765 return; 1766 } 1767 1768 ctx->blob = blob; 1769 ctx->pages = spdk_realloc(ctx->pages, bs->md_page_size, 0); 1770 if (!ctx->pages) { 1771 free(ctx); 1772 cb_fn(seq, cb_arg, -ENOMEM); 1773 return; 1774 } 1775 ctx->num_pages = 1; 1776 ctx->cb_fn = cb_fn; 1777 ctx->cb_arg = cb_arg; 1778 ctx->seq = seq; 1779 1780 page_num = bs_blobid_to_page(blob->id); 1781 lba = bs_md_page_to_lba(blob->bs, page_num); 1782 1783 blob->state = SPDK_BLOB_STATE_LOADING; 1784 1785 bs_sequence_read_dev(seq, &ctx->pages[0], lba, 1786 bs_byte_to_lba(bs, bs->md_page_size), 1787 blob_load_cpl, ctx); 1788 } 1789 1790 struct spdk_blob_persist_ctx { 1791 struct spdk_blob *blob; 1792 1793 struct spdk_blob_md_page *pages; 1794 uint32_t next_extent_page; 1795 struct spdk_blob_md_page *extent_page; 1796 1797 spdk_bs_sequence_t *seq; 1798 spdk_bs_sequence_cpl cb_fn; 1799 void *cb_arg; 1800 TAILQ_ENTRY(spdk_blob_persist_ctx) link; 1801 }; 1802 1803 static void 1804 bs_batch_clear_dev(struct spdk_blob *blob, spdk_bs_batch_t *batch, uint64_t lba, 1805 uint64_t lba_count) 1806 { 1807 switch (blob->clear_method) { 1808 case BLOB_CLEAR_WITH_DEFAULT: 1809 case BLOB_CLEAR_WITH_UNMAP: 1810 bs_batch_unmap_dev(batch, lba, lba_count); 1811 break; 1812 case BLOB_CLEAR_WITH_WRITE_ZEROES: 1813 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1814 break; 1815 case BLOB_CLEAR_WITH_NONE: 1816 default: 1817 break; 1818 } 1819 } 1820 1821 static int 1822 bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs) 1823 { 1824 uint32_t crc; 1825 static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH]; 1826 1827 if (super->version > SPDK_BS_VERSION || 1828 super->version < SPDK_BS_INITIAL_VERSION) { 1829 return -EILSEQ; 1830 } 1831 1832 if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG, 1833 sizeof(super->signature)) != 0) { 1834 return -EILSEQ; 1835 } 1836 1837 crc = blob_md_page_calc_crc(super); 1838 if (crc != super->crc) { 1839 return -EILSEQ; 1840 } 1841 1842 if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 1843 SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n"); 1844 } else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 1845 SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless bstype\n"); 1846 } else { 1847 SPDK_DEBUGLOG(blob, "Unexpected bstype\n"); 1848 SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 1849 SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 1850 return -ENXIO; 1851 } 1852 1853 if (super->size > 
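/* The blobstore may legitimately be smaller than the underlying device (e.g.
 * the bdev grew after initialization), but a superblock claiming more space
 * than the device provides is invalid. */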
bs->dev->blockcnt * bs->dev->blocklen) { 1854 SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n", 1855 bs->dev->blockcnt * bs->dev->blocklen, super->size); 1856 return -EILSEQ; 1857 } 1858 1859 return 0; 1860 } 1861 1862 static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 1863 spdk_bs_sequence_cpl cb_fn, void *cb_arg); 1864 1865 static void 1866 blob_persist_complete_cb(void *arg) 1867 { 1868 struct spdk_blob_persist_ctx *ctx = arg; 1869 1870 /* Call user callback */ 1871 ctx->cb_fn(ctx->seq, ctx->cb_arg, 0); 1872 1873 /* Free the memory */ 1874 spdk_free(ctx->pages); 1875 free(ctx); 1876 } 1877 1878 static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno); 1879 1880 static void 1881 blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno) 1882 { 1883 struct spdk_blob_persist_ctx *next_persist, *tmp; 1884 struct spdk_blob *blob = ctx->blob; 1885 1886 if (bserrno == 0) { 1887 blob_mark_clean(blob); 1888 } 1889 1890 assert(ctx == TAILQ_FIRST(&blob->persists_to_complete)); 1891 1892 /* Complete all persists that were pending when the current persist started */ 1893 TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) { 1894 TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link); 1895 spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist); 1896 } 1897 1898 if (TAILQ_EMPTY(&blob->pending_persists)) { 1899 return; 1900 } 1901 1902 /* Queue up all pending persists for completion and start blob persist with first one */ 1903 TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link); 1904 next_persist = TAILQ_FIRST(&blob->persists_to_complete); 1905 1906 blob->state = SPDK_BLOB_STATE_DIRTY; 1907 bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist); 1908 } 1909 1910 static void 1911 blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1912 { 1913 struct spdk_blob_persist_ctx *ctx = cb_arg; 1914 struct spdk_blob *blob = ctx->blob; 1915 struct spdk_blob_store *bs = blob->bs; 1916 size_t i; 1917 1918 if (bserrno != 0) { 1919 blob_persist_complete(seq, ctx, bserrno); 1920 return; 1921 } 1922 1923 spdk_spin_lock(&bs->used_lock); 1924 1925 /* Release all extent_pages that were truncated */ 1926 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1927 /* Nothing to release if it was not allocated */ 1928 if (blob->active.extent_pages[i] != 0) { 1929 bs_release_md_page(bs, blob->active.extent_pages[i]); 1930 } 1931 } 1932 1933 spdk_spin_unlock(&bs->used_lock); 1934 1935 if (blob->active.num_extent_pages == 0) { 1936 free(blob->active.extent_pages); 1937 blob->active.extent_pages = NULL; 1938 blob->active.extent_pages_array_size = 0; 1939 } else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) { 1940 #ifndef __clang_analyzer__ 1941 void *tmp; 1942 1943 /* scan-build really can't figure reallocs, workaround it */ 1944 tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages); 1945 assert(tmp != NULL); 1946 blob->active.extent_pages = tmp; 1947 #endif 1948 blob->active.extent_pages_array_size = blob->active.num_extent_pages; 1949 } 1950 1951 blob_persist_complete(seq, ctx, bserrno); 1952 } 1953 1954 static void 1955 blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 1956 { 1957 struct spdk_blob *blob = ctx->blob; 1958 struct 
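/* Persist teardown proceeds in order: md pages are zeroed and released
 * (blob_persist_zero_pages/_cpl), truncated clusters are cleared and released
 * (blob_persist_clear_clusters/_cpl), and finally truncated extent pages are
 * zeroed here and released in the _cpl above. */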
spdk_blob_store *bs = blob->bs; 1959 size_t i; 1960 uint64_t lba; 1961 uint64_t lba_count; 1962 spdk_bs_batch_t *batch; 1963 1964 batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx); 1965 lba_count = bs_byte_to_lba(bs, bs->md_page_size); 1966 1967 /* Clear all extent_pages that were truncated */ 1968 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1969 /* Nothing to clear if it was not allocated */ 1970 if (blob->active.extent_pages[i] != 0) { 1971 lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]); 1972 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1973 } 1974 } 1975 1976 bs_batch_close(batch); 1977 } 1978 1979 static void 1980 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1981 { 1982 struct spdk_blob_persist_ctx *ctx = cb_arg; 1983 struct spdk_blob *blob = ctx->blob; 1984 struct spdk_blob_store *bs = blob->bs; 1985 size_t i; 1986 1987 if (bserrno != 0) { 1988 blob_persist_complete(seq, ctx, bserrno); 1989 return; 1990 } 1991 1992 spdk_spin_lock(&bs->used_lock); 1993 /* Release all clusters that were truncated */ 1994 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1995 uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]); 1996 1997 /* Nothing to release if it was not allocated */ 1998 if (blob->active.clusters[i] != 0) { 1999 bs_release_cluster(bs, cluster_num); 2000 } 2001 } 2002 spdk_spin_unlock(&bs->used_lock); 2003 2004 if (blob->active.num_clusters == 0) { 2005 free(blob->active.clusters); 2006 blob->active.clusters = NULL; 2007 blob->active.cluster_array_size = 0; 2008 } else if (blob->active.num_clusters != blob->active.cluster_array_size) { 2009 #ifndef __clang_analyzer__ 2010 void *tmp; 2011 2012 /* scan-build really can't figure reallocs, workaround it */ 2013 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters); 2014 assert(tmp != NULL); 2015 blob->active.clusters = tmp; 2016 2017 #endif 2018 blob->active.cluster_array_size = blob->active.num_clusters; 2019 } 2020 2021 /* Move on to clearing extent pages */ 2022 blob_persist_clear_extents(seq, ctx); 2023 } 2024 2025 static void 2026 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2027 { 2028 struct spdk_blob *blob = ctx->blob; 2029 struct spdk_blob_store *bs = blob->bs; 2030 spdk_bs_batch_t *batch; 2031 size_t i; 2032 uint64_t lba; 2033 uint64_t lba_count; 2034 2035 /* Clusters don't move around in blobs. The list shrinks or grows 2036 * at the end, but no changes ever occur in the middle of the list. 2037 */ 2038 2039 batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx); 2040 2041 /* Clear all clusters that were truncated */ 2042 lba = 0; 2043 lba_count = 0; 2044 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 2045 uint64_t next_lba = blob->active.clusters[i]; 2046 uint64_t next_lba_count = bs_cluster_to_lba(bs, 1); 2047 2048 if (next_lba > 0 && (lba + lba_count) == next_lba) { 2049 /* This cluster is contiguous with the previous one. */ 2050 lba_count += next_lba_count; 2051 continue; 2052 } else if (next_lba == 0) { 2053 continue; 2054 } 2055 2056 /* This cluster is not contiguous with the previous one. 
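* Flush the run accumulated so far below, then start a new run at this cluster's LBA.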
*/ 2057 2058 /* If a run of LBAs previously existed, clear them now */ 2059 if (lba_count > 0) { 2060 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2061 } 2062 2063 /* Start building the next batch */ 2064 lba = next_lba; 2065 if (next_lba > 0) { 2066 lba_count = next_lba_count; 2067 } else { 2068 lba_count = 0; 2069 } 2070 } 2071 2072 /* If we ended with a contiguous set of LBAs, clear them now */ 2073 if (lba_count > 0) { 2074 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2075 } 2076 2077 bs_batch_close(batch); 2078 } 2079 2080 static void 2081 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2082 { 2083 struct spdk_blob_persist_ctx *ctx = cb_arg; 2084 struct spdk_blob *blob = ctx->blob; 2085 struct spdk_blob_store *bs = blob->bs; 2086 size_t i; 2087 2088 if (bserrno != 0) { 2089 blob_persist_complete(seq, ctx, bserrno); 2090 return; 2091 } 2092 2093 spdk_spin_lock(&bs->used_lock); 2094 2095 /* This loop starts at 1 because the first page is special and handled 2096 * below. The pages (except the first) are never written in place, 2097 * so any pages in the clean list must be zeroed. 2098 */ 2099 for (i = 1; i < blob->clean.num_pages; i++) { 2100 bs_release_md_page(bs, blob->clean.pages[i]); 2101 } 2102 2103 if (blob->active.num_pages == 0) { 2104 uint32_t page_num; 2105 2106 page_num = bs_blobid_to_page(blob->id); 2107 bs_release_md_page(bs, page_num); 2108 } 2109 2110 spdk_spin_unlock(&bs->used_lock); 2111 2112 /* Move on to clearing clusters */ 2113 blob_persist_clear_clusters(seq, ctx); 2114 } 2115 2116 static void 2117 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2118 { 2119 struct spdk_blob_persist_ctx *ctx = cb_arg; 2120 struct spdk_blob *blob = ctx->blob; 2121 struct spdk_blob_store *bs = blob->bs; 2122 uint64_t lba; 2123 uint64_t lba_count; 2124 spdk_bs_batch_t *batch; 2125 size_t i; 2126 2127 if (bserrno != 0) { 2128 blob_persist_complete(seq, ctx, bserrno); 2129 return; 2130 } 2131 2132 batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx); 2133 2134 lba_count = bs_byte_to_lba(bs, bs->md_page_size); 2135 2136 /* This loop starts at 1 because the first page is special and handled 2137 * below. The pages (except the first) are never written in place, 2138 * so any pages in the clean list must be zeroed. 2139 */ 2140 for (i = 1; i < blob->clean.num_pages; i++) { 2141 lba = bs_md_page_to_lba(bs, blob->clean.pages[i]); 2142 2143 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2144 } 2145 2146 /* The first page will only be zeroed if this is a delete. 
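* During a normal persist the root page is instead rewritten in place by blob_persist_write_page_root(), so it must not be zeroed here.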
*/ 2147 if (blob->active.num_pages == 0) { 2148 uint32_t page_num; 2149 2150 /* The first page in the metadata goes where the blobid indicates */ 2151 page_num = bs_blobid_to_page(blob->id); 2152 lba = bs_md_page_to_lba(bs, page_num); 2153 2154 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2155 } 2156 2157 bs_batch_close(batch); 2158 } 2159 2160 static void 2161 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2162 { 2163 struct spdk_blob_persist_ctx *ctx = cb_arg; 2164 struct spdk_blob *blob = ctx->blob; 2165 struct spdk_blob_store *bs = blob->bs; 2166 uint64_t lba; 2167 uint32_t lba_count; 2168 struct spdk_blob_md_page *page; 2169 2170 if (bserrno != 0) { 2171 blob_persist_complete(seq, ctx, bserrno); 2172 return; 2173 } 2174 2175 if (blob->active.num_pages == 0) { 2176 /* Move on to the next step */ 2177 blob_persist_zero_pages(seq, ctx, 0); 2178 return; 2179 } 2180 2181 lba_count = bs_byte_to_lba(bs, bs->md_page_size); 2182 2183 page = &ctx->pages[0]; 2184 /* The first page in the metadata goes where the blobid indicates */ 2185 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 2186 2187 bs_sequence_write_dev(seq, page, lba, lba_count, 2188 blob_persist_zero_pages, ctx); 2189 } 2190 2191 static void 2192 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2193 { 2194 struct spdk_blob *blob = ctx->blob; 2195 struct spdk_blob_store *bs = blob->bs; 2196 uint64_t lba; 2197 uint32_t lba_count; 2198 struct spdk_blob_md_page *page; 2199 spdk_bs_batch_t *batch; 2200 size_t i; 2201 2202 /* Clusters don't move around in blobs. The list shrinks or grows 2203 * at the end, but no changes ever occur in the middle of the list. 2204 */ 2205 2206 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2207 2208 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 2209 2210 /* This starts at 1. The root page is not written until 2211 * all of the others are finished 2212 */ 2213 for (i = 1; i < blob->active.num_pages; i++) { 2214 page = &ctx->pages[i]; 2215 assert(page->sequence_num == i); 2216 2217 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 2218 2219 bs_batch_write_dev(batch, page, lba, lba_count); 2220 } 2221 2222 bs_batch_close(batch); 2223 } 2224 2225 static int 2226 blob_resize(struct spdk_blob *blob, uint64_t sz) 2227 { 2228 uint64_t i; 2229 uint64_t *tmp; 2230 uint64_t cluster; 2231 uint32_t lfmd; /* lowest free md page */ 2232 uint64_t num_clusters; 2233 uint32_t *ep_tmp; 2234 uint64_t new_num_ep = 0, current_num_ep = 0; 2235 struct spdk_blob_store *bs; 2236 int rc; 2237 2238 bs = blob->bs; 2239 2240 blob_verify_md_op(blob); 2241 2242 if (blob->active.num_clusters == sz) { 2243 return 0; 2244 } 2245 2246 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2247 /* If this blob was resized to be larger, then smaller, then 2248 * larger without syncing, then the cluster array already 2249 * contains spare assigned clusters we can use. 2250 */ 2251 num_clusters = spdk_min(blob->active.cluster_array_size, 2252 sz); 2253 } else { 2254 num_clusters = blob->active.num_clusters; 2255 } 2256 2257 if (blob->use_extent_table) { 2258 /* Round up since every cluster beyond current Extent Table size, 2259 * requires new extent page. 
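* Each extent page maps SPDK_EXTENTS_PER_EP clusters, hence the round-up divisions below.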
*/ 2260 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2261 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2262 } 2263 2264 assert(!spdk_spin_held(&bs->used_lock)); 2265 2266 /* Check first that we have enough clusters and md pages before we start claiming them. 2267 * bs->used_lock is held to ensure that clusters we think are free are still free when we go 2268 * to claim them later in this function. 2269 */ 2270 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2271 spdk_spin_lock(&bs->used_lock); 2272 if ((sz - num_clusters) > bs->num_free_clusters) { 2273 rc = -ENOSPC; 2274 goto out; 2275 } 2276 lfmd = 0; 2277 for (i = current_num_ep; i < new_num_ep ; i++) { 2278 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2279 if (lfmd == UINT32_MAX) { 2280 /* No more free md pages. Cannot satisfy the request */ 2281 rc = -ENOSPC; 2282 goto out; 2283 } 2284 } 2285 } 2286 2287 if (sz > num_clusters) { 2288 /* Expand the cluster array if necessary. 2289 * We only shrink the array when persisting. 2290 */ 2291 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2292 if (sz > 0 && tmp == NULL) { 2293 rc = -ENOMEM; 2294 goto out; 2295 } 2296 memset(tmp + blob->active.cluster_array_size, 0, 2297 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2298 blob->active.clusters = tmp; 2299 blob->active.cluster_array_size = sz; 2300 2301 /* Expand the extents table, only if enough clusters were added */ 2302 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2303 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2304 if (new_num_ep > 0 && ep_tmp == NULL) { 2305 rc = -ENOMEM; 2306 goto out; 2307 } 2308 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2309 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2310 blob->active.extent_pages = ep_tmp; 2311 blob->active.extent_pages_array_size = new_num_ep; 2312 } 2313 } 2314 2315 blob->state = SPDK_BLOB_STATE_DIRTY; 2316 2317 if (spdk_blob_is_thin_provisioned(blob) == false) { 2318 cluster = 0; 2319 lfmd = 0; 2320 for (i = num_clusters; i < sz; i++) { 2321 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2322 /* Do not increment lfmd here. lfmd will get updated 2323 * to the md_page allocated (if any) when a new extent 2324 * page is needed. Just pass that value again, 2325 * bs_allocate_cluster will just start at that index 2326 * to find the next free md_page when needed. 
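* Note that the availability pre-check above and the claims made here happen under a single hold of bs->used_lock, so a page counted as free cannot be stolen in between.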
2327 */ 2328 } 2329 } 2330 2331 /* If we are shrinking the blob, we must adjust num_allocated_clusters */ 2332 for (i = sz; i < num_clusters; i++) { 2333 if (blob->active.clusters[i] != 0) { 2334 blob->active.num_allocated_clusters--; 2335 } 2336 } 2337 2338 blob->active.num_clusters = sz; 2339 blob->active.num_extent_pages = new_num_ep; 2340 2341 rc = 0; 2342 out: 2343 if (spdk_spin_held(&bs->used_lock)) { 2344 spdk_spin_unlock(&bs->used_lock); 2345 } 2346 2347 return rc; 2348 } 2349 2350 static void 2351 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2352 { 2353 spdk_bs_sequence_t *seq = ctx->seq; 2354 struct spdk_blob *blob = ctx->blob; 2355 struct spdk_blob_store *bs = blob->bs; 2356 uint64_t i; 2357 uint32_t page_num; 2358 void *tmp; 2359 int rc; 2360 2361 /* Generate the new metadata */ 2362 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2363 if (rc < 0) { 2364 blob_persist_complete(seq, ctx, rc); 2365 return; 2366 } 2367 2368 assert(blob->active.num_pages >= 1); 2369 2370 /* Resize the cache of page indices */ 2371 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2372 if (!tmp) { 2373 blob_persist_complete(seq, ctx, -ENOMEM); 2374 return; 2375 } 2376 blob->active.pages = tmp; 2377 2378 /* Assign this metadata to pages. This requires two passes - one to verify that there are 2379 * enough pages and a second to actually claim them. The used_lock is held across 2380 * both passes to ensure things don't change in the middle. 2381 */ 2382 spdk_spin_lock(&bs->used_lock); 2383 page_num = 0; 2384 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 2385 for (i = 1; i < blob->active.num_pages; i++) { 2386 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2387 if (page_num == UINT32_MAX) { 2388 spdk_spin_unlock(&bs->used_lock); 2389 blob_persist_complete(seq, ctx, -ENOMEM); 2390 return; 2391 } 2392 page_num++; 2393 } 2394 2395 page_num = 0; 2396 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2397 for (i = 1; i < blob->active.num_pages; i++) { 2398 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2399 ctx->pages[i - 1].next = page_num; 2400 /* Now that previous metadata page is complete, calculate the crc for it. */ 2401 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2402 blob->active.pages[i] = page_num; 2403 bs_claim_md_page(bs, page_num); 2404 SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num, 2405 blob->id); 2406 page_num++; 2407 } 2408 spdk_spin_unlock(&bs->used_lock); 2409 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2410 /* Start writing the metadata from last page to first */ 2411 blob->state = SPDK_BLOB_STATE_CLEAN; 2412 blob_persist_write_page_chain(seq, ctx); 2413 } 2414 2415 static void 2416 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2417 { 2418 struct spdk_blob_persist_ctx *ctx = cb_arg; 2419 struct spdk_blob *blob = ctx->blob; 2420 size_t i; 2421 uint32_t extent_page_id; 2422 uint32_t page_count = 0; 2423 int rc; 2424 2425 if (ctx->extent_page != NULL) { 2426 spdk_free(ctx->extent_page); 2427 ctx->extent_page = NULL; 2428 } 2429 2430 if (bserrno != 0) { 2431 blob_persist_complete(seq, ctx, bserrno); 2432 return; 2433 } 2434 2435 /* Only write out Extent Pages when blob was resized. 
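* Pages are written one at a time: ctx->next_extent_page records the resume point, and this function re-enters as its own completion callback after each write.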
*/ 2436 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2437 extent_page_id = blob->active.extent_pages[i]; 2438 if (extent_page_id == 0) { 2439 /* No Extent Page to persist */ 2440 assert(spdk_blob_is_thin_provisioned(blob)); 2441 continue; 2442 } 2443 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2444 ctx->next_extent_page = i + 1; 2445 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2446 if (rc < 0) { 2447 blob_persist_complete(seq, ctx, rc); 2448 return; 2449 } 2450 2451 blob->state = SPDK_BLOB_STATE_DIRTY; 2452 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2453 2454 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2455 2456 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2457 bs_byte_to_lba(blob->bs, blob->bs->md_page_size), 2458 blob_persist_write_extent_pages, ctx); 2459 return; 2460 } 2461 2462 blob_persist_generate_new_md(ctx); 2463 } 2464 2465 static void 2466 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2467 { 2468 struct spdk_blob_persist_ctx *ctx = cb_arg; 2469 struct spdk_blob *blob = ctx->blob; 2470 2471 if (bserrno != 0) { 2472 blob_persist_complete(seq, ctx, bserrno); 2473 return; 2474 } 2475 2476 if (blob->active.num_pages == 0) { 2477 /* This is the signal that the blob should be deleted. 2478 * Immediately jump to the clean up routine. */ 2479 assert(blob->clean.num_pages > 0); 2480 blob->state = SPDK_BLOB_STATE_CLEAN; 2481 blob_persist_zero_pages(seq, ctx, 0); 2482 return; 2483 2484 } 2485 2486 if (blob->clean.num_clusters < blob->active.num_clusters) { 2487 /* Blob was resized up */ 2488 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2489 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2490 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2491 /* Blob was resized down */ 2492 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2493 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2494 } else { 2495 /* No change in size occurred */ 2496 blob_persist_generate_new_md(ctx); 2497 return; 2498 } 2499 2500 blob_persist_write_extent_pages(seq, ctx, 0); 2501 } 2502 2503 struct spdk_bs_mark_dirty { 2504 struct spdk_blob_store *bs; 2505 struct spdk_bs_super_block *super; 2506 spdk_bs_sequence_cpl cb_fn; 2507 void *cb_arg; 2508 }; 2509 2510 static void 2511 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2512 { 2513 struct spdk_bs_mark_dirty *ctx = cb_arg; 2514 2515 if (bserrno == 0) { 2516 ctx->bs->clean = 0; 2517 } 2518 2519 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 2520 2521 spdk_free(ctx->super); 2522 free(ctx); 2523 } 2524 2525 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2526 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2527 2528 2529 static void 2530 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2531 { 2532 struct spdk_bs_mark_dirty *ctx = cb_arg; 2533 int rc; 2534 2535 if (bserrno != 0) { 2536 bs_mark_dirty_write_cpl(seq, ctx, bserrno); 2537 return; 2538 } 2539 2540 rc = bs_super_validate(ctx->super, ctx->bs); 2541 if (rc != 0) { 2542 bs_mark_dirty_write_cpl(seq, ctx, rc); 2543 return; 2544 } 2545 2546 ctx->super->clean = 0; 2547 if (ctx->super->size == 0) { 2548 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 
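/* A size of 0 comes from a super block written without the size field; fall back to the full device size. */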
2549 } 2550 2551 bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx); 2552 } 2553 2554 static void 2555 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2556 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2557 { 2558 struct spdk_bs_mark_dirty *ctx; 2559 2560 /* Blobstore is already marked dirty */ 2561 if (bs->clean == 0) { 2562 cb_fn(seq, cb_arg, 0); 2563 return; 2564 } 2565 2566 ctx = calloc(1, sizeof(*ctx)); 2567 if (!ctx) { 2568 cb_fn(seq, cb_arg, -ENOMEM); 2569 return; 2570 } 2571 ctx->bs = bs; 2572 ctx->cb_fn = cb_fn; 2573 ctx->cb_arg = cb_arg; 2574 2575 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2576 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2577 if (!ctx->super) { 2578 free(ctx); 2579 cb_fn(seq, cb_arg, -ENOMEM); 2580 return; 2581 } 2582 2583 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 2584 bs_byte_to_lba(bs, sizeof(*ctx->super)), 2585 bs_mark_dirty_write, ctx); 2586 } 2587 2588 /* Write a blob to disk */ 2589 static void 2590 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2591 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2592 { 2593 struct spdk_blob_persist_ctx *ctx; 2594 2595 blob_verify_md_op(blob); 2596 2597 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2598 cb_fn(seq, cb_arg, 0); 2599 return; 2600 } 2601 2602 ctx = calloc(1, sizeof(*ctx)); 2603 if (!ctx) { 2604 cb_fn(seq, cb_arg, -ENOMEM); 2605 return; 2606 } 2607 ctx->blob = blob; 2608 ctx->seq = seq; 2609 ctx->cb_fn = cb_fn; 2610 ctx->cb_arg = cb_arg; 2611 2612 /* Multiple blob persists can affect one another, via blob->state or 2613 * blob mutable data changes. To prevent it, queue up the persists. */ 2614 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2615 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2616 return; 2617 } 2618 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2619 2620 bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx); 2621 } 2622 2623 struct spdk_blob_copy_cluster_ctx { 2624 struct spdk_blob *blob; 2625 uint8_t *buf; 2626 uint64_t io_unit; 2627 uint64_t new_cluster; 2628 uint32_t new_extent_page; 2629 spdk_bs_sequence_t *seq; 2630 struct spdk_blob_md_page *new_cluster_page; 2631 }; 2632 2633 struct spdk_blob_free_cluster_ctx { 2634 struct spdk_blob *blob; 2635 uint64_t page; 2636 struct spdk_blob_md_page *md_page; 2637 uint64_t cluster_num; 2638 uint32_t extent_page; 2639 spdk_bs_sequence_t *seq; 2640 }; 2641 2642 static void 2643 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2644 { 2645 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2646 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2647 TAILQ_HEAD(, spdk_bs_request_set) requests; 2648 spdk_bs_user_op_t *op; 2649 2650 TAILQ_INIT(&requests); 2651 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2652 2653 while (!TAILQ_EMPTY(&requests)) { 2654 op = TAILQ_FIRST(&requests); 2655 TAILQ_REMOVE(&requests, op, link); 2656 if (bserrno == 0) { 2657 bs_user_op_execute(op); 2658 } else { 2659 bs_user_op_abort(op, bserrno); 2660 } 2661 } 2662 2663 spdk_free(ctx->buf); 2664 free(ctx); 2665 } 2666 2667 static void 2668 blob_free_cluster_cpl(void *cb_arg, int bserrno) 2669 { 2670 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 2671 spdk_bs_sequence_t *seq = ctx->seq; 2672 2673 bs_sequence_finish(seq, bserrno); 2674 2675 free(ctx); 2676 } 2677 2678 static void 2679 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx) 2680 { 2681 
spdk_spin_lock(&ctx->blob->bs->used_lock); 2682 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2683 if (ctx->new_extent_page != 0) { 2684 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2685 } 2686 spdk_spin_unlock(&ctx->blob->bs->used_lock); 2687 } 2688 2689 static void 2690 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno) 2691 { 2692 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2693 2694 if (bserrno) { 2695 SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno); 2696 } 2697 2698 blob_insert_cluster_revert(ctx); 2699 bs_sequence_finish(ctx->seq, bserrno); 2700 } 2701 2702 static void 2703 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx) 2704 { 2705 struct spdk_bs_cpl cpl; 2706 spdk_bs_batch_t *batch; 2707 struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel); 2708 2709 /* 2710 * We allocated a cluster and we copied data to it. But now, we realized that we don't need 2711 * this cluster and we want to release it. We must ensure that we clear the data on this 2712 * cluster. 2713 * The cluster may later be re-allocated by a thick-provisioned blob for example. When 2714 * reading from this thick-provisioned blob before writing data, we should read zeroes. 2715 */ 2716 2717 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2718 cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl; 2719 cpl.u.blob_basic.cb_arg = ctx; 2720 2721 batch = bs_batch_open(ch, &cpl, ctx->blob); 2722 if (!batch) { 2723 blob_insert_cluster_clear_cpl(ctx, -ENOMEM); 2724 return; 2725 } 2726 2727 bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2728 bs_cluster_to_lba(ctx->blob->bs, 1)); 2729 bs_batch_close(batch); 2730 } 2731 2732 static void 2733 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2734 { 2735 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2736 2737 if (bserrno) { 2738 if (bserrno == -EEXIST) { 2739 /* The metadata insert failed because another thread 2740 * allocated the cluster first. Clear and free our cluster 2741 * but continue without error. 
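* The winner's cluster already holds the copied data, so the queued I/O can proceed against it.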
*/ 2742 blob_insert_cluster_clear(ctx); 2743 return; 2744 } 2745 2746 blob_insert_cluster_revert(ctx); 2747 } 2748 2749 bs_sequence_finish(ctx->seq, bserrno); 2750 } 2751 2752 static void 2753 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2754 { 2755 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2756 uint32_t cluster_number; 2757 2758 if (bserrno) { 2759 /* The write failed, so jump to the final completion handler */ 2760 bs_sequence_finish(seq, bserrno); 2761 return; 2762 } 2763 2764 cluster_number = bs_io_unit_to_cluster(ctx->blob->bs, ctx->io_unit); 2765 2766 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2767 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2768 } 2769 2770 static void 2771 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2772 { 2773 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2774 2775 if (bserrno != 0) { 2776 /* The read failed, so jump to the final completion handler */ 2777 bs_sequence_finish(seq, bserrno); 2778 return; 2779 } 2780 2781 /* Write whole cluster */ 2782 bs_sequence_write_dev(seq, ctx->buf, 2783 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2784 bs_cluster_to_lba(ctx->blob->bs, 1), 2785 blob_write_copy_cpl, ctx); 2786 } 2787 2788 static bool 2789 blob_can_copy(struct spdk_blob *blob, uint64_t cluster_start_io_unit, uint64_t *base_lba) 2790 { 2791 uint64_t lba = bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit); 2792 2793 return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) && 2794 blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba); 2795 } 2796 2797 static void 2798 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba) 2799 { 2800 struct spdk_blob *blob = ctx->blob; 2801 uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz); 2802 2803 bs_sequence_copy_dev(ctx->seq, 2804 bs_cluster_to_lba(blob->bs, ctx->new_cluster), 2805 src_lba, 2806 lba_count, 2807 blob_write_copy_cpl, ctx); 2808 } 2809 2810 static void 2811 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2812 struct spdk_io_channel *_ch, 2813 uint64_t io_unit, spdk_bs_user_op_t *op) 2814 { 2815 struct spdk_bs_cpl cpl; 2816 struct spdk_bs_channel *ch; 2817 struct spdk_blob_copy_cluster_ctx *ctx; 2818 uint64_t cluster_start_io_unit; 2819 uint32_t cluster_number; 2820 bool is_zeroes; 2821 bool can_copy; 2822 bool is_valid_range; 2823 uint64_t copy_src_lba; 2824 int rc; 2825 2826 ch = spdk_io_channel_get_ctx(_ch); 2827 2828 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2829 /* There are already operations pending. Queue this user op 2830 * and return because it will be re-executed when the outstanding 2831 * cluster allocation completes. */ 2832 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2833 return; 2834 } 2835 2836 /* Round the io_unit offset down to the first io_unit in the cluster */ 2837 cluster_start_io_unit = bs_io_unit_to_cluster_start(blob, io_unit); 2838 2839 /* Calculate which index in the metadata cluster array the corresponding 2840 * cluster is supposed to be at. 
*/ 2841 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2842 2843 ctx = calloc(1, sizeof(*ctx)); 2844 if (!ctx) { 2845 bs_user_op_abort(op, -ENOMEM); 2846 return; 2847 } 2848 2849 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2850 2851 ctx->blob = blob; 2852 ctx->io_unit = cluster_start_io_unit; 2853 ctx->new_cluster_page = ch->new_cluster_page; 2854 memset(ctx->new_cluster_page, 0, blob->bs->md_page_size); 2855 2856 /* Check if the cluster that we intend to do CoW for is valid for 2857 * the backing dev. For zeroes backing dev, it'll be always valid. 2858 * For other backing dev e.g. a snapshot, it could be invalid if 2859 * the blob has been resized after snapshot was taken. */ 2860 is_valid_range = blob->back_bs_dev->is_range_valid(blob->back_bs_dev, 2861 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2862 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2863 2864 can_copy = is_valid_range && blob_can_copy(blob, cluster_start_io_unit, &copy_src_lba); 2865 2866 is_zeroes = is_valid_range && blob->back_bs_dev->is_zeroes(blob->back_bs_dev, 2867 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2868 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2869 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) { 2870 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2871 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2872 if (!ctx->buf) { 2873 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2874 blob->bs->cluster_sz); 2875 free(ctx); 2876 bs_user_op_abort(op, -ENOMEM); 2877 return; 2878 } 2879 } 2880 2881 spdk_spin_lock(&blob->bs->used_lock); 2882 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2883 false); 2884 spdk_spin_unlock(&blob->bs->used_lock); 2885 if (rc != 0) { 2886 spdk_free(ctx->buf); 2887 free(ctx); 2888 bs_user_op_abort(op, rc); 2889 return; 2890 } 2891 2892 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2893 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2894 cpl.u.blob_basic.cb_arg = ctx; 2895 2896 ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob); 2897 if (!ctx->seq) { 2898 spdk_spin_lock(&blob->bs->used_lock); 2899 bs_release_cluster(blob->bs, ctx->new_cluster); 2900 spdk_spin_unlock(&blob->bs->used_lock); 2901 spdk_free(ctx->buf); 2902 free(ctx); 2903 bs_user_op_abort(op, -ENOMEM); 2904 return; 2905 } 2906 2907 /* Queue the user op to block other incoming operations */ 2908 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2909 2910 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) { 2911 if (can_copy) { 2912 blob_copy(ctx, op, copy_src_lba); 2913 } else { 2914 /* Read cluster from backing device */ 2915 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2916 bs_dev_io_unit_to_lba(blob, blob->back_bs_dev, cluster_start_io_unit), 2917 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2918 blob_write_copy, ctx); 2919 } 2920 2921 } else { 2922 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2923 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2924 } 2925 } 2926 2927 static inline bool 2928 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2929 uint64_t *lba, uint64_t *lba_count) 2930 { 2931 *lba_count = length; 2932 2933 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2934 assert(blob->back_bs_dev != NULL); 2935 *lba = 
bs_io_unit_to_back_dev_lba(blob, io_unit); 2936 *lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count); 2937 return false; 2938 } else { 2939 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2940 return true; 2941 } 2942 } 2943 2944 struct op_split_ctx { 2945 struct spdk_blob *blob; 2946 struct spdk_io_channel *channel; 2947 uint64_t io_unit_offset; 2948 uint64_t io_units_remaining; 2949 void *curr_payload; 2950 enum spdk_blob_op_type op_type; 2951 spdk_bs_sequence_t *seq; 2952 bool in_submit_ctx; 2953 bool completed_in_submit_ctx; 2954 bool done; 2955 }; 2956 2957 static void 2958 blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2959 { 2960 struct op_split_ctx *ctx = cb_arg; 2961 struct spdk_blob *blob = ctx->blob; 2962 struct spdk_io_channel *ch = ctx->channel; 2963 enum spdk_blob_op_type op_type = ctx->op_type; 2964 uint8_t *buf; 2965 uint64_t offset; 2966 uint64_t length; 2967 uint64_t op_length; 2968 2969 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2970 bs_sequence_finish(ctx->seq, bserrno); 2971 if (ctx->in_submit_ctx) { 2972 /* Defer freeing of the ctx object, since it will be 2973 * accessed when this unwinds back to the submission 2974 * context. 2975 */ 2976 ctx->done = true; 2977 } else { 2978 free(ctx); 2979 } 2980 return; 2981 } 2982 2983 if (ctx->in_submit_ctx) { 2984 /* If this split operation completed in the context 2985 * of its submission, mark the flag and return immediately 2986 * to avoid recursion. 2987 */ 2988 ctx->completed_in_submit_ctx = true; 2989 return; 2990 } 2991 2992 while (true) { 2993 ctx->completed_in_submit_ctx = false; 2994 2995 offset = ctx->io_unit_offset; 2996 length = ctx->io_units_remaining; 2997 buf = ctx->curr_payload; 2998 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob, 2999 offset)); 3000 3001 /* Update length and payload for next operation */ 3002 ctx->io_units_remaining -= op_length; 3003 ctx->io_unit_offset += op_length; 3004 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 3005 ctx->curr_payload += op_length * blob->bs->io_unit_size; 3006 } 3007 3008 assert(!ctx->in_submit_ctx); 3009 ctx->in_submit_ctx = true; 3010 3011 switch (op_type) { 3012 case SPDK_BLOB_READ: 3013 spdk_blob_io_read(blob, ch, buf, offset, op_length, 3014 blob_request_submit_op_split_next, ctx); 3015 break; 3016 case SPDK_BLOB_WRITE: 3017 spdk_blob_io_write(blob, ch, buf, offset, op_length, 3018 blob_request_submit_op_split_next, ctx); 3019 break; 3020 case SPDK_BLOB_UNMAP: 3021 spdk_blob_io_unmap(blob, ch, offset, op_length, 3022 blob_request_submit_op_split_next, ctx); 3023 break; 3024 case SPDK_BLOB_WRITE_ZEROES: 3025 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 3026 blob_request_submit_op_split_next, ctx); 3027 break; 3028 case SPDK_BLOB_READV: 3029 case SPDK_BLOB_WRITEV: 3030 SPDK_ERRLOG("readv/write not valid\n"); 3031 bs_sequence_finish(ctx->seq, -EINVAL); 3032 free(ctx); 3033 return; 3034 } 3035 3036 #ifndef __clang_analyzer__ 3037 /* scan-build reports a false positive around accessing the ctx here. It 3038 * forms a path that recursively calls this function, but then says 3039 * "assuming ctx->in_submit_ctx is false", when that isn't possible. 3040 * This path does free(ctx), returns to here, and reports a use-after-free 3041 * bug. Wrapping this bit of code so that scan-build doesn't see it 3042 * works around the scan-build bug. 
3043 */ 3044 assert(ctx->in_submit_ctx); 3045 ctx->in_submit_ctx = false; 3046 3047 /* If the operation completed immediately, loop back and submit the 3048 * next operation. Otherwise we can return and the next split 3049 * operation will get submitted when this current operation is 3050 * later completed asynchronously. 3051 */ 3052 if (ctx->completed_in_submit_ctx) { 3053 continue; 3054 } else if (ctx->done) { 3055 free(ctx); 3056 } 3057 #endif 3058 break; 3059 } 3060 } 3061 3062 static void 3063 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 3064 void *payload, uint64_t offset, uint64_t length, 3065 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3066 { 3067 struct op_split_ctx *ctx; 3068 spdk_bs_sequence_t *seq; 3069 struct spdk_bs_cpl cpl; 3070 3071 assert(blob != NULL); 3072 3073 ctx = calloc(1, sizeof(struct op_split_ctx)); 3074 if (ctx == NULL) { 3075 cb_fn(cb_arg, -ENOMEM); 3076 return; 3077 } 3078 3079 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3080 cpl.u.blob_basic.cb_fn = cb_fn; 3081 cpl.u.blob_basic.cb_arg = cb_arg; 3082 3083 seq = bs_sequence_start_blob(ch, &cpl, blob); 3084 if (!seq) { 3085 free(ctx); 3086 cb_fn(cb_arg, -ENOMEM); 3087 return; 3088 } 3089 3090 ctx->blob = blob; 3091 ctx->channel = ch; 3092 ctx->curr_payload = payload; 3093 ctx->io_unit_offset = offset; 3094 ctx->io_units_remaining = length; 3095 ctx->op_type = op_type; 3096 ctx->seq = seq; 3097 3098 blob_request_submit_op_split_next(ctx, 0); 3099 } 3100 3101 static void 3102 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno) 3103 { 3104 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 3105 3106 if (bserrno) { 3107 bs_sequence_finish(ctx->seq, bserrno); 3108 free(ctx); 3109 return; 3110 } 3111 3112 blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num, 3113 ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx); 3114 } 3115 3116 static void 3117 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 3118 void *payload, uint64_t offset, uint64_t length, 3119 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3120 { 3121 struct spdk_bs_cpl cpl; 3122 uint64_t lba; 3123 uint64_t lba_count; 3124 bool is_allocated; 3125 3126 assert(blob != NULL); 3127 3128 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3129 cpl.u.blob_basic.cb_fn = cb_fn; 3130 cpl.u.blob_basic.cb_arg = cb_arg; 3131 3132 if (blob->frozen_refcnt) { 3133 /* This blob I/O is frozen */ 3134 spdk_bs_user_op_t *op; 3135 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3136 3137 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3138 if (!op) { 3139 cb_fn(cb_arg, -ENOMEM); 3140 return; 3141 } 3142 3143 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3144 3145 return; 3146 } 3147 3148 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3149 3150 switch (op_type) { 3151 case SPDK_BLOB_READ: { 3152 spdk_bs_batch_t *batch; 3153 3154 batch = bs_batch_open(_ch, &cpl, blob); 3155 if (!batch) { 3156 cb_fn(cb_arg, -ENOMEM); 3157 return; 3158 } 3159 3160 if (is_allocated) { 3161 /* Read from the blob */ 3162 bs_batch_read_dev(batch, payload, lba, lba_count); 3163 } else { 3164 /* Read from the backing block device */ 3165 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 3166 } 3167 3168 bs_batch_close(batch); 3169 break; 3170 } 3171 case SPDK_BLOB_WRITE: 3172 case SPDK_BLOB_WRITE_ZEROES: { 3173 if (is_allocated) { 3174 /* Write to the 
blob */ 3175 spdk_bs_batch_t *batch; 3176 3177 if (lba_count == 0) { 3178 cb_fn(cb_arg, 0); 3179 return; 3180 } 3181 3182 batch = bs_batch_open(_ch, &cpl, blob); 3183 if (!batch) { 3184 cb_fn(cb_arg, -ENOMEM); 3185 return; 3186 } 3187 3188 if (op_type == SPDK_BLOB_WRITE) { 3189 bs_batch_write_dev(batch, payload, lba, lba_count); 3190 } else { 3191 bs_batch_write_zeroes_dev(batch, lba, lba_count); 3192 } 3193 3194 bs_batch_close(batch); 3195 } else { 3196 /* Queue this operation and allocate the cluster */ 3197 spdk_bs_user_op_t *op; 3198 3199 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3200 if (!op) { 3201 cb_fn(cb_arg, -ENOMEM); 3202 return; 3203 } 3204 3205 bs_allocate_and_copy_cluster(blob, _ch, offset, op); 3206 } 3207 break; 3208 } 3209 case SPDK_BLOB_UNMAP: { 3210 struct spdk_blob_free_cluster_ctx *ctx = NULL; 3211 spdk_bs_batch_t *batch; 3212 3213 /* if aligned with cluster release cluster */ 3214 if (spdk_blob_is_thin_provisioned(blob) && is_allocated && 3215 blob_backed_with_zeroes_dev(blob) && 3216 bs_io_units_per_cluster(blob) == length) { 3217 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3218 uint64_t cluster_start_page; 3219 uint32_t cluster_number; 3220 3221 assert(offset % bs_io_units_per_cluster(blob) == 0); 3222 3223 /* Round the io_unit offset down to the first page in the cluster */ 3224 cluster_start_page = bs_io_unit_to_cluster_start(blob, offset); 3225 3226 /* Calculate which index in the metadata cluster array the corresponding 3227 * cluster is supposed to be at. */ 3228 cluster_number = bs_io_unit_to_cluster_number(blob, offset); 3229 3230 ctx = calloc(1, sizeof(*ctx)); 3231 if (!ctx) { 3232 cb_fn(cb_arg, -ENOMEM); 3233 return; 3234 } 3235 /* When freeing a cluster the flow should be (in order): 3236 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak 3237 * old data) 3238 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the 3239 * cluster), update and sync metadata freeing the cluster 3240 * 3. 
Once metadata update is done, complete the user unmap request 3241 */ 3242 ctx->blob = blob; 3243 ctx->page = cluster_start_page; 3244 ctx->cluster_num = cluster_number; 3245 ctx->md_page = bs_channel->new_cluster_page; 3246 ctx->seq = bs_sequence_start_bs(_ch, &cpl); 3247 if (!ctx->seq) { 3248 free(ctx); 3249 cb_fn(cb_arg, -ENOMEM); 3250 return; 3251 } 3252 3253 if (blob->use_extent_table) { 3254 ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number); 3255 } 3256 3257 cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete; 3258 cpl.u.blob_basic.cb_arg = ctx; 3259 } 3260 3261 batch = bs_batch_open(_ch, &cpl, blob); 3262 if (!batch) { 3263 free(ctx); 3264 cb_fn(cb_arg, -ENOMEM); 3265 return; 3266 } 3267 3268 if (is_allocated) { 3269 bs_batch_unmap_dev(batch, lba, lba_count); 3270 } 3271 3272 bs_batch_close(batch); 3273 break; 3274 } 3275 case SPDK_BLOB_READV: 3276 case SPDK_BLOB_WRITEV: 3277 SPDK_ERRLOG("readv/write not valid\n"); 3278 cb_fn(cb_arg, -EINVAL); 3279 break; 3280 } 3281 } 3282 3283 static void 3284 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3285 void *payload, uint64_t offset, uint64_t length, 3286 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3287 { 3288 assert(blob != NULL); 3289 3290 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 3291 cb_fn(cb_arg, -EPERM); 3292 return; 3293 } 3294 3295 if (length == 0) { 3296 cb_fn(cb_arg, 0); 3297 return; 3298 } 3299 3300 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3301 cb_fn(cb_arg, -EINVAL); 3302 return; 3303 } 3304 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) { 3305 blob_request_submit_op_single(_channel, blob, payload, offset, length, 3306 cb_fn, cb_arg, op_type); 3307 } else { 3308 blob_request_submit_op_split(_channel, blob, payload, offset, length, 3309 cb_fn, cb_arg, op_type); 3310 } 3311 } 3312 3313 struct rw_iov_ctx { 3314 struct spdk_blob *blob; 3315 struct spdk_io_channel *channel; 3316 spdk_blob_op_complete cb_fn; 3317 void *cb_arg; 3318 bool read; 3319 int iovcnt; 3320 struct iovec *orig_iov; 3321 uint64_t io_unit_offset; 3322 uint64_t io_units_remaining; 3323 uint64_t io_units_done; 3324 struct spdk_blob_ext_io_opts *ext_io_opts; 3325 struct iovec iov[0]; 3326 }; 3327 3328 static void 3329 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3330 { 3331 assert(cb_arg == NULL); 3332 bs_sequence_finish(seq, bserrno); 3333 } 3334 3335 static void 3336 rw_iov_split_next(void *cb_arg, int bserrno) 3337 { 3338 struct rw_iov_ctx *ctx = cb_arg; 3339 struct spdk_blob *blob = ctx->blob; 3340 struct iovec *iov, *orig_iov; 3341 int iovcnt; 3342 size_t orig_iovoff; 3343 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 3344 uint64_t byte_count; 3345 3346 if (bserrno != 0 || ctx->io_units_remaining == 0) { 3347 ctx->cb_fn(ctx->cb_arg, bserrno); 3348 free(ctx); 3349 return; 3350 } 3351 3352 io_unit_offset = ctx->io_unit_offset; 3353 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 3354 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 3355 /* 3356 * Get index and offset into the original iov array for our current position in the I/O sequence. 3357 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 3358 * point to the current position in the I/O sequence. 
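* E.g. if io_units_done covers 6144 bytes and orig_iov holds two 4096-byte elements, the walk ends at the second element with orig_iovoff == 2048.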
3359 */ 3360 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 3361 orig_iov = &ctx->orig_iov[0]; 3362 orig_iovoff = 0; 3363 while (byte_count > 0) { 3364 if (byte_count >= orig_iov->iov_len) { 3365 byte_count -= orig_iov->iov_len; 3366 orig_iov++; 3367 } else { 3368 orig_iovoff = byte_count; 3369 byte_count = 0; 3370 } 3371 } 3372 3373 /* 3374 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 3375 * bytes of this next I/O remain to be accounted for in the new iov array. 3376 */ 3377 byte_count = io_units_count * blob->bs->io_unit_size; 3378 iov = &ctx->iov[0]; 3379 iovcnt = 0; 3380 while (byte_count > 0) { 3381 assert(iovcnt < ctx->iovcnt); 3382 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 3383 iov->iov_base = orig_iov->iov_base + orig_iovoff; 3384 byte_count -= iov->iov_len; 3385 orig_iovoff = 0; 3386 orig_iov++; 3387 iov++; 3388 iovcnt++; 3389 } 3390 3391 ctx->io_unit_offset += io_units_count; 3392 ctx->io_units_remaining -= io_units_count; 3393 ctx->io_units_done += io_units_count; 3394 iov = &ctx->iov[0]; 3395 3396 if (ctx->read) { 3397 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3398 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3399 } else { 3400 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3401 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3402 } 3403 } 3404 3405 static void 3406 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3407 struct iovec *iov, int iovcnt, 3408 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 3409 struct spdk_blob_ext_io_opts *ext_io_opts) 3410 { 3411 struct spdk_bs_cpl cpl; 3412 3413 assert(blob != NULL); 3414 3415 if (!read && blob->data_ro) { 3416 cb_fn(cb_arg, -EPERM); 3417 return; 3418 } 3419 3420 if (length == 0) { 3421 cb_fn(cb_arg, 0); 3422 return; 3423 } 3424 3425 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3426 cb_fn(cb_arg, -EINVAL); 3427 return; 3428 } 3429 3430 /* 3431 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 3432 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 3433 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 3434 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 3435 * to allocate a separate iov array and split the I/O such that none of the resulting 3436 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 3437 * but since this case happens very infrequently, any performance impact will be negligible. 3438 * 3439 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 3440 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 3441 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 3442 * when the batch was completed, to allow for freeing the memory for the iov arrays. 
3443 */ 3444 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 3445 uint64_t lba_count; 3446 uint64_t lba; 3447 bool is_allocated; 3448 3449 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3450 cpl.u.blob_basic.cb_fn = cb_fn; 3451 cpl.u.blob_basic.cb_arg = cb_arg; 3452 3453 if (blob->frozen_refcnt) { 3454 /* This blob I/O is frozen */ 3455 enum spdk_blob_op_type op_type; 3456 spdk_bs_user_op_t *op; 3457 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 3458 3459 op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 3460 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 3461 if (!op) { 3462 cb_fn(cb_arg, -ENOMEM); 3463 return; 3464 } 3465 3466 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3467 3468 return; 3469 } 3470 3471 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3472 3473 if (read) { 3474 spdk_bs_sequence_t *seq; 3475 3476 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3477 if (!seq) { 3478 cb_fn(cb_arg, -ENOMEM); 3479 return; 3480 } 3481 3482 seq->ext_io_opts = ext_io_opts; 3483 3484 if (is_allocated) { 3485 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3486 } else { 3487 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3488 rw_iov_done, NULL); 3489 } 3490 } else { 3491 if (is_allocated) { 3492 spdk_bs_sequence_t *seq; 3493 3494 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3495 if (!seq) { 3496 cb_fn(cb_arg, -ENOMEM); 3497 return; 3498 } 3499 3500 seq->ext_io_opts = ext_io_opts; 3501 3502 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3503 } else { 3504 /* Queue this operation and allocate the cluster */ 3505 spdk_bs_user_op_t *op; 3506 3507 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3508 length); 3509 if (!op) { 3510 cb_fn(cb_arg, -ENOMEM); 3511 return; 3512 } 3513 3514 op->ext_io_opts = ext_io_opts; 3515 3516 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3517 } 3518 } 3519 } else { 3520 struct rw_iov_ctx *ctx; 3521 3522 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3523 if (ctx == NULL) { 3524 cb_fn(cb_arg, -ENOMEM); 3525 return; 3526 } 3527 3528 ctx->blob = blob; 3529 ctx->channel = _channel; 3530 ctx->cb_fn = cb_fn; 3531 ctx->cb_arg = cb_arg; 3532 ctx->read = read; 3533 ctx->orig_iov = iov; 3534 ctx->iovcnt = iovcnt; 3535 ctx->io_unit_offset = offset; 3536 ctx->io_units_remaining = length; 3537 ctx->io_units_done = 0; 3538 ctx->ext_io_opts = ext_io_opts; 3539 3540 rw_iov_split_next(ctx, 0); 3541 } 3542 } 3543 3544 static struct spdk_blob * 3545 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3546 { 3547 struct spdk_blob find; 3548 3549 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3550 return NULL; 3551 } 3552 3553 find.id = blobid; 3554 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3555 } 3556 3557 static void 3558 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3559 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3560 { 3561 assert(blob != NULL); 3562 *snapshot_entry = NULL; 3563 *clone_entry = NULL; 3564 3565 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3566 return; 3567 } 3568 3569 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3570 if ((*snapshot_entry)->id == blob->parent_id) { 3571 break; 3572 } 3573 } 3574 3575 if (*snapshot_entry != NULL) { 3576 TAILQ_FOREACH(*clone_entry, 
&(*snapshot_entry)->clones, link) { 3577 if ((*clone_entry)->id == blob->id) { 3578 break; 3579 } 3580 } 3581 3582 assert(*clone_entry != NULL); 3583 } 3584 } 3585 3586 static int 3587 bs_channel_create(void *io_device, void *ctx_buf) 3588 { 3589 struct spdk_blob_store *bs = io_device; 3590 struct spdk_bs_channel *channel = ctx_buf; 3591 struct spdk_bs_dev *dev; 3592 uint32_t max_ops = bs->max_channel_ops; 3593 uint32_t i; 3594 3595 dev = bs->dev; 3596 3597 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3598 if (!channel->req_mem) { 3599 return -1; 3600 } 3601 3602 TAILQ_INIT(&channel->reqs); 3603 3604 for (i = 0; i < max_ops; i++) { 3605 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3606 } 3607 3608 channel->bs = bs; 3609 channel->dev = dev; 3610 channel->dev_channel = dev->create_channel(dev); 3611 3612 if (!channel->dev_channel) { 3613 SPDK_ERRLOG("Failed to create device channel.\n"); 3614 free(channel->req_mem); 3615 return -1; 3616 } 3617 3618 channel->new_cluster_page = spdk_zmalloc(bs->md_page_size, 0, NULL, SPDK_ENV_NUMA_ID_ANY, 3619 SPDK_MALLOC_DMA); 3620 if (!channel->new_cluster_page) { 3621 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3622 free(channel->req_mem); 3623 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3624 return -1; 3625 } 3626 3627 TAILQ_INIT(&channel->need_cluster_alloc); 3628 TAILQ_INIT(&channel->queued_io); 3629 RB_INIT(&channel->esnap_channels); 3630 3631 return 0; 3632 } 3633 3634 static void 3635 bs_channel_destroy(void *io_device, void *ctx_buf) 3636 { 3637 struct spdk_bs_channel *channel = ctx_buf; 3638 spdk_bs_user_op_t *op; 3639 3640 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3641 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3642 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3643 bs_user_op_abort(op, -EIO); 3644 } 3645 3646 while (!TAILQ_EMPTY(&channel->queued_io)) { 3647 op = TAILQ_FIRST(&channel->queued_io); 3648 TAILQ_REMOVE(&channel->queued_io, op, link); 3649 bs_user_op_abort(op, -EIO); 3650 } 3651 3652 blob_esnap_destroy_bs_channel(channel); 3653 3654 free(channel->req_mem); 3655 spdk_free(channel->new_cluster_page); 3656 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3657 } 3658 3659 static void 3660 bs_dev_destroy(void *io_device) 3661 { 3662 struct spdk_blob_store *bs = io_device; 3663 struct spdk_blob *blob, *blob_tmp; 3664 3665 bs->dev->destroy(bs->dev); 3666 3667 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3668 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3669 spdk_bit_array_clear(bs->open_blobids, blob->id); 3670 blob_free(blob); 3671 } 3672 3673 spdk_spin_destroy(&bs->used_lock); 3674 3675 spdk_bit_array_free(&bs->open_blobids); 3676 spdk_bit_array_free(&bs->used_blobids); 3677 spdk_bit_array_free(&bs->used_md_pages); 3678 spdk_bit_pool_free(&bs->used_clusters); 3679 /* 3680 * If this function is called for any reason except a successful unload, 3681 * the unload_cpl type will be NONE and this will be a nop. 
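* A successful unload stores its completion in unload_cpl before unregistering the io_device, and that completion fires here once teardown finishes.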
3682 */ 3683 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3684 3685 free(bs); 3686 } 3687 3688 static int 3689 bs_blob_list_add(struct spdk_blob *blob) 3690 { 3691 spdk_blob_id snapshot_id; 3692 struct spdk_blob_list *snapshot_entry = NULL; 3693 struct spdk_blob_list *clone_entry = NULL; 3694 3695 assert(blob != NULL); 3696 3697 snapshot_id = blob->parent_id; 3698 if (snapshot_id == SPDK_BLOBID_INVALID || 3699 snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 3700 return 0; 3701 } 3702 3703 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3704 if (snapshot_entry == NULL) { 3705 /* Snapshot not found */ 3706 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3707 if (snapshot_entry == NULL) { 3708 return -ENOMEM; 3709 } 3710 snapshot_entry->id = snapshot_id; 3711 TAILQ_INIT(&snapshot_entry->clones); 3712 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3713 } else { 3714 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3715 if (clone_entry->id == blob->id) { 3716 break; 3717 } 3718 } 3719 } 3720 3721 if (clone_entry == NULL) { 3722 /* Clone not found */ 3723 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3724 if (clone_entry == NULL) { 3725 return -ENOMEM; 3726 } 3727 clone_entry->id = blob->id; 3728 TAILQ_INIT(&clone_entry->clones); 3729 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3730 snapshot_entry->clone_count++; 3731 } 3732 3733 return 0; 3734 } 3735 3736 static void 3737 bs_blob_list_remove(struct spdk_blob *blob) 3738 { 3739 struct spdk_blob_list *snapshot_entry = NULL; 3740 struct spdk_blob_list *clone_entry = NULL; 3741 3742 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3743 3744 if (snapshot_entry == NULL) { 3745 return; 3746 } 3747 3748 blob->parent_id = SPDK_BLOBID_INVALID; 3749 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3750 free(clone_entry); 3751 3752 snapshot_entry->clone_count--; 3753 } 3754 3755 static int 3756 bs_blob_list_free(struct spdk_blob_store *bs) 3757 { 3758 struct spdk_blob_list *snapshot_entry; 3759 struct spdk_blob_list *snapshot_entry_tmp; 3760 struct spdk_blob_list *clone_entry; 3761 struct spdk_blob_list *clone_entry_tmp; 3762 3763 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3764 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3765 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3766 free(clone_entry); 3767 } 3768 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3769 free(snapshot_entry); 3770 } 3771 3772 return 0; 3773 } 3774 3775 static void 3776 bs_free(struct spdk_blob_store *bs) 3777 { 3778 bs_blob_list_free(bs); 3779 3780 bs_unregister_md_thread(bs); 3781 spdk_io_device_unregister(bs, bs_dev_destroy); 3782 } 3783 3784 void 3785 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3786 { 3787 3788 if (!opts) { 3789 SPDK_ERRLOG("opts should not be NULL\n"); 3790 return; 3791 } 3792 3793 if (!opts_size) { 3794 SPDK_ERRLOG("opts_size should not be zero\n"); 3795 return; 3796 } 3797 3798 memset(opts, 0, opts_size); 3799 opts->opts_size = opts_size; 3800 3801 #define FIELD_OK(field) \ 3802 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3803 3804 #define SET_FIELD(field, value) \ 3805 if (FIELD_OK(field)) { \ 3806 opts->field = value; \ 3807 } \ 3808 3809 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3810 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3811 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_MAX_MD_OPS); 3812 
SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3813 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3814 3815 if (FIELD_OK(bstype)) { 3816 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3817 } 3818 3819 SET_FIELD(iter_cb_fn, NULL); 3820 SET_FIELD(iter_cb_arg, NULL); 3821 SET_FIELD(force_recover, false); 3822 SET_FIELD(esnap_bs_dev_create, NULL); 3823 SET_FIELD(esnap_ctx, NULL); 3824 3825 #undef FIELD_OK 3826 #undef SET_FIELD 3827 } 3828 3829 static int 3830 bs_opts_verify(struct spdk_bs_opts *opts) 3831 { 3832 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3833 opts->max_channel_ops == 0) { 3834 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3835 return -1; 3836 } 3837 3838 if ((opts->cluster_sz % SPDK_BS_PAGE_SIZE) != 0) { 3839 SPDK_ERRLOG("Cluster size %" PRIu32 " is not an integral multiple of page size %" PRIu32"\n", 3840 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3841 return -1; 3842 } 3843 3844 return 0; 3845 } 3846 3847 /* START spdk_bs_load */ 3848 3849 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3850 3851 struct spdk_bs_load_ctx { 3852 struct spdk_blob_store *bs; 3853 struct spdk_bs_super_block *super; 3854 3855 struct spdk_bs_md_mask *mask; 3856 bool in_page_chain; 3857 uint32_t page_index; 3858 uint32_t cur_page; 3859 struct spdk_blob_md_page *page; 3860 3861 uint64_t num_extent_pages; 3862 uint32_t *extent_page_num; 3863 struct spdk_blob_md_page *extent_pages; 3864 struct spdk_bit_array *used_clusters; 3865 3866 spdk_bs_sequence_t *seq; 3867 spdk_blob_op_with_handle_complete iter_cb_fn; 3868 void *iter_cb_arg; 3869 struct spdk_blob *blob; 3870 spdk_blob_id blobid; 3871 3872 bool force_recover; 3873 3874 /* These fields are used in the spdk_bs_dump path. */ 3875 bool dumping; 3876 FILE *fp; 3877 spdk_bs_dump_print_xattr print_xattr_fn; 3878 char xattr_name[4096]; 3879 }; 3880 3881 static void 3882 bs_init_per_cluster_fields(struct spdk_blob_store *bs) 3883 { 3884 bs->pages_per_cluster = bs->cluster_sz / bs->md_page_size; 3885 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3886 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3887 } 3888 bs->io_units_per_cluster = bs->cluster_sz / bs->io_unit_size; 3889 if (spdk_u32_is_pow2(bs->io_units_per_cluster)) { 3890 bs->io_units_per_cluster_shift = spdk_u32log2(bs->io_units_per_cluster); 3891 } 3892 } 3893 3894 static int 3895 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3896 struct spdk_bs_load_ctx **_ctx) 3897 { 3898 struct spdk_blob_store *bs; 3899 struct spdk_bs_load_ctx *ctx; 3900 uint64_t dev_size; 3901 uint32_t md_page_size; 3902 int rc; 3903 3904 dev_size = dev->blocklen * dev->blockcnt; 3905 if (dev_size < opts->cluster_sz) { 3906 /* Device size cannot be smaller than cluster size of blobstore */ 3907 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3908 dev_size, opts->cluster_sz); 3909 return -ENOSPC; 3910 } 3911 3912 md_page_size = spdk_max(spdk_max(dev->phys_blocklen, SPDK_BS_PAGE_SIZE), 3913 opts->md_page_size); 3914 if (opts->cluster_sz < md_page_size) { 3915 /* Cluster size cannot be smaller than page size */ 3916 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3917 opts->cluster_sz, md_page_size); 3918 return -EINVAL; 3919 } 3920 bs = calloc(1, sizeof(struct spdk_blob_store)); 3921 if (!bs) { 3922 return -ENOMEM; 3923 } 3924 3925 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3926 if (!ctx) { 3927 free(bs); 3928
return -ENOMEM; 3929 } 3930 3931 ctx->bs = bs; 3932 ctx->iter_cb_fn = opts->iter_cb_fn; 3933 ctx->iter_cb_arg = opts->iter_cb_arg; 3934 ctx->force_recover = opts->force_recover; 3935 3936 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3937 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 3938 if (!ctx->super) { 3939 free(ctx); 3940 free(bs); 3941 return -ENOMEM; 3942 } 3943 3944 RB_INIT(&bs->open_blobs); 3945 TAILQ_INIT(&bs->snapshots); 3946 bs->dev = dev; 3947 bs->md_page_size = md_page_size; 3948 bs->md_thread = spdk_get_thread(); 3949 assert(bs->md_thread != NULL); 3950 3951 /* 3952 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3953 * even multiple of the cluster size. 3954 */ 3955 bs->cluster_sz = opts->cluster_sz; 3956 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3957 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3958 if (!ctx->used_clusters) { 3959 spdk_free(ctx->super); 3960 free(ctx); 3961 free(bs); 3962 return -ENOMEM; 3963 } 3964 3965 bs->num_free_clusters = bs->total_clusters; 3966 bs->io_unit_size = dev->blocklen; 3967 bs_init_per_cluster_fields(bs); 3968 3969 bs->max_channel_ops = opts->max_channel_ops; 3970 bs->super_blob = SPDK_BLOBID_INVALID; 3971 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3972 bs->esnap_bs_dev_create = opts->esnap_bs_dev_create; 3973 bs->esnap_ctx = opts->esnap_ctx; 3974 3975 /* The metadata is assumed to be at least 1 page */ 3976 bs->used_md_pages = spdk_bit_array_create(1); 3977 bs->used_blobids = spdk_bit_array_create(0); 3978 bs->open_blobids = spdk_bit_array_create(0); 3979 3980 spdk_spin_init(&bs->used_lock); 3981 3982 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3983 sizeof(struct spdk_bs_channel), "blobstore"); 3984 rc = bs_register_md_thread(bs); 3985 if (rc == -1) { 3986 spdk_io_device_unregister(bs, NULL); 3987 spdk_spin_destroy(&bs->used_lock); 3988 spdk_bit_array_free(&bs->open_blobids); 3989 spdk_bit_array_free(&bs->used_blobids); 3990 spdk_bit_array_free(&bs->used_md_pages); 3991 spdk_bit_array_free(&ctx->used_clusters); 3992 spdk_free(ctx->super); 3993 free(ctx); 3994 free(bs); 3995 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3996 return -ENOMEM; 3997 } 3998 3999 *_ctx = ctx; 4000 *_bs = bs; 4001 return 0; 4002 } 4003 4004 static void 4005 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 4006 { 4007 assert(bserrno != 0); 4008 4009 spdk_free(ctx->super); 4010 bs_sequence_finish(ctx->seq, bserrno); 4011 bs_free(ctx->bs); 4012 spdk_bit_array_free(&ctx->used_clusters); 4013 free(ctx); 4014 } 4015 4016 static void 4017 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 4018 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 4019 { 4020 /* Update the values in the super block */ 4021 super->super_blob = bs->super_blob; 4022 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 4023 super->crc = blob_md_page_calc_crc(super); 4024 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 4025 bs_byte_to_lba(bs, sizeof(*super)), 4026 cb_fn, cb_arg); 4027 } 4028 4029 static void 4030 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4031 { 4032 struct spdk_bs_load_ctx *ctx = arg; 4033 uint64_t mask_size, lba, lba_count; 4034 4035 /* Write out the used clusters mask */ 4036 mask_size = ctx->super->used_cluster_mask_len * ctx->bs->md_page_size; 4037 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4038 
SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4039 if (!ctx->mask) { 4040 bs_load_ctx_fail(ctx, -ENOMEM); 4041 return; 4042 } 4043 4044 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 4045 ctx->mask->length = ctx->bs->total_clusters; 4046 /* We could get here through the normal unload path, or through dirty 4047 * shutdown recovery. For the normal unload path, we use the mask from 4048 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 4049 * only the bit array from the load ctx. 4050 */ 4051 if (ctx->bs->used_clusters) { 4052 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 4053 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 4054 } else { 4055 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 4056 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 4057 } 4058 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4059 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4060 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4061 } 4062 4063 static void 4064 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4065 { 4066 struct spdk_bs_load_ctx *ctx = arg; 4067 uint64_t mask_size, lba, lba_count; 4068 4069 mask_size = ctx->super->used_page_mask_len * ctx->bs->md_page_size; 4070 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4071 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4072 if (!ctx->mask) { 4073 bs_load_ctx_fail(ctx, -ENOMEM); 4074 return; 4075 } 4076 4077 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 4078 ctx->mask->length = ctx->super->md_len; 4079 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 4080 4081 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4082 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4083 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4084 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4085 } 4086 4087 static void 4088 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4089 { 4090 struct spdk_bs_load_ctx *ctx = arg; 4091 uint64_t mask_size, lba, lba_count; 4092 4093 if (ctx->super->used_blobid_mask_len == 0) { 4094 /* 4095 * This is a pre-v3 on-disk format where the blobid mask does not get 4096 * written to disk. 
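 * There is no reserved mask region to write to in that layout; on the
 * next load such a blobstore takes the bs_recover() path (see
 * bs_load_super_cpl), which rebuilds the blobid mask from the raw
 * metadata pages instead.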
4097 */ 4098 cb_fn(seq, arg, 0); 4099 return; 4100 } 4101 4102 mask_size = ctx->super->used_blobid_mask_len * ctx->bs->md_page_size; 4103 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4104 SPDK_MALLOC_DMA); 4105 if (!ctx->mask) { 4106 bs_load_ctx_fail(ctx, -ENOMEM); 4107 return; 4108 } 4109 4110 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 4111 ctx->mask->length = ctx->super->md_len; 4112 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 4113 4114 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 4115 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4116 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4117 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4118 } 4119 4120 static void 4121 blob_set_thin_provision(struct spdk_blob *blob) 4122 { 4123 blob_verify_md_op(blob); 4124 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4125 blob->state = SPDK_BLOB_STATE_DIRTY; 4126 } 4127 4128 static void 4129 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 4130 { 4131 blob_verify_md_op(blob); 4132 blob->clear_method = clear_method; 4133 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 4134 blob->state = SPDK_BLOB_STATE_DIRTY; 4135 } 4136 4137 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 4138 4139 static void 4140 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 4141 { 4142 struct spdk_bs_load_ctx *ctx = cb_arg; 4143 spdk_blob_id id; 4144 int64_t page_num; 4145 4146 /* Iterate to next blob (we can't use spdk_bs_iter_next function as our 4147 * last blob has been removed */ 4148 page_num = bs_blobid_to_page(ctx->blobid); 4149 page_num++; 4150 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 4151 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 4152 bs_load_iter(ctx, NULL, -ENOENT); 4153 return; 4154 } 4155 4156 id = bs_page_to_blobid(page_num); 4157 4158 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 4159 } 4160 4161 static void 4162 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 4163 { 4164 struct spdk_bs_load_ctx *ctx = cb_arg; 4165 4166 if (bserrno != 0) { 4167 SPDK_ERRLOG("Failed to close corrupted blob\n"); 4168 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4169 return; 4170 } 4171 4172 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 4173 } 4174 4175 static void 4176 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 4177 { 4178 struct spdk_bs_load_ctx *ctx = cb_arg; 4179 uint64_t i; 4180 4181 if (bserrno != 0) { 4182 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4183 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4184 return; 4185 } 4186 4187 /* Snapshot and clone have the same copy of cluster map and extent pages 4188 * at this point. Let's clear both for snapshot now, 4189 * so that it won't be cleared for clone later when we remove snapshot. 
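 * Otherwise removing the snapshot would release clusters and extent pages
 * that the clone still references.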
4190 * Also set thin provision to pass data corruption check */ 4191 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 4192 ctx->blob->active.clusters[i] = 0; 4193 } 4194 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 4195 ctx->blob->active.extent_pages[i] = 0; 4196 } 4197 4198 ctx->blob->active.num_allocated_clusters = 0; 4199 4200 ctx->blob->md_ro = false; 4201 4202 blob_set_thin_provision(ctx->blob); 4203 4204 ctx->blobid = ctx->blob->id; 4205 4206 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 4207 } 4208 4209 static void 4210 bs_update_corrupted_blob(void *cb_arg, int bserrno) 4211 { 4212 struct spdk_bs_load_ctx *ctx = cb_arg; 4213 4214 if (bserrno != 0) { 4215 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4216 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4217 return; 4218 } 4219 4220 ctx->blob->md_ro = false; 4221 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 4222 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 4223 spdk_blob_set_read_only(ctx->blob); 4224 4225 if (ctx->iter_cb_fn) { 4226 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 4227 } 4228 bs_blob_list_add(ctx->blob); 4229 4230 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4231 } 4232 4233 static void 4234 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 4235 { 4236 struct spdk_bs_load_ctx *ctx = cb_arg; 4237 4238 if (bserrno != 0) { 4239 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 4240 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4241 return; 4242 } 4243 4244 if (blob->parent_id == ctx->blob->id) { 4245 /* Power failure occurred before updating clone (snapshot delete case) 4246 * or after updating clone (creating snapshot case) - keep snapshot */ 4247 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 4248 } else { 4249 /* Power failure occurred after updating clone (snapshot delete case) 4250 * or before updating clone (creating snapshot case) - remove snapshot */ 4251 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 4252 } 4253 } 4254 4255 static void 4256 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 4257 { 4258 struct spdk_bs_load_ctx *ctx = arg; 4259 const void *value; 4260 size_t len; 4261 int rc = 0; 4262 4263 if (bserrno == 0) { 4264 /* Examine blob if it is corrupted after power failure. Fix 4265 * the ones that can be fixed and remove any other corrupted 4266 * ones. If it is not corrupted just process it */ 4267 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 4268 if (rc != 0) { 4269 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 4270 if (rc != 0) { 4271 /* Not corrupted - process it and continue with iterating through blobs */ 4272 if (ctx->iter_cb_fn) { 4273 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 4274 } 4275 bs_blob_list_add(blob); 4276 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 4277 return; 4278 } 4279 4280 } 4281 4282 assert(len == sizeof(spdk_blob_id)); 4283 4284 ctx->blob = blob; 4285 4286 /* Open clone to check if we are able to fix this blob or should we remove it */ 4287 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 4288 return; 4289 } else if (bserrno == -ENOENT) { 4290 bserrno = 0; 4291 } else { 4292 /* 4293 * This case needs to be looked at further. Same problem 4294 * exists with applications that rely on explicit blob 4295 * iteration. We should just skip the blob that failed 4296 * to load and continue on to the next one. 
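 * For now, though, such a failure is fatal: the load sequence below
 * finishes with the original bserrno.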
4297 */ 4298 SPDK_ERRLOG("Error in iterating blobs\n"); 4299 } 4300 4301 ctx->iter_cb_fn = NULL; 4302 4303 spdk_free(ctx->super); 4304 spdk_free(ctx->mask); 4305 bs_sequence_finish(ctx->seq, bserrno); 4306 free(ctx); 4307 } 4308 4309 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 4310 4311 static void 4312 bs_load_complete(struct spdk_bs_load_ctx *ctx) 4313 { 4314 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters); 4315 if (ctx->dumping) { 4316 bs_dump_read_md_page(ctx->seq, ctx); 4317 return; 4318 } 4319 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx); 4320 } 4321 4322 static void 4323 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4324 { 4325 struct spdk_bs_load_ctx *ctx = cb_arg; 4326 int rc; 4327 4328 /* The type must be correct */ 4329 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 4330 4331 /* The length of the mask (in bits) must not be greater than 4332 * the length of the buffer (converted to bits) */ 4333 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * ctx->super->md_page_size * 8)); 4334 4335 /* The length of the mask must be exactly equal to the size 4336 * (in pages) of the metadata region */ 4337 assert(ctx->mask->length == ctx->super->md_len); 4338 4339 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length); 4340 if (rc < 0) { 4341 spdk_free(ctx->mask); 4342 bs_load_ctx_fail(ctx, rc); 4343 return; 4344 } 4345 4346 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask); 4347 bs_load_complete(ctx); 4348 } 4349 4350 static void 4351 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4352 { 4353 struct spdk_bs_load_ctx *ctx = cb_arg; 4354 uint64_t lba, lba_count, mask_size; 4355 int rc; 4356 4357 if (bserrno != 0) { 4358 bs_load_ctx_fail(ctx, bserrno); 4359 return; 4360 } 4361 4362 /* The type must be correct */ 4363 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 4364 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4365 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 4366 struct spdk_blob_md_page) * 8)); 4367 /* 4368 * The length of the mask must be equal to or larger than the total number of clusters. It may be 4369 * larger than the total number of clusters due to a failed spdk_bs_grow.
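 * In that case only the first total_clusters bits are meaningful; the
 * mask length is clamped below before the bit array is resized.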
4370 */ 4371 assert(ctx->mask->length >= ctx->bs->total_clusters); 4372 if (ctx->mask->length > ctx->bs->total_clusters) { 4373 SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n"); 4374 ctx->mask->length = ctx->bs->total_clusters; 4375 } 4376 4377 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length); 4378 if (rc < 0) { 4379 spdk_free(ctx->mask); 4380 bs_load_ctx_fail(ctx, rc); 4381 return; 4382 } 4383 4384 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask); 4385 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters); 4386 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 4387 4388 spdk_free(ctx->mask); 4389 4390 /* Read the used blobids mask */ 4391 mask_size = ctx->super->used_blobid_mask_len * ctx->super->md_page_size; 4392 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4393 SPDK_MALLOC_DMA); 4394 if (!ctx->mask) { 4395 bs_load_ctx_fail(ctx, -ENOMEM); 4396 return; 4397 } 4398 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4399 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4400 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4401 bs_load_used_blobids_cpl, ctx); 4402 } 4403 4404 static void 4405 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4406 { 4407 struct spdk_bs_load_ctx *ctx = cb_arg; 4408 uint64_t lba, lba_count, mask_size; 4409 int rc; 4410 4411 if (bserrno != 0) { 4412 bs_load_ctx_fail(ctx, bserrno); 4413 return; 4414 } 4415 4416 /* The type must be correct */ 4417 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 4418 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4419 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * ctx->super->md_page_size * 4420 8)); 4421 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 4422 if (ctx->mask->length != ctx->super->md_len) { 4423 SPDK_ERRLOG("mismatched md_len in used_pages mask: " 4424 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n", 4425 ctx->mask->length, ctx->super->md_len); 4426 assert(false); 4427 } 4428 4429 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length); 4430 if (rc < 0) { 4431 spdk_free(ctx->mask); 4432 bs_load_ctx_fail(ctx, rc); 4433 return; 4434 } 4435 4436 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4437 spdk_free(ctx->mask); 4438 4439 /* Read the used clusters mask */ 4440 mask_size = ctx->super->used_cluster_mask_len * ctx->super->md_page_size; 4441 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4442 SPDK_MALLOC_DMA); 4443 if (!ctx->mask) { 4444 bs_load_ctx_fail(ctx, -ENOMEM); 4445 return; 4446 } 4447 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4448 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4449 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4450 bs_load_used_clusters_cpl, ctx); 4451 } 4452 4453 static void 4454 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx) 4455 { 4456 uint64_t lba, lba_count, mask_size; 4457 4458 /* Read the used pages mask */ 4459 mask_size = ctx->super->used_page_mask_len * ctx->super->md_page_size; 4460 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4461 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4462 if (!ctx->mask) { 4463 bs_load_ctx_fail(ctx, -ENOMEM); 4464 return; 4465 } 4466 4467 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4468 lba_count
= bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4469 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 4470 bs_load_used_pages_cpl, ctx); 4471 } 4472 4473 static int 4474 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 4475 { 4476 struct spdk_blob_store *bs = ctx->bs; 4477 struct spdk_blob_md_descriptor *desc; 4478 size_t cur_desc = 0; 4479 4480 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4481 while (cur_desc < sizeof(page->descriptors)) { 4482 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4483 if (desc->length == 0) { 4484 /* If padding and length are 0, this terminates the page */ 4485 break; 4486 } 4487 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4488 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4489 unsigned int i, j; 4490 unsigned int cluster_count = 0; 4491 uint32_t cluster_idx; 4492 4493 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4494 4495 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4496 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 4497 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 4498 /* 4499 * cluster_idx = 0 means an unallocated cluster - don't mark that 4500 * in the used cluster map. 4501 */ 4502 if (cluster_idx != 0) { 4503 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 4504 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 4505 if (bs->num_free_clusters == 0) { 4506 return -ENOSPC; 4507 } 4508 bs->num_free_clusters--; 4509 } 4510 cluster_count++; 4511 } 4512 } 4513 if (cluster_count == 0) { 4514 return -EINVAL; 4515 } 4516 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4517 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4518 uint32_t i; 4519 uint32_t cluster_count = 0; 4520 uint32_t cluster_idx; 4521 size_t cluster_idx_length; 4522 4523 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4524 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4525 4526 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4527 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4528 return -EINVAL; 4529 } 4530 4531 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4532 cluster_idx = desc_extent->cluster_idx[i]; 4533 /* 4534 * cluster_idx = 0 means an unallocated cluster - don't mark that 4535 * in the used cluster map. 
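 * Note that unlike the RLE descriptor above, which encodes runs as a
 * start index plus a length, an extent page stores one absolute cluster
 * index per entry.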
4536 */ 4537 if (cluster_idx != 0) { 4538 if (cluster_idx < desc_extent->start_cluster_idx && 4539 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4540 return -EINVAL; 4541 } 4542 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4543 if (bs->num_free_clusters == 0) { 4544 return -ENOSPC; 4545 } 4546 bs->num_free_clusters--; 4547 } 4548 cluster_count++; 4549 } 4550 4551 if (cluster_count == 0) { 4552 return -EINVAL; 4553 } 4554 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4555 /* Skip this item */ 4556 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4557 /* Skip this item */ 4558 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4559 /* Skip this item */ 4560 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4561 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4562 uint32_t num_extent_pages = ctx->num_extent_pages; 4563 uint32_t i; 4564 size_t extent_pages_length; 4565 void *tmp; 4566 4567 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4568 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4569 4570 if (desc_extent_table->length == 0 || 4571 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4572 return -EINVAL; 4573 } 4574 4575 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4576 if (desc_extent_table->extent_page[i].page_idx != 0) { 4577 if (desc_extent_table->extent_page[i].num_pages != 1) { 4578 return -EINVAL; 4579 } 4580 num_extent_pages += 1; 4581 } 4582 } 4583 4584 if (num_extent_pages > 0) { 4585 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4586 if (tmp == NULL) { 4587 return -ENOMEM; 4588 } 4589 ctx->extent_page_num = tmp; 4590 4591 /* Extent table entries contain md page numbers for extent pages. 4592 * Zeroes represent unallocated extent pages, those are run-length-encoded. 4593 */ 4594 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4595 if (desc_extent_table->extent_page[i].page_idx != 0) { 4596 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4597 ctx->num_extent_pages += 1; 4598 } 4599 } 4600 } 4601 } else { 4602 /* Error */ 4603 return -EINVAL; 4604 } 4605 /* Advance to the next descriptor */ 4606 cur_desc += sizeof(*desc) + desc->length; 4607 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4608 break; 4609 } 4610 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4611 } 4612 return 0; 4613 } 4614 4615 static bool 4616 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4617 { 4618 uint32_t crc; 4619 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4620 size_t desc_len; 4621 4622 crc = blob_md_page_calc_crc(page); 4623 if (crc != page->crc) { 4624 return false; 4625 } 4626 4627 /* Extent page should always be of sequence num 0. */ 4628 if (page->sequence_num != 0) { 4629 return false; 4630 } 4631 4632 /* Descriptor type must be EXTENT_PAGE. */ 4633 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4634 return false; 4635 } 4636 4637 /* Descriptor length cannot exceed the page. */ 4638 desc_len = sizeof(*desc) + desc->length; 4639 if (desc_len > sizeof(page->descriptors)) { 4640 return false; 4641 } 4642 4643 /* It has to be the only descriptor in the page. 
*/ 4644 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 4645 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 4646 if (desc->length != 0) { 4647 return false; 4648 } 4649 } 4650 4651 return true; 4652 } 4653 4654 static bool 4655 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 4656 { 4657 uint32_t crc; 4658 struct spdk_blob_md_page *page = ctx->page; 4659 4660 crc = blob_md_page_calc_crc(page); 4661 if (crc != page->crc) { 4662 return false; 4663 } 4664 4665 /* First page of a sequence should match the blobid. */ 4666 if (page->sequence_num == 0 && 4667 bs_page_to_blobid(ctx->cur_page) != page->id) { 4668 return false; 4669 } 4670 assert(bs_load_cur_extent_page_valid(page) == false); 4671 4672 return true; 4673 } 4674 4675 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx); 4676 4677 static void 4678 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4679 { 4680 struct spdk_bs_load_ctx *ctx = cb_arg; 4681 4682 if (bserrno != 0) { 4683 bs_load_ctx_fail(ctx, bserrno); 4684 return; 4685 } 4686 4687 bs_load_complete(ctx); 4688 } 4689 4690 static void 4691 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4692 { 4693 struct spdk_bs_load_ctx *ctx = cb_arg; 4694 4695 spdk_free(ctx->mask); 4696 ctx->mask = NULL; 4697 4698 if (bserrno != 0) { 4699 bs_load_ctx_fail(ctx, bserrno); 4700 return; 4701 } 4702 4703 bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl); 4704 } 4705 4706 static void 4707 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4708 { 4709 struct spdk_bs_load_ctx *ctx = cb_arg; 4710 4711 spdk_free(ctx->mask); 4712 ctx->mask = NULL; 4713 4714 if (bserrno != 0) { 4715 bs_load_ctx_fail(ctx, bserrno); 4716 return; 4717 } 4718 4719 bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl); 4720 } 4721 4722 static void 4723 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx) 4724 { 4725 bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl); 4726 } 4727 4728 static void 4729 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx) 4730 { 4731 uint64_t num_md_clusters; 4732 uint64_t i; 4733 4734 ctx->in_page_chain = false; 4735 4736 do { 4737 ctx->page_index++; 4738 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 4739 4740 if (ctx->page_index < ctx->super->md_len) { 4741 ctx->cur_page = ctx->page_index; 4742 bs_load_replay_cur_md_page(ctx); 4743 } else { 4744 /* Claim all of the clusters used by the metadata */ 4745 num_md_clusters = spdk_divide_round_up( 4746 ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster); 4747 for (i = 0; i < num_md_clusters; i++) { 4748 spdk_bit_array_set(ctx->used_clusters, i); 4749 } 4750 ctx->bs->num_free_clusters -= num_md_clusters; 4751 spdk_free(ctx->page); 4752 bs_load_write_used_md(ctx); 4753 } 4754 } 4755 4756 static void 4757 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4758 { 4759 struct spdk_bs_load_ctx *ctx = cb_arg; 4760 uint32_t page_num; 4761 uint64_t i; 4762 4763 if (bserrno != 0) { 4764 spdk_free(ctx->extent_pages); 4765 bs_load_ctx_fail(ctx, bserrno); 4766 return; 4767 } 4768 4769 for (i = 0; i < ctx->num_extent_pages; i++) { 4770 /* Extent pages are only read when present within the chain md. 4771 * Metadata integrity is broken if that page was not a valid extent page.
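 * A CRC or layout mismatch here therefore aborts the whole load with
 * -EILSEQ rather than skipping the page.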
*/ 4772 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4773 spdk_free(ctx->extent_pages); 4774 bs_load_ctx_fail(ctx, -EILSEQ); 4775 return; 4776 } 4777 4778 page_num = ctx->extent_page_num[i]; 4779 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4780 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4781 spdk_free(ctx->extent_pages); 4782 bs_load_ctx_fail(ctx, -EILSEQ); 4783 return; 4784 } 4785 } 4786 4787 spdk_free(ctx->extent_pages); 4788 free(ctx->extent_page_num); 4789 ctx->extent_page_num = NULL; 4790 ctx->num_extent_pages = 0; 4791 4792 bs_load_replay_md_chain_cpl(ctx); 4793 } 4794 4795 static void 4796 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4797 { 4798 spdk_bs_batch_t *batch; 4799 uint32_t page; 4800 uint64_t lba; 4801 uint64_t i; 4802 4803 ctx->extent_pages = spdk_zmalloc(ctx->super->md_page_size * ctx->num_extent_pages, 0, 4804 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4805 if (!ctx->extent_pages) { 4806 bs_load_ctx_fail(ctx, -ENOMEM); 4807 return; 4808 } 4809 4810 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4811 4812 for (i = 0; i < ctx->num_extent_pages; i++) { 4813 page = ctx->extent_page_num[i]; 4814 assert(page < ctx->super->md_len); 4815 lba = bs_md_page_to_lba(ctx->bs, page); 4816 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4817 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size)); 4818 } 4819 4820 bs_batch_close(batch); 4821 } 4822 4823 static void 4824 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4825 { 4826 struct spdk_bs_load_ctx *ctx = cb_arg; 4827 uint32_t page_num; 4828 struct spdk_blob_md_page *page; 4829 4830 if (bserrno != 0) { 4831 bs_load_ctx_fail(ctx, bserrno); 4832 return; 4833 } 4834 4835 page_num = ctx->cur_page; 4836 page = ctx->page; 4837 if (bs_load_cur_md_page_valid(ctx) == true) { 4838 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4839 spdk_spin_lock(&ctx->bs->used_lock); 4840 bs_claim_md_page(ctx->bs, page_num); 4841 spdk_spin_unlock(&ctx->bs->used_lock); 4842 if (page->sequence_num == 0) { 4843 SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num); 4844 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4845 } 4846 if (bs_load_replay_md_parse_page(ctx, page)) { 4847 bs_load_ctx_fail(ctx, -EILSEQ); 4848 return; 4849 } 4850 if (page->next != SPDK_INVALID_MD_PAGE) { 4851 ctx->in_page_chain = true; 4852 ctx->cur_page = page->next; 4853 bs_load_replay_cur_md_page(ctx); 4854 return; 4855 } 4856 if (ctx->num_extent_pages != 0) { 4857 bs_load_replay_extent_pages(ctx); 4858 return; 4859 } 4860 } 4861 } 4862 bs_load_replay_md_chain_cpl(ctx); 4863 } 4864 4865 static void 4866 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4867 { 4868 uint64_t lba; 4869 4870 assert(ctx->cur_page < ctx->super->md_len); 4871 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4872 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4873 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size), 4874 bs_load_replay_md_cpl, ctx); 4875 } 4876 4877 static void 4878 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4879 { 4880 ctx->page_index = 0; 4881 ctx->cur_page = 0; 4882 ctx->page = spdk_zmalloc(ctx->bs->md_page_size, 0, 4883 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4884 if (!ctx->page) { 4885 bs_load_ctx_fail(ctx, -ENOMEM); 4886 return; 4887 } 4888 bs_load_replay_cur_md_page(ctx); 4889 } 4890 4891 static void 4892 bs_recover(struct spdk_bs_load_ctx *ctx) 4893 { 4894 int rc; 4895 4896 SPDK_NOTICELOG("Performing recovery on 
blobstore\n"); 4897 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4898 if (rc < 0) { 4899 bs_load_ctx_fail(ctx, -ENOMEM); 4900 return; 4901 } 4902 4903 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4904 if (rc < 0) { 4905 bs_load_ctx_fail(ctx, -ENOMEM); 4906 return; 4907 } 4908 4909 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4910 if (rc < 0) { 4911 bs_load_ctx_fail(ctx, -ENOMEM); 4912 return; 4913 } 4914 4915 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4916 if (rc < 0) { 4917 bs_load_ctx_fail(ctx, -ENOMEM); 4918 return; 4919 } 4920 4921 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4922 bs_load_replay_md(ctx); 4923 } 4924 4925 static int 4926 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4927 { 4928 int rc; 4929 4930 if (ctx->super->size == 0) { 4931 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4932 } 4933 4934 if (ctx->super->io_unit_size == 0) { 4935 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4936 } 4937 if (ctx->super->md_page_size == 0) { 4938 ctx->super->md_page_size = SPDK_BS_PAGE_SIZE; 4939 } 4940 4941 ctx->bs->clean = 1; 4942 ctx->bs->cluster_sz = ctx->super->cluster_size; 4943 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4944 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4945 ctx->bs->md_page_size = ctx->super->md_page_size; 4946 bs_init_per_cluster_fields(ctx->bs); 4947 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4948 if (rc < 0) { 4949 return -ENOMEM; 4950 } 4951 ctx->bs->md_start = ctx->super->md_start; 4952 ctx->bs->md_len = ctx->super->md_len; 4953 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4954 if (rc < 0) { 4955 return -ENOMEM; 4956 } 4957 4958 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4959 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4960 ctx->bs->super_blob = ctx->super->super_blob; 4961 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4962 4963 return 0; 4964 } 4965 4966 static void 4967 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4968 { 4969 struct spdk_bs_load_ctx *ctx = cb_arg; 4970 int rc; 4971 4972 rc = bs_super_validate(ctx->super, ctx->bs); 4973 if (rc != 0) { 4974 bs_load_ctx_fail(ctx, rc); 4975 return; 4976 } 4977 4978 rc = bs_parse_super(ctx); 4979 if (rc < 0) { 4980 bs_load_ctx_fail(ctx, rc); 4981 return; 4982 } 4983 4984 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4985 bs_recover(ctx); 4986 } else { 4987 bs_load_read_used_pages(ctx); 4988 } 4989 } 4990 4991 static inline int 4992 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4993 { 4994 4995 if (!src->opts_size) { 4996 SPDK_ERRLOG("opts_size should not be zero value\n"); 4997 return -1; 4998 } 4999 5000 #define FIELD_OK(field) \ 5001 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 5002 5003 #define SET_FIELD(field) \ 5004 if (FIELD_OK(field)) { \ 5005 dst->field = src->field; \ 5006 } \ 5007 5008 SET_FIELD(cluster_sz); 5009 SET_FIELD(num_md_pages); 5010 SET_FIELD(max_md_ops); 5011 SET_FIELD(max_channel_ops); 5012 SET_FIELD(clear_method); 5013 5014 if (FIELD_OK(bstype)) { 5015 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 5016 } 5017 SET_FIELD(md_page_size); 5018 SET_FIELD(iter_cb_fn); 5019 SET_FIELD(iter_cb_arg); 5020 SET_FIELD(force_recover); 5021 
SET_FIELD(esnap_bs_dev_create); 5022 SET_FIELD(esnap_ctx); 5023 5024 dst->opts_size = src->opts_size; 5025 5026 /* You should not remove this statement, but need to update the assert statement 5027 * if you add a new field, and also add a corresponding SET_FIELD statement */ 5028 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size"); 5029 5030 #undef FIELD_OK 5031 #undef SET_FIELD 5032 5033 return 0; 5034 } 5035 5036 void 5037 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5038 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5039 { 5040 struct spdk_blob_store *bs; 5041 struct spdk_bs_cpl cpl; 5042 struct spdk_bs_load_ctx *ctx; 5043 struct spdk_bs_opts opts = {}; 5044 int err; 5045 5046 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 5047 5048 if ((dev->phys_blocklen % dev->blocklen) != 0) { 5049 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 5050 dev->destroy(dev); 5051 cb_fn(cb_arg, NULL, -EINVAL); 5052 return; 5053 } 5054 5055 spdk_bs_opts_init(&opts, sizeof(opts)); 5056 if (o) { 5057 if (bs_opts_copy(o, &opts)) { 5058 return; 5059 } 5060 } 5061 5062 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 5063 dev->destroy(dev); 5064 cb_fn(cb_arg, NULL, -EINVAL); 5065 return; 5066 } 5067 5068 err = bs_alloc(dev, &opts, &bs, &ctx); 5069 if (err) { 5070 dev->destroy(dev); 5071 cb_fn(cb_arg, NULL, err); 5072 return; 5073 } 5074 5075 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5076 cpl.u.bs_handle.cb_fn = cb_fn; 5077 cpl.u.bs_handle.cb_arg = cb_arg; 5078 cpl.u.bs_handle.bs = bs; 5079 5080 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5081 if (!ctx->seq) { 5082 spdk_free(ctx->super); 5083 free(ctx); 5084 bs_free(bs); 5085 cb_fn(cb_arg, NULL, -ENOMEM); 5086 return; 5087 } 5088 5089 /* Read the super block */ 5090 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5091 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5092 bs_load_super_cpl, ctx); 5093 } 5094 5095 /* END spdk_bs_load */ 5096 5097 /* START spdk_bs_dump */ 5098 5099 static void 5100 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 5101 { 5102 spdk_free(ctx->super); 5103 5104 /* 5105 * We need to defer calling bs_call_cpl() until after 5106 * dev destruction, so tuck these away for later use. 
5107 */ 5108 ctx->bs->unload_err = bserrno; 5109 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5110 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5111 5112 bs_sequence_finish(seq, 0); 5113 bs_free(ctx->bs); 5114 free(ctx); 5115 } 5116 5117 static void 5118 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5119 { 5120 struct spdk_blob_md_descriptor_xattr *desc_xattr; 5121 uint32_t i; 5122 const char *type; 5123 5124 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 5125 5126 if (desc_xattr->length != 5127 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 5128 desc_xattr->name_length + desc_xattr->value_length) { /* The descriptor's internal lengths are inconsistent; do not parse it further. */ fprintf(ctx->fp, "Malformed xattr descriptor: length mismatch\n"); return; 5129 } 5130 5131 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 5132 ctx->xattr_name[desc_xattr->name_length] = '\0'; 5133 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5134 type = "XATTR"; 5135 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5136 type = "XATTR_INTERNAL"; 5137 } else { 5138 assert(false); 5139 type = "XATTR_?"; 5140 } 5141 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name); 5142 fprintf(ctx->fp, " value = \""); 5143 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 5144 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 5145 desc_xattr->value_length); 5146 fprintf(ctx->fp, "\"\n"); 5147 for (i = 0; i < desc_xattr->value_length; i++) { 5148 if (i % 16 == 0) { 5149 fprintf(ctx->fp, " "); 5150 } 5151 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 5152 if ((i + 1) % 16 == 0) { 5153 fprintf(ctx->fp, "\n"); 5154 } 5155 } 5156 if (i % 16 != 0) { 5157 fprintf(ctx->fp, "\n"); 5158 } 5159 } 5160 5161 struct type_flag_desc { 5162 uint64_t mask; 5163 uint64_t val; 5164 const char *name; 5165 }; 5166 5167 static void 5168 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags, 5169 struct type_flag_desc *desc, size_t numflags) 5170 { 5171 uint64_t covered = 0; 5172 size_t i; 5173 5174 for (i = 0; i < numflags; i++) { 5175 if ((desc[i].mask & flags) != desc[i].val) { 5176 continue; 5177 } 5178 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name); 5179 if (desc[i].mask != desc[i].val) { 5180 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")", 5181 desc[i].mask, desc[i].val); 5182 } 5183 fprintf(ctx->fp, "\n"); 5184 covered |= desc[i].mask; 5185 } 5186 if ((flags & ~covered) != 0) { 5187 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered); 5188 } 5189 } 5190 5191 static void 5192 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5193 { 5194 struct spdk_blob_md_descriptor_flags *type_desc; 5195 #define ADD_FLAG(f) { f, f, #f } 5196 #define ADD_MASK_VAL(m, v) { m, v, #v } 5197 static struct type_flag_desc invalid[] = { 5198 ADD_FLAG(SPDK_BLOB_THIN_PROV), 5199 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR), 5200 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE), 5201 }; 5202 static struct type_flag_desc data_ro[] = { 5203 ADD_FLAG(SPDK_BLOB_READ_ONLY), 5204 }; 5205 static struct type_flag_desc md_ro[] = { 5206 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT), 5207 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE), 5208 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP), 5209 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES), 5210 }; 5211 #undef ADD_FLAG 5212 #undef ADD_MASK_VAL 5213 5214 type_desc = (struct
spdk_blob_md_descriptor_flags *)desc; 5215 fprintf(ctx->fp, "Flags:\n"); 5216 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 5217 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 5218 SPDK_COUNTOF(invalid)); 5219 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 5220 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 5221 SPDK_COUNTOF(data_ro)); 5222 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 5223 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 5224 SPDK_COUNTOF(md_ro)); 5225 } 5226 5227 static void 5228 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5229 { 5230 struct spdk_blob_md_descriptor_extent_table *et_desc; 5231 uint64_t num_extent_pages; 5232 uint32_t et_idx; 5233 5234 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 5235 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 5236 sizeof(et_desc->extent_page[0]); 5237 5238 fprintf(ctx->fp, "Extent table:\n"); 5239 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 5240 if (et_desc->extent_page[et_idx].page_idx == 0) { 5241 /* Zeroes represent unallocated extent pages. */ 5242 continue; 5243 } 5244 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 5245 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 5246 et_desc->extent_page[et_idx].num_pages, 5247 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 5248 } 5249 } 5250 5251 static void 5252 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 5253 { 5254 uint32_t page_idx = ctx->cur_page; 5255 struct spdk_blob_md_page *page = ctx->page; 5256 struct spdk_blob_md_descriptor *desc; 5257 size_t cur_desc = 0; 5258 uint32_t crc; 5259 5260 fprintf(ctx->fp, "=========\n"); 5261 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 5262 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 5263 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 5264 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 5265 if (page->next == SPDK_INVALID_MD_PAGE) { 5266 fprintf(ctx->fp, "Next: None\n"); 5267 } else { 5268 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 5269 } 5270 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 5271 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 5272 fprintf(ctx->fp, " md"); 5273 } 5274 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 5275 fprintf(ctx->fp, " blob"); 5276 } 5277 fprintf(ctx->fp, "\n"); 5278 5279 crc = blob_md_page_calc_crc(page); 5280 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 5281 5282 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 5283 while (cur_desc < sizeof(page->descriptors)) { 5284 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 5285 if (desc->length == 0) { 5286 /* If padding and length are 0, this terminates the page */ 5287 break; 5288 } 5289 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 5290 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 5291 unsigned int i; 5292 5293 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 5294 5295 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 5296 if (desc_extent_rle->extents[i].cluster_idx != 0) { 5297 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5298 desc_extent_rle->extents[i].cluster_idx); 5299 } else { 5300 fprintf(ctx->fp, "Unallocated Extent - "); 5301 } 5302 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 5303 fprintf(ctx->fp, "\n"); 5304 } 5305 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 5306 struct spdk_blob_md_descriptor_extent_page *desc_extent; 5307 unsigned int i; 5308 5309 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 5310 5311 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 5312 if (desc_extent->cluster_idx[i] != 0) { 5313 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5314 desc_extent->cluster_idx[i]); 5315 } else { 5316 fprintf(ctx->fp, "Unallocated Extent"); 5317 } 5318 fprintf(ctx->fp, "\n"); 5319 } 5320 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5321 bs_dump_print_xattr(ctx, desc); 5322 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5323 bs_dump_print_xattr(ctx, desc); 5324 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 5325 bs_dump_print_type_flags(ctx, desc); 5326 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 5327 bs_dump_print_extent_table(ctx, desc); 5328 } else { 5329 /* Error */ 5330 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 5331 } 5332 /* Advance to the next descriptor */ 5333 cur_desc += sizeof(*desc) + desc->length; 5334 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 5335 break; 5336 } 5337 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 5338 } 5339 } 5340 5341 static void 5342 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5343 { 5344 struct spdk_bs_load_ctx *ctx = cb_arg; 5345 5346 if (bserrno != 0) { 5347 bs_dump_finish(seq, ctx, bserrno); 5348 return; 5349 } 5350 5351 if (ctx->page->id != 0) { 5352 bs_dump_print_md_page(ctx); 5353 } 5354 5355 ctx->cur_page++; 5356 5357 if (ctx->cur_page < ctx->super->md_len) { 5358 bs_dump_read_md_page(seq, ctx); 5359 } else { 5360 spdk_free(ctx->page); 5361 bs_dump_finish(seq, ctx, 0); 5362 } 5363 } 5364 5365 static void 5366 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 5367 { 5368 struct spdk_bs_load_ctx *ctx = cb_arg; 5369 uint64_t lba; 5370 5371 assert(ctx->cur_page < ctx->super->md_len); 5372 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 5373 bs_sequence_read_dev(seq, ctx->page, lba, 5374 bs_byte_to_lba(ctx->bs, ctx->super->md_page_size), 5375 bs_dump_read_md_page_cpl, ctx); 5376 } 5377 5378 static void 5379 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5380 { 5381 struct spdk_bs_load_ctx *ctx = cb_arg; 5382 int rc; 5383 5384 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 5385 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5386 sizeof(ctx->super->signature)) != 0) { 5387 fprintf(ctx->fp, "(Mismatch)\n"); 5388 bs_dump_finish(seq, ctx, bserrno); 5389 return; 5390 } else { 5391 fprintf(ctx->fp, "(OK)\n"); 5392 } 5393 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 5394 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 5395 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 5396 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 5397 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 5398 fprintf(ctx->fp, "Super Blob ID: "); 5399 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 5400 fprintf(ctx->fp, "(None)\n"); 5401 } else { 5402 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 5403 } 5404 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 5405 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 5406 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 5407 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 5408 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 5409 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 5410 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 5411 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 5412 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 5413 5414 ctx->cur_page = 0; 5415 ctx->page = spdk_zmalloc(ctx->super->md_page_size, 0, 5416 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5417 if (!ctx->page) { 5418 bs_dump_finish(seq, ctx, -ENOMEM); 5419 return; 5420 } 5421 5422 rc = bs_parse_super(ctx); 5423 if (rc < 0) { 5424 bs_load_ctx_fail(ctx, rc); 5425 return; 5426 } 5427 5428 bs_load_read_used_pages(ctx); 5429 } 5430 5431 void 5432 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 5433 spdk_bs_op_complete cb_fn, void *cb_arg) 5434 { 5435 struct spdk_blob_store *bs; 5436 struct spdk_bs_cpl cpl; 5437 struct spdk_bs_load_ctx *ctx; 5438 struct spdk_bs_opts opts = {}; 5439 int err; 5440 5441 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 5442 5443 spdk_bs_opts_init(&opts, sizeof(opts)); 5444 5445 err = bs_alloc(dev, &opts, &bs, &ctx); 5446 if (err) { 5447 dev->destroy(dev); 5448 cb_fn(cb_arg, err); 5449 return; 5450 } 5451 5452 ctx->dumping = true; 5453 ctx->fp = fp; 5454 ctx->print_xattr_fn = print_xattr_fn; 5455 5456 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5457 cpl.u.bs_basic.cb_fn = cb_fn; 5458 cpl.u.bs_basic.cb_arg = cb_arg; 5459 5460 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5461 if (!ctx->seq) { 5462 spdk_free(ctx->super); 5463 free(ctx); 5464 bs_free(bs); 5465 cb_fn(cb_arg, -ENOMEM); 5466 return; 5467 } 5468 5469 /* Read the super block */ 5470 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5471 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5472 bs_dump_super_cpl, ctx); 5473 } 5474 5475 /* END spdk_bs_dump */ 5476 5477 /* START spdk_bs_init */ 5478 5479 static void 5480 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5481 { 5482 struct spdk_bs_load_ctx *ctx = cb_arg; 5483 5484 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 5485 spdk_free(ctx->super); 5486 free(ctx); 5487 5488 bs_sequence_finish(seq, bserrno); 5489 } 5490 5491 static void 5492 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5493 { 5494 struct spdk_bs_load_ctx *ctx = cb_arg; 5495 5496 /* Write super block */ 5497 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5498 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5499 bs_init_persist_super_cpl, ctx); 5500 } 5501 5502 void 5503 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5504 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5505 { 5506 struct spdk_bs_load_ctx *ctx; 5507 struct spdk_blob_store *bs; 5508 struct spdk_bs_cpl cpl; 5509 spdk_bs_sequence_t *seq; 5510 spdk_bs_batch_t *batch; 5511 uint64_t num_md_lba; 5512 uint64_t num_md_pages; 5513 uint64_t num_md_clusters; 5514 uint64_t max_used_cluster_mask_len; 5515 uint32_t i; 5516 struct spdk_bs_opts opts = {}; 5517 int rc; 5518 uint64_t lba, lba_count; 5519 5520 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5521 if ((dev->phys_blocklen % dev->blocklen) != 0) { 5522 SPDK_ERRLOG("unsupported dev block length of %d\n", 5523 dev->blocklen); 5524 dev->destroy(dev); 5525 cb_fn(cb_arg, NULL, -EINVAL); 5526 return; 5527 } 5528 5529 spdk_bs_opts_init(&opts, sizeof(opts)); 5530 if (o) { 5531 if (bs_opts_copy(o, &opts)) { 5532 return; 5533 } 5534 } 5535 5536 if (bs_opts_verify(&opts) != 0) { 5537 dev->destroy(dev); 5538 cb_fn(cb_arg, NULL, -EINVAL); 5539 return; 5540 } 5541 5542 rc = bs_alloc(dev, &opts, &bs, &ctx); 5543 if (rc) { 5544 dev->destroy(dev); 5545 cb_fn(cb_arg, NULL, rc); 5546 return; 5547 } 5548 5549 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5550 /* By default, allocate 1 page per cluster. 5551 * Technically, this over-allocates metadata 5552 * because more metadata will reduce the number 5553 * of usable clusters. This can be addressed with 5554 * more complex math in the future. 5555 */ 5556 bs->md_len = bs->total_clusters; 5557 } else { 5558 bs->md_len = opts.num_md_pages; 5559 } 5560 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5561 if (rc < 0) { 5562 spdk_free(ctx->super); 5563 free(ctx); 5564 bs_free(bs); 5565 cb_fn(cb_arg, NULL, -ENOMEM); 5566 return; 5567 } 5568 5569 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5570 if (rc < 0) { 5571 spdk_free(ctx->super); 5572 free(ctx); 5573 bs_free(bs); 5574 cb_fn(cb_arg, NULL, -ENOMEM); 5575 return; 5576 } 5577 5578 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5579 if (rc < 0) { 5580 spdk_free(ctx->super); 5581 free(ctx); 5582 bs_free(bs); 5583 cb_fn(cb_arg, NULL, -ENOMEM); 5584 return; 5585 } 5586 5587 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5588 sizeof(ctx->super->signature)); 5589 ctx->super->version = SPDK_BS_VERSION; 5590 ctx->super->length = sizeof(*ctx->super); 5591 ctx->super->super_blob = bs->super_blob; 5592 ctx->super->clean = 0; 5593 ctx->super->cluster_size = bs->cluster_sz; 5594 ctx->super->io_unit_size = bs->io_unit_size; 5595 ctx->super->md_page_size = bs->md_page_size; 5596 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5597 5598 /* Calculate how many pages the metadata consumes at the front 5599 * of the disk. 5600 */ 5601 5602 /* The super block uses 1 page */ 5603 num_md_pages = 1; 5604 5605 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5606 * up to the nearest page, plus a header. 
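 * A worked example with illustrative numbers only: for md_len = 32768
 * metadata pages and a 4 KiB md page, the bitmap itself needs
 * 32768 / 8 = 4096 bytes, so with the spdk_bs_md_mask header added the
 * division below rounds up to a used_page_mask_len of 2 pages.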
5607      */
5608     ctx->super->used_page_mask_start = num_md_pages;
5609     ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5610                                      spdk_divide_round_up(bs->md_len, 8),
5611                                      ctx->super->md_page_size);
5612     num_md_pages += ctx->super->used_page_mask_len;
5613
5614     /* The used_clusters mask requires 1 bit per cluster, rounded
5615      * up to the nearest page, plus a header.
5616      */
5617     ctx->super->used_cluster_mask_start = num_md_pages;
5618     ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5619                                         spdk_divide_round_up(bs->total_clusters, 8),
5620                                         ctx->super->md_page_size);
5621     /* The blobstore might be extended, then the used_cluster bitmap will need more space.
5622      * Here we calculate the max clusters we can support according to the
5623      * num_md_pages (bs->md_len).
5624      */
5625     max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5626                                 spdk_divide_round_up(bs->md_len, 8),
5627                                 ctx->super->md_page_size);
5628     max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len,
5629                                          ctx->super->used_cluster_mask_len);
5630     num_md_pages += max_used_cluster_mask_len;
5631
5632     /* The used_blobids mask requires 1 bit per metadata page, rounded
5633      * up to the nearest page, plus a header.
5634      */
5635     ctx->super->used_blobid_mask_start = num_md_pages;
5636     ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5637                                        spdk_divide_round_up(bs->md_len, 8),
5638                                        ctx->super->md_page_size);
5639     num_md_pages += ctx->super->used_blobid_mask_len;
5640
5641     /* The metadata region size was chosen above */
5642     ctx->super->md_start = bs->md_start = num_md_pages;
5643     ctx->super->md_len = bs->md_len;
5644     num_md_pages += bs->md_len;
5645
5646     num_md_lba = bs_page_to_lba(bs, num_md_pages);
5647
5648     ctx->super->size = dev->blockcnt * dev->blocklen;
5649
5650     ctx->super->crc = blob_md_page_calc_crc(ctx->super);
5651
5652     num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
5653     if (num_md_clusters > bs->total_clusters) {
5654         SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available; "
5655                     "please decrease the number of pages reserved for metadata "
5656                     "or increase the cluster size.\n");
5657         spdk_free(ctx->super);
5658         spdk_bit_array_free(&ctx->used_clusters);
5659         free(ctx);
5660         bs_free(bs);
5661         cb_fn(cb_arg, NULL, -ENOMEM);
5662         return;
5663     }
5664     /* Claim all of the clusters used by the metadata */
5665     for (i = 0; i < num_md_clusters; i++) {
5666         spdk_bit_array_set(ctx->used_clusters, i);
5667     }
5668
5669     bs->num_free_clusters -= num_md_clusters;
5670     bs->total_data_clusters = bs->num_free_clusters;
5671
5672     cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
5673     cpl.u.bs_handle.cb_fn = cb_fn;
5674     cpl.u.bs_handle.cb_arg = cb_arg;
5675     cpl.u.bs_handle.bs = bs;
5676
5677     seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5678     if (!seq) {
5679         spdk_free(ctx->super);
5680         free(ctx);
5681         bs_free(bs);
5682         cb_fn(cb_arg, NULL, -ENOMEM);
5683         return;
5684     }
5685
5686     batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx);
5687
5688     /* Clear metadata space */
5689     bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
5690
5691     lba = num_md_lba;
5692     lba_count = ctx->bs->dev->blockcnt - lba;
5693     switch (opts.clear_method) {
5694     case BS_CLEAR_WITH_UNMAP:
5695         /* Trim data clusters */
5696         bs_batch_unmap_dev(batch, lba, lba_count);
5697         break;
5698     case BS_CLEAR_WITH_WRITE_ZEROES:
5699         /* Write_zeroes to data clusters */
5700         bs_batch_write_zeroes_dev(batch, lba,
lba_count); 5701 break; 5702 case BS_CLEAR_WITH_NONE: 5703 default: 5704 break; 5705 } 5706 5707 bs_batch_close(batch); 5708 } 5709 5710 /* END spdk_bs_init */ 5711 5712 /* START spdk_bs_destroy */ 5713 5714 static void 5715 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5716 { 5717 struct spdk_bs_load_ctx *ctx = cb_arg; 5718 struct spdk_blob_store *bs = ctx->bs; 5719 5720 /* 5721 * We need to defer calling bs_call_cpl() until after 5722 * dev destruction, so tuck these away for later use. 5723 */ 5724 bs->unload_err = bserrno; 5725 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5726 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5727 5728 bs_sequence_finish(seq, bserrno); 5729 5730 bs_free(bs); 5731 free(ctx); 5732 } 5733 5734 void 5735 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5736 void *cb_arg) 5737 { 5738 struct spdk_bs_cpl cpl; 5739 spdk_bs_sequence_t *seq; 5740 struct spdk_bs_load_ctx *ctx; 5741 5742 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5743 5744 if (!RB_EMPTY(&bs->open_blobs)) { 5745 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5746 cb_fn(cb_arg, -EBUSY); 5747 return; 5748 } 5749 5750 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5751 cpl.u.bs_basic.cb_fn = cb_fn; 5752 cpl.u.bs_basic.cb_arg = cb_arg; 5753 5754 ctx = calloc(1, sizeof(*ctx)); 5755 if (!ctx) { 5756 cb_fn(cb_arg, -ENOMEM); 5757 return; 5758 } 5759 5760 ctx->bs = bs; 5761 5762 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5763 if (!seq) { 5764 free(ctx); 5765 cb_fn(cb_arg, -ENOMEM); 5766 return; 5767 } 5768 5769 /* Write zeroes to the super block */ 5770 bs_sequence_write_zeroes_dev(seq, 5771 bs_page_to_lba(bs, 0), 5772 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5773 bs_destroy_trim_cpl, ctx); 5774 } 5775 5776 /* END spdk_bs_destroy */ 5777 5778 /* START spdk_bs_unload */ 5779 5780 static void 5781 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5782 { 5783 spdk_bs_sequence_t *seq = ctx->seq; 5784 5785 spdk_free(ctx->super); 5786 5787 /* 5788 * We need to defer calling bs_call_cpl() until after 5789 * dev destruction, so tuck these away for later use. 
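     * (bs_free() below is what ultimately destroys the dev; deferring means
     * the caller's completion never runs against a blobstore that is in the
     * middle of being torn down.)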
5790 */ 5791 ctx->bs->unload_err = bserrno; 5792 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5793 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5794 5795 bs_sequence_finish(seq, bserrno); 5796 5797 bs_free(ctx->bs); 5798 free(ctx); 5799 } 5800 5801 static void 5802 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5803 { 5804 struct spdk_bs_load_ctx *ctx = cb_arg; 5805 5806 bs_unload_finish(ctx, bserrno); 5807 } 5808 5809 static void 5810 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5811 { 5812 struct spdk_bs_load_ctx *ctx = cb_arg; 5813 5814 spdk_free(ctx->mask); 5815 5816 if (bserrno != 0) { 5817 bs_unload_finish(ctx, bserrno); 5818 return; 5819 } 5820 5821 ctx->super->clean = 1; 5822 5823 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5824 } 5825 5826 static void 5827 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5828 { 5829 struct spdk_bs_load_ctx *ctx = cb_arg; 5830 5831 spdk_free(ctx->mask); 5832 ctx->mask = NULL; 5833 5834 if (bserrno != 0) { 5835 bs_unload_finish(ctx, bserrno); 5836 return; 5837 } 5838 5839 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5840 } 5841 5842 static void 5843 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5844 { 5845 struct spdk_bs_load_ctx *ctx = cb_arg; 5846 5847 spdk_free(ctx->mask); 5848 ctx->mask = NULL; 5849 5850 if (bserrno != 0) { 5851 bs_unload_finish(ctx, bserrno); 5852 return; 5853 } 5854 5855 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5856 } 5857 5858 static void 5859 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5860 { 5861 struct spdk_bs_load_ctx *ctx = cb_arg; 5862 int rc; 5863 5864 if (bserrno != 0) { 5865 bs_unload_finish(ctx, bserrno); 5866 return; 5867 } 5868 5869 rc = bs_super_validate(ctx->super, ctx->bs); 5870 if (rc != 0) { 5871 bs_unload_finish(ctx, rc); 5872 return; 5873 } 5874 5875 bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5876 } 5877 5878 void 5879 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5880 { 5881 struct spdk_bs_cpl cpl; 5882 struct spdk_bs_load_ctx *ctx; 5883 5884 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5885 5886 /* 5887 * If external snapshot channels are being destroyed while the blobstore is unloaded, the 5888 * unload is deferred until after the channel destruction completes. 
5889 */ 5890 if (bs->esnap_channels_unloading != 0) { 5891 if (bs->esnap_unload_cb_fn != NULL) { 5892 SPDK_ERRLOG("Blobstore unload in progress\n"); 5893 cb_fn(cb_arg, -EBUSY); 5894 return; 5895 } 5896 SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32 5897 " esnap clones are unloading\n", bs->esnap_channels_unloading); 5898 bs->esnap_unload_cb_fn = cb_fn; 5899 bs->esnap_unload_cb_arg = cb_arg; 5900 return; 5901 } 5902 if (bs->esnap_unload_cb_fn != NULL) { 5903 SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n"); 5904 assert(bs->esnap_unload_cb_fn == cb_fn); 5905 assert(bs->esnap_unload_cb_arg == cb_arg); 5906 bs->esnap_unload_cb_fn = NULL; 5907 bs->esnap_unload_cb_arg = NULL; 5908 } 5909 5910 if (!RB_EMPTY(&bs->open_blobs)) { 5911 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5912 cb_fn(cb_arg, -EBUSY); 5913 return; 5914 } 5915 5916 ctx = calloc(1, sizeof(*ctx)); 5917 if (!ctx) { 5918 cb_fn(cb_arg, -ENOMEM); 5919 return; 5920 } 5921 5922 ctx->bs = bs; 5923 5924 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5925 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5926 if (!ctx->super) { 5927 free(ctx); 5928 cb_fn(cb_arg, -ENOMEM); 5929 return; 5930 } 5931 5932 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5933 cpl.u.bs_basic.cb_fn = cb_fn; 5934 cpl.u.bs_basic.cb_arg = cb_arg; 5935 5936 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5937 if (!ctx->seq) { 5938 spdk_free(ctx->super); 5939 free(ctx); 5940 cb_fn(cb_arg, -ENOMEM); 5941 return; 5942 } 5943 5944 /* Read super block */ 5945 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5946 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5947 bs_unload_read_super_cpl, ctx); 5948 } 5949 5950 /* END spdk_bs_unload */ 5951 5952 /* START spdk_bs_set_super */ 5953 5954 struct spdk_bs_set_super_ctx { 5955 struct spdk_blob_store *bs; 5956 struct spdk_bs_super_block *super; 5957 }; 5958 5959 static void 5960 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5961 { 5962 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5963 5964 if (bserrno != 0) { 5965 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5966 } 5967 5968 spdk_free(ctx->super); 5969 5970 bs_sequence_finish(seq, bserrno); 5971 5972 free(ctx); 5973 } 5974 5975 static void 5976 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5977 { 5978 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5979 int rc; 5980 5981 if (bserrno != 0) { 5982 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5983 spdk_free(ctx->super); 5984 bs_sequence_finish(seq, bserrno); 5985 free(ctx); 5986 return; 5987 } 5988 5989 rc = bs_super_validate(ctx->super, ctx->bs); 5990 if (rc != 0) { 5991 SPDK_ERRLOG("Not a valid super block\n"); 5992 spdk_free(ctx->super); 5993 bs_sequence_finish(seq, rc); 5994 free(ctx); 5995 return; 5996 } 5997 5998 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 5999 } 6000 6001 void 6002 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 6003 spdk_bs_op_complete cb_fn, void *cb_arg) 6004 { 6005 struct spdk_bs_cpl cpl; 6006 spdk_bs_sequence_t *seq; 6007 struct spdk_bs_set_super_ctx *ctx; 6008 6009 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 6010 6011 ctx = calloc(1, sizeof(*ctx)); 6012 if (!ctx) { 6013 cb_fn(cb_arg, -ENOMEM); 6014 return; 6015 } 6016 6017 ctx->bs = bs; 6018 6019 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 6020 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 6021 if (!ctx->super) { 6022 free(ctx); 6023 cb_fn(cb_arg, 
-ENOMEM); 6024 return; 6025 } 6026 6027 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 6028 cpl.u.bs_basic.cb_fn = cb_fn; 6029 cpl.u.bs_basic.cb_arg = cb_arg; 6030 6031 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 6032 if (!seq) { 6033 spdk_free(ctx->super); 6034 free(ctx); 6035 cb_fn(cb_arg, -ENOMEM); 6036 return; 6037 } 6038 6039 bs->super_blob = blobid; 6040 6041 /* Read super block */ 6042 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 6043 bs_byte_to_lba(bs, sizeof(*ctx->super)), 6044 bs_set_super_read_cpl, ctx); 6045 } 6046 6047 /* END spdk_bs_set_super */ 6048 6049 void 6050 spdk_bs_get_super(struct spdk_blob_store *bs, 6051 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6052 { 6053 if (bs->super_blob == SPDK_BLOBID_INVALID) { 6054 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 6055 } else { 6056 cb_fn(cb_arg, bs->super_blob, 0); 6057 } 6058 } 6059 6060 uint64_t 6061 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 6062 { 6063 return bs->cluster_sz; 6064 } 6065 6066 uint64_t 6067 spdk_bs_get_page_size(struct spdk_blob_store *bs) 6068 { 6069 return bs->md_page_size; 6070 } 6071 6072 uint64_t 6073 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 6074 { 6075 return bs->io_unit_size; 6076 } 6077 6078 uint64_t 6079 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 6080 { 6081 return bs->num_free_clusters; 6082 } 6083 6084 uint64_t 6085 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 6086 { 6087 return bs->total_data_clusters; 6088 } 6089 6090 static int 6091 bs_register_md_thread(struct spdk_blob_store *bs) 6092 { 6093 bs->md_channel = spdk_get_io_channel(bs); 6094 if (!bs->md_channel) { 6095 SPDK_ERRLOG("Failed to get IO channel.\n"); 6096 return -1; 6097 } 6098 6099 return 0; 6100 } 6101 6102 static int 6103 bs_unregister_md_thread(struct spdk_blob_store *bs) 6104 { 6105 spdk_put_io_channel(bs->md_channel); 6106 6107 return 0; 6108 } 6109 6110 spdk_blob_id 6111 spdk_blob_get_id(struct spdk_blob *blob) 6112 { 6113 assert(blob != NULL); 6114 6115 return blob->id; 6116 } 6117 6118 uint64_t 6119 spdk_blob_get_num_io_units(struct spdk_blob *blob) 6120 { 6121 assert(blob != NULL); 6122 6123 return bs_cluster_to_io_unit(blob->bs, blob->active.num_clusters); 6124 } 6125 6126 uint64_t 6127 spdk_blob_get_num_clusters(struct spdk_blob *blob) 6128 { 6129 assert(blob != NULL); 6130 6131 return blob->active.num_clusters; 6132 } 6133 6134 uint64_t 6135 spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob) 6136 { 6137 assert(blob != NULL); 6138 6139 return blob->active.num_allocated_clusters; 6140 } 6141 6142 static uint64_t 6143 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated) 6144 { 6145 uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob); 6146 6147 while (offset < blob_io_unit_num) { 6148 if (bs_io_unit_is_allocated(blob, offset) == is_allocated) { 6149 return offset; 6150 } 6151 6152 offset += bs_num_io_units_to_cluster_boundary(blob, offset); 6153 } 6154 6155 return UINT64_MAX; 6156 } 6157 6158 uint64_t 6159 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6160 { 6161 return blob_find_io_unit(blob, offset, true); 6162 } 6163 6164 uint64_t 6165 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6166 { 6167 return blob_find_io_unit(blob, offset, false); 6168 } 6169 6170 /* START spdk_bs_create_blob */ 6171 6172 static void 6173 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6174 { 6175 struct spdk_blob *blob = cb_arg; 6176 uint32_t 
page_idx = bs_blobid_to_page(blob->id); 6177 6178 if (bserrno != 0) { 6179 spdk_spin_lock(&blob->bs->used_lock); 6180 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 6181 bs_release_md_page(blob->bs, page_idx); 6182 spdk_spin_unlock(&blob->bs->used_lock); 6183 } 6184 6185 blob_free(blob); 6186 6187 bs_sequence_finish(seq, bserrno); 6188 } 6189 6190 static int 6191 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 6192 bool internal) 6193 { 6194 uint64_t i; 6195 size_t value_len = 0; 6196 int rc; 6197 const void *value = NULL; 6198 if (xattrs->count > 0 && xattrs->get_value == NULL) { 6199 return -EINVAL; 6200 } 6201 for (i = 0; i < xattrs->count; i++) { 6202 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 6203 if (value == NULL || value_len == 0) { 6204 return -EINVAL; 6205 } 6206 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 6207 if (rc < 0) { 6208 return rc; 6209 } 6210 } 6211 return 0; 6212 } 6213 6214 static void 6215 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 6216 { 6217 #define FIELD_OK(field) \ 6218 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 6219 6220 #define SET_FIELD(field) \ 6221 if (FIELD_OK(field)) { \ 6222 dst->field = src->field; \ 6223 } \ 6224 6225 SET_FIELD(num_clusters); 6226 SET_FIELD(thin_provision); 6227 SET_FIELD(clear_method); 6228 6229 if (FIELD_OK(xattrs)) { 6230 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 6231 } 6232 6233 SET_FIELD(use_extent_table); 6234 SET_FIELD(esnap_id); 6235 SET_FIELD(esnap_id_len); 6236 6237 dst->opts_size = src->opts_size; 6238 6239 /* You should not remove this statement, but need to update the assert statement 6240 * if you add a new field, and also add a corresponding SET_FIELD statement */ 6241 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size"); 6242 6243 #undef FIELD_OK 6244 #undef SET_FIELD 6245 } 6246 6247 static void 6248 bs_create_blob(struct spdk_blob_store *bs, 6249 const struct spdk_blob_opts *opts, 6250 const struct spdk_blob_xattr_opts *internal_xattrs, 6251 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6252 { 6253 struct spdk_blob *blob; 6254 uint32_t page_idx; 6255 struct spdk_bs_cpl cpl; 6256 struct spdk_blob_opts opts_local; 6257 struct spdk_blob_xattr_opts internal_xattrs_default; 6258 spdk_bs_sequence_t *seq; 6259 spdk_blob_id id; 6260 int rc; 6261 6262 assert(spdk_get_thread() == bs->md_thread); 6263 6264 spdk_spin_lock(&bs->used_lock); 6265 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 6266 if (page_idx == UINT32_MAX) { 6267 spdk_spin_unlock(&bs->used_lock); 6268 cb_fn(cb_arg, 0, -ENOMEM); 6269 return; 6270 } 6271 spdk_bit_array_set(bs->used_blobids, page_idx); 6272 bs_claim_md_page(bs, page_idx); 6273 spdk_spin_unlock(&bs->used_lock); 6274 6275 id = bs_page_to_blobid(page_idx); 6276 6277 SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx); 6278 6279 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 6280 if (opts) { 6281 blob_opts_copy(opts, &opts_local); 6282 } 6283 6284 blob = blob_alloc(bs, id); 6285 if (!blob) { 6286 rc = -ENOMEM; 6287 goto error; 6288 } 6289 6290 blob->use_extent_table = opts_local.use_extent_table; 6291 if (blob->use_extent_table) { 6292 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 6293 } 6294 6295 if (!internal_xattrs) { 6296 blob_xattrs_init(&internal_xattrs_default); 6297 internal_xattrs = &internal_xattrs_default; 6298 } 6299 6300 rc = 
blob_set_xattrs(blob, &opts_local.xattrs, false);
6301     if (rc < 0) {
6302         goto error;
6303     }
6304
6305     rc = blob_set_xattrs(blob, internal_xattrs, true);
6306     if (rc < 0) {
6307         goto error;
6308     }
6309
6310     if (opts_local.thin_provision) {
6311         blob_set_thin_provision(blob);
6312     }
6313
6314     blob_set_clear_method(blob, opts_local.clear_method);
6315
6316     if (opts_local.esnap_id != NULL) {
6317         if (opts_local.esnap_id_len > UINT16_MAX) {
6318             SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n",
6319                         opts_local.esnap_id_len);
6320             rc = -EINVAL;
6321             goto error;
6322
6323         }
6324         blob_set_thin_provision(blob);
6325         blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6326         rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID,
6327                             opts_local.esnap_id, opts_local.esnap_id_len, true);
6328         if (rc != 0) {
6329             goto error;
6330         }
6331     }
6332
6333     rc = blob_resize(blob, opts_local.num_clusters);
6334     if (rc < 0) {
6335         goto error;
6336     }
6337     cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6338     cpl.u.blobid.cb_fn = cb_fn;
6339     cpl.u.blobid.cb_arg = cb_arg;
6340     cpl.u.blobid.blobid = blob->id;
6341
6342     seq = bs_sequence_start_bs(bs->md_channel, &cpl);
6343     if (!seq) {
6344         rc = -ENOMEM;
6345         goto error;
6346     }
6347
6348     blob_persist(seq, blob, bs_create_blob_cpl, blob);
6349     return;
6350
6351 error:
6352     SPDK_ERRLOG("Failed to create blob: %s, size in clusters: %" PRIu64 "\n",
6353                 spdk_strerror(rc), opts_local.num_clusters);
6354     if (blob != NULL) {
6355         blob_free(blob);
6356     }
6357     spdk_spin_lock(&bs->used_lock);
6358     spdk_bit_array_clear(bs->used_blobids, page_idx);
6359     bs_release_md_page(bs, page_idx);
6360     spdk_spin_unlock(&bs->used_lock);
6361     cb_fn(cb_arg, 0, rc);
6362 }
6363
6364 void
6365 spdk_bs_create_blob(struct spdk_blob_store *bs,
6366                     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6367 {
6368     bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
6369 }
6370
6371 void
6372 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
6373                         spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6374 {
6375     bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
6376 }
6377
6378 /* END spdk_bs_create_blob */
6379
6380 /* START blob_cleanup */
6381
6382 struct spdk_clone_snapshot_ctx {
6383     struct spdk_bs_cpl cpl;
6384     int bserrno;
6385     bool frozen;
6386
6387     struct spdk_io_channel *channel;
6388
6389     /* Current cluster for inflate operation */
6390     uint64_t cluster;
6391
6392     /* For inflation force allocation of all unallocated clusters and remove
6393      * thin-provisioning. Otherwise only decouple parent and keep clone thin. */
6394     bool allocate_all;
6395
6396     struct {
6397         spdk_blob_id id;
6398         struct spdk_blob *blob;
6399         bool md_ro;
6400     } original;
6401     struct {
6402         spdk_blob_id id;
6403         struct spdk_blob *blob;
6404     } new;
6405
6406     /* xattrs specified for snapshot/clones only. They have no impact on
6407      * the original blob's xattrs.
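     * For example (an illustrative sketch; the helper and callback names
     * are hypothetical), a caller that wants the new snapshot tagged with
     * a name would do:
     *
     *     static void
     *     get_name_xattr(void *ctx, const char *name, const void **value,
     *                    size_t *value_len)
     *     {
     *         *value = "before-upgrade";
     *         *value_len = sizeof("before-upgrade");
     *     }
     *
     *     char *names[] = { "name" };
     *     struct spdk_blob_xattr_opts xattrs = {
     *         .count = 1, .names = names, .ctx = NULL,
     *         .get_value = get_name_xattr,
     *     };
     *     spdk_bs_create_snapshot(bs, blobid, &xattrs, snap_done, NULL);
     *
     * Only the new snapshot receives the "name" xattr; the source blob is
     * left untouched.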
*/ 6408 const struct spdk_blob_xattr_opts *xattrs; 6409 }; 6410 6411 static void 6412 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 6413 { 6414 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 6415 struct spdk_bs_cpl *cpl = &ctx->cpl; 6416 6417 if (bserrno != 0) { 6418 if (ctx->bserrno != 0) { 6419 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6420 } else { 6421 ctx->bserrno = bserrno; 6422 } 6423 } 6424 6425 switch (cpl->type) { 6426 case SPDK_BS_CPL_TYPE_BLOBID: 6427 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 6428 break; 6429 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 6430 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 6431 break; 6432 default: 6433 SPDK_UNREACHABLE(); 6434 break; 6435 } 6436 6437 free(ctx); 6438 } 6439 6440 static void 6441 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6442 { 6443 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6444 struct spdk_blob *origblob = ctx->original.blob; 6445 6446 if (bserrno != 0) { 6447 if (ctx->bserrno != 0) { 6448 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 6449 } else { 6450 ctx->bserrno = bserrno; 6451 } 6452 } 6453 6454 ctx->original.id = origblob->id; 6455 origblob->locked_operation_in_progress = false; 6456 6457 /* Revert md_ro to original state */ 6458 origblob->md_ro = ctx->original.md_ro; 6459 6460 spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx); 6461 } 6462 6463 static void 6464 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 6465 { 6466 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6467 struct spdk_blob *origblob = ctx->original.blob; 6468 6469 if (bserrno != 0) { 6470 if (ctx->bserrno != 0) { 6471 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6472 } else { 6473 ctx->bserrno = bserrno; 6474 } 6475 } 6476 6477 if (ctx->frozen) { 6478 /* Unfreeze any outstanding I/O */ 6479 blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx); 6480 } else { 6481 bs_snapshot_unfreeze_cpl(ctx, 0); 6482 } 6483 6484 } 6485 6486 static void 6487 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno) 6488 { 6489 struct spdk_blob *newblob = ctx->new.blob; 6490 6491 if (bserrno != 0) { 6492 if (ctx->bserrno != 0) { 6493 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6494 } else { 6495 ctx->bserrno = bserrno; 6496 } 6497 } 6498 6499 ctx->new.id = newblob->id; 6500 spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6501 } 6502 6503 /* END blob_cleanup */ 6504 6505 /* START spdk_bs_create_snapshot */ 6506 6507 static void 6508 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 6509 { 6510 uint64_t *cluster_temp; 6511 uint64_t num_allocated_clusters_temp; 6512 uint32_t *extent_page_temp; 6513 6514 cluster_temp = blob1->active.clusters; 6515 blob1->active.clusters = blob2->active.clusters; 6516 blob2->active.clusters = cluster_temp; 6517 6518 num_allocated_clusters_temp = blob1->active.num_allocated_clusters; 6519 blob1->active.num_allocated_clusters = blob2->active.num_allocated_clusters; 6520 blob2->active.num_allocated_clusters = num_allocated_clusters_temp; 6521 6522 extent_page_temp = blob1->active.extent_pages; 6523 blob1->active.extent_pages = blob2->active.extent_pages; 6524 blob2->active.extent_pages = extent_page_temp; 6525 } 6526 6527 /* Copies an internal xattr */ 6528 static int 6529 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name) 6530 { 6531 const void *val = NULL; 6532 
size_t len;
6533     int bserrno;
6534
6535     bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true);
6536     if (bserrno != 0) {
6537         SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name);
6538         return bserrno;
6539     }
6540
6541     bserrno = blob_set_xattr(toblob, name, val, len, true);
6542     if (bserrno != 0) {
6543         SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n",
6544                     name, toblob->id);
6545         return bserrno;
6546     }
6547     return 0;
6548 }
6549
6550 static void
6551 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
6552 {
6553     struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6554     struct spdk_blob *origblob = ctx->original.blob;
6555     struct spdk_blob *newblob = ctx->new.blob;
6556
6557     if (bserrno != 0) {
6558         bs_snapshot_swap_cluster_maps(newblob, origblob);
6559         if (blob_is_esnap_clone(newblob)) {
6560             bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6561             origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6562         }
6563         bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6564         return;
6565     }
6566
6567     /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
6568     bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
6569     if (bserrno != 0) {
6570         bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6571         return;
6572     }
6573
6574     bs_blob_list_add(ctx->original.blob);
6575
6576     spdk_blob_set_read_only(newblob);
6577
6578     /* sync snapshot metadata */
6579     spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6580 }
6581
6582 static void
6583 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
6584 {
6585     struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6586     struct spdk_blob *origblob = ctx->original.blob;
6587     struct spdk_blob *newblob = ctx->new.blob;
6588
6589     if (bserrno != 0) {
6590         /* return cluster map back to original */
6591         bs_snapshot_swap_cluster_maps(newblob, origblob);
6592
6593         /* Newblob md sync failed. Valid clusters are only present in origblob.
6594          * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred.
6595          * Newblob needs to be reverted to the thin_provisioned state it had at creation to close properly.
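         * The asserts that follow depend on exactly this: once the swap is
         * undone, newblob's cluster map and extent page array must be all
         * zeroes again.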
*/ 6596 blob_set_thin_provision(newblob); 6597 assert(spdk_mem_all_zero(newblob->active.clusters, 6598 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6599 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6600 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6601 6602 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6603 return; 6604 } 6605 6606 /* Set internal xattr for snapshot id */ 6607 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 6608 if (bserrno != 0) { 6609 /* return cluster map back to original */ 6610 bs_snapshot_swap_cluster_maps(newblob, origblob); 6611 blob_set_thin_provision(newblob); 6612 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6613 return; 6614 } 6615 6616 /* Create new back_bs_dev for snapshot */ 6617 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 6618 if (origblob->back_bs_dev == NULL) { 6619 /* return cluster map back to original */ 6620 bs_snapshot_swap_cluster_maps(newblob, origblob); 6621 blob_set_thin_provision(newblob); 6622 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 6623 return; 6624 } 6625 6626 /* Remove the xattr that references an external snapshot */ 6627 if (blob_is_esnap_clone(origblob)) { 6628 origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6629 bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6630 if (bserrno != 0) { 6631 if (bserrno == -ENOENT) { 6632 SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID 6633 " xattr to remove\n", origblob->id); 6634 assert(false); 6635 } else { 6636 /* return cluster map back to original */ 6637 bs_snapshot_swap_cluster_maps(newblob, origblob); 6638 blob_set_thin_provision(newblob); 6639 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6640 return; 6641 } 6642 } 6643 } 6644 6645 bs_blob_list_remove(origblob); 6646 origblob->parent_id = newblob->id; 6647 /* set clone blob as thin provisioned */ 6648 blob_set_thin_provision(origblob); 6649 6650 bs_blob_list_add(newblob); 6651 6652 /* sync clone metadata */ 6653 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 6654 } 6655 6656 static void 6657 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 6658 { 6659 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6660 struct spdk_blob *origblob = ctx->original.blob; 6661 struct spdk_blob *newblob = ctx->new.blob; 6662 int bserrno; 6663 6664 if (rc != 0) { 6665 bs_clone_snapshot_newblob_cleanup(ctx, rc); 6666 return; 6667 } 6668 6669 ctx->frozen = true; 6670 6671 if (blob_is_esnap_clone(origblob)) { 6672 /* Clean up any channels associated with the original blob id because future IO will 6673 * perform IO using the snapshot blob_id. 
6674 */ 6675 blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL); 6676 } 6677 if (newblob->back_bs_dev) { 6678 blob_back_bs_destroy(newblob); 6679 } 6680 /* set new back_bs_dev for snapshot */ 6681 newblob->back_bs_dev = origblob->back_bs_dev; 6682 /* Set invalid flags from origblob */ 6683 newblob->invalid_flags = origblob->invalid_flags; 6684 6685 /* inherit parent from original blob if set */ 6686 newblob->parent_id = origblob->parent_id; 6687 switch (origblob->parent_id) { 6688 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 6689 bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6690 if (bserrno != 0) { 6691 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6692 return; 6693 } 6694 break; 6695 case SPDK_BLOBID_INVALID: 6696 break; 6697 default: 6698 /* Set internal xattr for snapshot id */ 6699 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6700 &origblob->parent_id, sizeof(spdk_blob_id), true); 6701 if (bserrno != 0) { 6702 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6703 return; 6704 } 6705 } 6706 6707 /* swap cluster maps */ 6708 bs_snapshot_swap_cluster_maps(newblob, origblob); 6709 6710 /* Set the clear method on the new blob to match the original. */ 6711 blob_set_clear_method(newblob, origblob->clear_method); 6712 6713 /* sync snapshot metadata */ 6714 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6715 } 6716 6717 static void 6718 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6719 { 6720 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6721 struct spdk_blob *origblob = ctx->original.blob; 6722 struct spdk_blob *newblob = _blob; 6723 6724 if (bserrno != 0) { 6725 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6726 return; 6727 } 6728 6729 ctx->new.blob = newblob; 6730 assert(spdk_blob_is_thin_provisioned(newblob)); 6731 assert(spdk_mem_all_zero(newblob->active.clusters, 6732 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6733 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6734 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6735 6736 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6737 } 6738 6739 static void 6740 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6741 { 6742 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6743 struct spdk_blob *origblob = ctx->original.blob; 6744 6745 if (bserrno != 0) { 6746 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6747 return; 6748 } 6749 6750 ctx->new.id = blobid; 6751 ctx->cpl.u.blobid.blobid = blobid; 6752 6753 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6754 } 6755 6756 6757 static void 6758 bs_xattr_snapshot(void *arg, const char *name, 6759 const void **value, size_t *value_len) 6760 { 6761 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6762 6763 struct spdk_blob *blob = (struct spdk_blob *)arg; 6764 *value = &blob->id; 6765 *value_len = sizeof(blob->id); 6766 } 6767 6768 static void 6769 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6770 { 6771 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6772 struct spdk_blob_opts opts; 6773 struct spdk_blob_xattr_opts internal_xattrs; 6774 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6775 6776 if (bserrno != 0) { 6777 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6778 return; 6779 } 6780 6781 
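    /* From here on, the open handle is owned by ctx; every failure path
     * below releases it via spdk_blob_close(). */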
ctx->original.blob = _blob; 6782 6783 if (_blob->data_ro || _blob->md_ro) { 6784 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id 0x%" 6785 PRIx64 "\n", _blob->id); 6786 ctx->bserrno = -EINVAL; 6787 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6788 return; 6789 } 6790 6791 if (_blob->locked_operation_in_progress) { 6792 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6793 ctx->bserrno = -EBUSY; 6794 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6795 return; 6796 } 6797 6798 _blob->locked_operation_in_progress = true; 6799 6800 spdk_blob_opts_init(&opts, sizeof(opts)); 6801 blob_xattrs_init(&internal_xattrs); 6802 6803 /* Change the size of new blob to the same as in original blob, 6804 * but do not allocate clusters */ 6805 opts.thin_provision = true; 6806 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6807 opts.use_extent_table = _blob->use_extent_table; 6808 6809 /* If there are any xattrs specified for snapshot, set them now */ 6810 if (ctx->xattrs) { 6811 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6812 } 6813 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6814 internal_xattrs.count = 1; 6815 internal_xattrs.ctx = _blob; 6816 internal_xattrs.names = xattrs_names; 6817 internal_xattrs.get_value = bs_xattr_snapshot; 6818 6819 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6820 bs_snapshot_newblob_create_cpl, ctx); 6821 } 6822 6823 void 6824 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 6825 const struct spdk_blob_xattr_opts *snapshot_xattrs, 6826 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6827 { 6828 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6829 6830 if (!ctx) { 6831 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6832 return; 6833 } 6834 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6835 ctx->cpl.u.blobid.cb_fn = cb_fn; 6836 ctx->cpl.u.blobid.cb_arg = cb_arg; 6837 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6838 ctx->bserrno = 0; 6839 ctx->frozen = false; 6840 ctx->original.id = blobid; 6841 ctx->xattrs = snapshot_xattrs; 6842 6843 spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx); 6844 } 6845 /* END spdk_bs_create_snapshot */ 6846 6847 /* START spdk_bs_create_clone */ 6848 6849 static void 6850 bs_xattr_clone(void *arg, const char *name, 6851 const void **value, size_t *value_len) 6852 { 6853 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 6854 6855 struct spdk_blob *blob = (struct spdk_blob *)arg; 6856 *value = &blob->id; 6857 *value_len = sizeof(blob->id); 6858 } 6859 6860 static void 6861 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6862 { 6863 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6864 struct spdk_blob *clone = _blob; 6865 6866 ctx->new.blob = clone; 6867 bs_blob_list_add(clone); 6868 6869 spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx); 6870 } 6871 6872 static void 6873 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6874 { 6875 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6876 6877 ctx->cpl.u.blobid.blobid = blobid; 6878 spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx); 6879 } 6880 6881 static void 6882 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6883 { 6884 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6885 struct 
spdk_blob_opts opts; 6886 struct spdk_blob_xattr_opts internal_xattrs; 6887 char *xattr_names[] = { BLOB_SNAPSHOT }; 6888 6889 if (bserrno != 0) { 6890 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6891 return; 6892 } 6893 6894 ctx->original.blob = _blob; 6895 ctx->original.md_ro = _blob->md_ro; 6896 6897 if (!_blob->data_ro || !_blob->md_ro) { 6898 SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n"); 6899 ctx->bserrno = -EINVAL; 6900 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6901 return; 6902 } 6903 6904 if (_blob->locked_operation_in_progress) { 6905 SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n"); 6906 ctx->bserrno = -EBUSY; 6907 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6908 return; 6909 } 6910 6911 _blob->locked_operation_in_progress = true; 6912 6913 spdk_blob_opts_init(&opts, sizeof(opts)); 6914 blob_xattrs_init(&internal_xattrs); 6915 6916 opts.thin_provision = true; 6917 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6918 opts.use_extent_table = _blob->use_extent_table; 6919 if (ctx->xattrs) { 6920 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6921 } 6922 6923 /* Set internal xattr BLOB_SNAPSHOT */ 6924 internal_xattrs.count = 1; 6925 internal_xattrs.ctx = _blob; 6926 internal_xattrs.names = xattr_names; 6927 internal_xattrs.get_value = bs_xattr_clone; 6928 6929 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6930 bs_clone_newblob_create_cpl, ctx); 6931 } 6932 6933 void 6934 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6935 const struct spdk_blob_xattr_opts *clone_xattrs, 6936 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6937 { 6938 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6939 6940 if (!ctx) { 6941 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6942 return; 6943 } 6944 6945 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6946 ctx->cpl.u.blobid.cb_fn = cb_fn; 6947 ctx->cpl.u.blobid.cb_arg = cb_arg; 6948 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6949 ctx->bserrno = 0; 6950 ctx->xattrs = clone_xattrs; 6951 ctx->original.id = blobid; 6952 6953 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6954 } 6955 6956 /* END spdk_bs_create_clone */ 6957 6958 /* START spdk_bs_inflate_blob */ 6959 6960 static void 6961 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6962 { 6963 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6964 struct spdk_blob *_blob = ctx->original.blob; 6965 6966 if (bserrno != 0) { 6967 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6968 return; 6969 } 6970 6971 /* Temporarily override md_ro flag for MD modification */ 6972 _blob->md_ro = false; 6973 6974 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6975 if (bserrno != 0) { 6976 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6977 return; 6978 } 6979 6980 assert(_parent != NULL); 6981 6982 bs_blob_list_remove(_blob); 6983 _blob->parent_id = _parent->id; 6984 6985 blob_back_bs_destroy(_blob); 6986 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6987 bs_blob_list_add(_blob); 6988 6989 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6990 } 6991 6992 static void 6993 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 6994 { 6995 struct spdk_blob *_blob = ctx->original.blob; 6996 struct spdk_blob *_parent; 6997 6998 if (ctx->allocate_all) { 6999 /* remove thin provisioning */ 7000 bs_blob_list_remove(_blob); 
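        /* Detach from whichever kind of parent is present: an external
         * snapshot is referenced via an xattr plus a flag, a regular
         * snapshot via the BLOB_SNAPSHOT xattr. */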
7001         if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
7002             blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
7003             _blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
7004         } else {
7005             blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
7006         }
7007         _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
7008         blob_back_bs_destroy(_blob);
7009         _blob->parent_id = SPDK_BLOBID_INVALID;
7010     } else {
7011         /* For now, esnap clones always have allocate_all set. */
7012         assert(!blob_is_esnap_clone(_blob));
7013
7014         _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
7015         if (_parent->parent_id != SPDK_BLOBID_INVALID) {
7016             /* We must change the parent of the inflated blob */
7017             spdk_bs_open_blob(_blob->bs, _parent->parent_id,
7018                               bs_inflate_blob_set_parent_cpl, ctx);
7019             return;
7020         }
7021
7022         bs_blob_list_remove(_blob);
7023         _blob->parent_id = SPDK_BLOBID_INVALID;
7024         blob_back_bs_destroy(_blob);
7025         _blob->back_bs_dev = bs_create_zeroes_dev();
7026     }
7027
7028     /* Temporarily override md_ro flag for MD modification */
7029     _blob->md_ro = false;
7030     blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
7031     _blob->state = SPDK_BLOB_STATE_DIRTY;
7032
7033     spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
7034 }
7035
7036 /* Check if cluster needs allocation */
7037 static inline bool
7038 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
7039 {
7040     struct spdk_blob_bs_dev *b;
7041
7042     assert(blob != NULL);
7043
7044     if (blob->active.clusters[cluster] != 0) {
7045         /* Cluster is already allocated */
7046         return false;
7047     }
7048
7049     if (blob->parent_id == SPDK_BLOBID_INVALID) {
7050         /* Blob has no parent blob */
7051         return allocate_all;
7052     }
7053
7054     if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
7055         return true;
7056     }
7057
7058     b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
7059     return (allocate_all || b->blob->active.clusters[cluster] != 0);
7060 }
7061
7062 static void
7063 bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
7064 {
7065     struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
7066     struct spdk_blob *_blob = ctx->original.blob;
7067     struct spdk_bs_cpl cpl;
7068     spdk_bs_user_op_t *op;
7069     uint64_t offset;
7070
7071     if (bserrno != 0) {
7072         bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
7073         return;
7074     }
7075
7076     for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
7077         if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
7078             break;
7079         }
7080     }
7081
7082     if (ctx->cluster < _blob->active.num_clusters) {
7083         offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);
7084
7085         /* It is safe to advance the cluster index before the copy starts */
7086         ctx->cluster++;
7087
7088         /* Use a dummy 0-byte read as a context for the cluster copy */
7089         cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7090         cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
7091         cpl.u.blob_basic.cb_arg = ctx;
7092
7093         op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
7094                               NULL, 0, offset, 0);
7095         if (!op) {
7096             bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
7097             return;
7098         }
7099
7100         bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
7101     } else {
7102         bs_inflate_blob_done(ctx);
7103     }
7104 }
7105
7106 static void
7107 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
7108 {
7109     struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
7110     uint64_t clusters_needed;
7111     uint64_t i;
7112
7113     if (bserrno != 0) {
7114         bs_clone_snapshot_cleanup_finish(ctx, bserrno);
7115         return;
7116     }
7117
7118     ctx->original.blob = _blob;
7119     ctx->original.md_ro = _blob->md_ro;
7120
7121     if (_blob->locked_operation_in_progress) {
7122         SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
7123         ctx->bserrno = -EBUSY;
7124         spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
7125         return;
7126     }
7127
7128     _blob->locked_operation_in_progress = true;
7129
7130     switch (_blob->parent_id) {
7131     case SPDK_BLOBID_INVALID:
7132         if (!ctx->allocate_all) {
7133             /* This blob has no parent, so we cannot decouple it. */
7134             SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
7135             bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
7136             return;
7137         }
7138         break;
7139     case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
7140         /*
7141          * It would be better to rely on back_bs_dev->is_zeroes() to determine which
7142          * clusters require allocation. Until there is a blobstore consumer that
7143          * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes(), it is not
7144          * worth the effort.
7145          */
7146         ctx->allocate_all = true;
7147         break;
7148     default:
7149         break;
7150     }
7151
7152     if (spdk_blob_is_thin_provisioned(_blob) == false) {
7153         /* This is not a thin-provisioned blob. No need to inflate. */
7154         bs_clone_snapshot_origblob_cleanup(ctx, 0);
7155         return;
7156     }
7157
7158     /* Do two passes - one to verify that we can obtain enough clusters
7159      * and another to actually claim them.
7160      */
7161     clusters_needed = 0;
7162     for (i = 0; i < _blob->active.num_clusters; i++) {
7163         if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
7164             clusters_needed++;
7165         }
7166     }
7167
7168     if (clusters_needed > _blob->bs->num_free_clusters) {
7169         /* Not enough free clusters. Cannot satisfy the request.
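         * (Nothing has been claimed yet; the pass above only counted, so
         * failing here requires no rollback.)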
*/ 7170 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 7171 return; 7172 } 7173 7174 ctx->cluster = 0; 7175 bs_inflate_blob_touch_next(ctx, 0); 7176 } 7177 7178 static void 7179 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7180 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 7181 { 7182 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 7183 7184 if (!ctx) { 7185 cb_fn(cb_arg, -ENOMEM); 7186 return; 7187 } 7188 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7189 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7190 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7191 ctx->bserrno = 0; 7192 ctx->original.id = blobid; 7193 ctx->channel = channel; 7194 ctx->allocate_all = allocate_all; 7195 7196 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 7197 } 7198 7199 void 7200 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7201 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7202 { 7203 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 7204 } 7205 7206 void 7207 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7208 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7209 { 7210 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 7211 } 7212 /* END spdk_bs_inflate_blob */ 7213 7214 /* START spdk_bs_blob_shallow_copy */ 7215 7216 struct shallow_copy_ctx { 7217 struct spdk_bs_cpl cpl; 7218 int bserrno; 7219 7220 /* Blob source for copy */ 7221 struct spdk_blob_store *bs; 7222 spdk_blob_id blobid; 7223 struct spdk_blob *blob; 7224 struct spdk_io_channel *blob_channel; 7225 7226 /* Destination device for copy */ 7227 struct spdk_bs_dev *ext_dev; 7228 struct spdk_io_channel *ext_channel; 7229 7230 /* Current cluster for copy operation */ 7231 uint64_t cluster; 7232 7233 /* Buffer for blob reading */ 7234 uint8_t *read_buff; 7235 7236 /* Struct for external device writing */ 7237 struct spdk_bs_dev_cb_args ext_args; 7238 7239 /* Actual number of copied clusters */ 7240 uint64_t copied_clusters_count; 7241 7242 /* Status callback for updates about the ongoing operation */ 7243 spdk_blob_shallow_copy_status status_cb; 7244 7245 /* Argument passed to function status_cb */ 7246 void *status_cb_arg; 7247 }; 7248 7249 static void 7250 bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno) 7251 { 7252 struct shallow_copy_ctx *ctx = cb_arg; 7253 struct spdk_bs_cpl *cpl = &ctx->cpl; 7254 7255 if (bserrno != 0) { 7256 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, cleanup error %d\n", ctx->blob->id, bserrno); 7257 ctx->bserrno = bserrno; 7258 } 7259 7260 ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel); 7261 spdk_free(ctx->read_buff); 7262 7263 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 7264 7265 free(ctx); 7266 } 7267 7268 static void 7269 bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, int bserrno) 7270 { 7271 struct shallow_copy_ctx *ctx = cb_arg; 7272 struct spdk_blob *_blob = ctx->blob; 7273 7274 if (bserrno != 0) { 7275 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, ext dev write error %d\n", ctx->blob->id, bserrno); 7276 ctx->bserrno = bserrno; 7277 _blob->locked_operation_in_progress = false; 7278 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7279 return; 7280 } 7281 7282 ctx->cluster++; 7283 if (ctx->status_cb) { 7284 ctx->copied_clusters_count++; 7285 ctx->status_cb(ctx->copied_clusters_count, ctx->status_cb_arg); 7286 } 7287 7288 
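    /* Resume the walk at the next cluster; bs_shallow_copy_cluster_find_next()
     * completes the operation once no allocated clusters remain. */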
bs_shallow_copy_cluster_find_next(ctx);
7289 }
7290
7291 static void
7292 bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno)
7293 {
7294     struct shallow_copy_ctx *ctx = cb_arg;
7295     struct spdk_bs_dev *ext_dev = ctx->ext_dev;
7296     struct spdk_blob *_blob = ctx->blob;
7297
7298     if (bserrno != 0) {
7299         SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob read error %d\n", ctx->blob->id, bserrno);
7300         ctx->bserrno = bserrno;
7301         _blob->locked_operation_in_progress = false;
7302         spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7303         return;
7304     }
7305
7306     ctx->ext_args.channel = ctx->ext_channel;
7307     ctx->ext_args.cb_fn = bs_shallow_copy_bdev_write_cpl;
7308     ctx->ext_args.cb_arg = ctx;
7309
7310     ext_dev->write(ext_dev, ctx->ext_channel, ctx->read_buff,
7311                    bs_cluster_to_lba(_blob->bs, ctx->cluster),
7312                    bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
7313                    &ctx->ext_args);
7314 }
7315
7316 static void
7317 bs_shallow_copy_cluster_find_next(void *cb_arg)
7318 {
7319     struct shallow_copy_ctx *ctx = cb_arg;
7320     struct spdk_blob *_blob = ctx->blob;
7321
7322     while (ctx->cluster < _blob->active.num_clusters) {
7323         if (_blob->active.clusters[ctx->cluster] != 0) {
7324             break;
7325         }
7326
7327         ctx->cluster++;
7328     }
7329
7330     if (ctx->cluster < _blob->active.num_clusters) {
7331         blob_request_submit_op_single(ctx->blob_channel, _blob, ctx->read_buff,
7332                                       bs_cluster_to_lba(_blob->bs, ctx->cluster),
7333                                       bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
7334                                       bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ);
7335     } else {
7336         _blob->locked_operation_in_progress = false;
7337         spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7338     }
7339 }
7340
7341 static void
7342 bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
7343 {
7344     struct shallow_copy_ctx *ctx = cb_arg;
7345     struct spdk_bs_dev *ext_dev = ctx->ext_dev;
7346     uint32_t blob_block_size;
7347     uint64_t blob_total_size;
7348
7349     if (bserrno != 0) {
7350         SPDK_ERRLOG("Shallow copy blob open error %d\n", bserrno);
7351         ctx->bserrno = bserrno;
7352         bs_shallow_copy_cleanup_finish(ctx, 0);
7353         return;
7354     }
7355
7356     if (!spdk_blob_is_read_only(_blob)) {
7357         SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob must be read only\n", _blob->id);
7358         ctx->bserrno = -EPERM;
7359         spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7360         return;
7361     }
7362
7363     blob_block_size = _blob->bs->dev->blocklen;
7364     blob_total_size = spdk_blob_get_num_clusters(_blob) * spdk_bs_get_cluster_size(_blob->bs);
7365
7366     if (blob_total_size > ext_dev->blockcnt * ext_dev->blocklen) {
7367         SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device must be at least as large as the blob\n",
7368                     _blob->id);
7369         ctx->bserrno = -EINVAL;
7370         spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7371         return;
7372     }
7373
7374     if (blob_block_size % ext_dev->blocklen != 0) {
7375         SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device block size is not compatible with "
7376                     "blobstore block size\n", _blob->id);
7377         ctx->bserrno = -EINVAL;
7378         spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7379         return;
7380     }
7381
7382     ctx->blob = _blob;
7383
7384     if (_blob->locked_operation_in_progress) {
7385         SPDK_DEBUGLOG(blob, "blob 0x%" PRIx64 " shallow copy - another operation in progress\n", _blob->id);
7386         ctx->bserrno = -EBUSY;
7387         spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7388         return;
7389     }
7390
7391     _blob->locked_operation_in_progress = true;
7392
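    /* Start the walk at cluster 0. Only clusters allocated in this blob are
     * copied out (hence "shallow"); unallocated clusters are skipped by
     * bs_shallow_copy_cluster_find_next(). */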
7393 ctx->cluster = 0; 7394 bs_shallow_copy_cluster_find_next(ctx); 7395 } 7396 7397 int 7398 spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7399 spdk_blob_id blobid, struct spdk_bs_dev *ext_dev, 7400 spdk_blob_shallow_copy_status status_cb_fn, void *status_cb_arg, 7401 spdk_blob_op_complete cb_fn, void *cb_arg) 7402 { 7403 struct shallow_copy_ctx *ctx; 7404 struct spdk_io_channel *ext_channel; 7405 7406 ctx = calloc(1, sizeof(*ctx)); 7407 if (!ctx) { 7408 return -ENOMEM; 7409 } 7410 7411 ctx->bs = bs; 7412 ctx->blobid = blobid; 7413 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7414 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7415 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7416 ctx->bserrno = 0; 7417 ctx->blob_channel = channel; 7418 ctx->status_cb = status_cb_fn; 7419 ctx->status_cb_arg = status_cb_arg; 7420 ctx->read_buff = spdk_malloc(bs->cluster_sz, bs->dev->blocklen, NULL, 7421 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 7422 if (!ctx->read_buff) { 7423 free(ctx); 7424 return -ENOMEM; 7425 } 7426 7427 ext_channel = ext_dev->create_channel(ext_dev); 7428 if (!ext_channel) { 7429 spdk_free(ctx->read_buff); 7430 free(ctx); 7431 return -ENOMEM; 7432 } 7433 ctx->ext_dev = ext_dev; 7434 ctx->ext_channel = ext_channel; 7435 7436 spdk_bs_open_blob(ctx->bs, ctx->blobid, bs_shallow_copy_blob_open_cpl, ctx); 7437 7438 return 0; 7439 } 7440 /* END spdk_bs_blob_shallow_copy */ 7441 7442 /* START spdk_bs_blob_set_parent */ 7443 7444 struct set_parent_ctx { 7445 struct spdk_blob_store *bs; 7446 int bserrno; 7447 spdk_bs_op_complete cb_fn; 7448 void *cb_arg; 7449 7450 struct spdk_blob *blob; 7451 bool blob_md_ro; 7452 7453 struct blob_parent parent; 7454 }; 7455 7456 static void 7457 bs_set_parent_cleanup_finish(void *cb_arg, int bserrno) 7458 { 7459 struct set_parent_ctx *ctx = cb_arg; 7460 7461 assert(ctx != NULL); 7462 7463 if (bserrno != 0) { 7464 SPDK_ERRLOG("blob set parent finish error %d\n", bserrno); 7465 if (ctx->bserrno == 0) { 7466 ctx->bserrno = bserrno; 7467 } 7468 } 7469 7470 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7471 7472 free(ctx); 7473 } 7474 7475 static void 7476 bs_set_parent_close_snapshot(void *cb_arg, int bserrno) 7477 { 7478 struct set_parent_ctx *ctx = cb_arg; 7479 7480 if (ctx->bserrno != 0) { 7481 spdk_blob_close(ctx->parent.u.snapshot.blob, bs_set_parent_cleanup_finish, ctx); 7482 return; 7483 } 7484 7485 if (bserrno != 0) { 7486 SPDK_ERRLOG("blob close error %d\n", bserrno); 7487 ctx->bserrno = bserrno; 7488 } 7489 7490 bs_set_parent_cleanup_finish(ctx, ctx->bserrno); 7491 } 7492 7493 static void 7494 bs_set_parent_close_blob(void *cb_arg, int bserrno) 7495 { 7496 struct set_parent_ctx *ctx = cb_arg; 7497 struct spdk_blob *blob = ctx->blob; 7498 struct spdk_blob *snapshot = ctx->parent.u.snapshot.blob; 7499 7500 if (bserrno != 0 && ctx->bserrno == 0) { 7501 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7502 ctx->bserrno = bserrno; 7503 } 7504 7505 /* Revert md_ro to original state */ 7506 blob->md_ro = ctx->blob_md_ro; 7507 7508 blob->locked_operation_in_progress = false; 7509 snapshot->locked_operation_in_progress = false; 7510 7511 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7512 } 7513 7514 static void 7515 bs_set_parent_set_back_bs_dev_done(void *cb_arg, int bserrno) 7516 { 7517 struct set_parent_ctx *ctx = cb_arg; 7518 struct spdk_blob *blob = ctx->blob; 7519 7520 if (bserrno != 0) { 7521 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7522 ctx->bserrno = bserrno; 7523 bs_set_parent_close_blob(ctx, bserrno); 7524 
return;
7525 }
7526
7527 spdk_blob_sync_md(blob, bs_set_parent_close_blob, ctx);
7528 }
7529
7530 static int
7531 bs_set_parent_refs(struct spdk_blob *blob, struct blob_parent *parent)
7532 {
7533 int rc;
7534
7535 bs_blob_list_remove(blob);
7536
7537 rc = blob_set_xattr(blob, BLOB_SNAPSHOT, &parent->u.snapshot.id, sizeof(spdk_blob_id), true);
7538 if (rc != 0) {
7539 SPDK_ERRLOG("error %d setting snapshot xattr\n", rc);
7540 return rc;
7541 }
7542 blob->parent_id = parent->u.snapshot.id;
7543
7544 if (blob_is_esnap_clone(blob)) {
7545 /* Remove the xattr that references the external snapshot */
7546 blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
7547 blob_remove_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
7548 }
7549
7550 bs_blob_list_add(blob);
7551
7552 return 0;
7553 }
7554
7555 static void
7556 bs_set_parent_snapshot_open_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
7557 {
7558 struct set_parent_ctx *ctx = cb_arg;
7559 struct spdk_blob *blob = ctx->blob;
7560 struct spdk_bs_dev *back_bs_dev;
7561
7562 if (bserrno != 0) {
7563 SPDK_ERRLOG("snapshot open error %d\n", bserrno);
7564 ctx->bserrno = bserrno;
7565 spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx);
7566 return;
7567 }
7568
7569 ctx->parent.u.snapshot.blob = snapshot;
7570 ctx->parent.u.snapshot.id = snapshot->id;
7571
7572 if (!spdk_blob_is_snapshot(snapshot)) {
7573 SPDK_ERRLOG("parent blob is not a snapshot\n");
7574 ctx->bserrno = -EINVAL;
7575 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
7576 return;
7577 }
7578
7579 if (blob->active.num_clusters != snapshot->active.num_clusters) {
7580 SPDK_ERRLOG("parent blob must have the same number of clusters as the child\n");
7581 ctx->bserrno = -EINVAL;
7582 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
7583 return;
7584 }
7585
7586 if (blob->locked_operation_in_progress || snapshot->locked_operation_in_progress) {
7587 SPDK_ERRLOG("cannot set parent of blob, another operation in progress\n");
7588 ctx->bserrno = -EBUSY;
7589 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
7590 return;
7591 }
7592
7593 blob->locked_operation_in_progress = true;
7594 snapshot->locked_operation_in_progress = true;
7595
7596 /* Temporarily override md_ro flag for MD modification */
7597 blob->md_ro = false;
7598
7599 back_bs_dev = bs_create_blob_bs_dev(snapshot);
7600
7601 blob_set_back_bs_dev(blob, back_bs_dev, bs_set_parent_refs, &ctx->parent,
7602 bs_set_parent_set_back_bs_dev_done,
7603 ctx);
7604 }
7605
7606 static void
7607 bs_set_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
7608 {
7609 struct set_parent_ctx *ctx = cb_arg;
7610
7611 if (bserrno != 0) {
7612 SPDK_ERRLOG("blob open error %d\n", bserrno);
7613 ctx->bserrno = bserrno;
7614 bs_set_parent_cleanup_finish(ctx, 0);
7615 return;
7616 }
7617
7618 if (!spdk_blob_is_thin_provisioned(blob)) {
7619 SPDK_ERRLOG("blob is not thin-provisioned\n");
7620 ctx->bserrno = -EINVAL;
7621 spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx);
7622 return;
7623 }
7624
7625 ctx->blob = blob;
7626 ctx->blob_md_ro = blob->md_ro;
7627
7628 spdk_bs_open_blob(ctx->bs, ctx->parent.u.snapshot.id, bs_set_parent_snapshot_open_cpl, ctx);
7629 }
7630
7631 void
7632 spdk_bs_blob_set_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id,
7633 spdk_blob_id snapshot_id, spdk_blob_op_complete cb_fn, void *cb_arg)
7634 {
7635 struct set_parent_ctx *ctx;
7636
7637 if (snapshot_id == SPDK_BLOBID_INVALID) {
7638 SPDK_ERRLOG("snapshot id not valid\n");
7639 cb_fn(cb_arg,
-EINVAL); 7640 return; 7641 } 7642 7643 if (blob_id == snapshot_id) { 7644 SPDK_ERRLOG("blob id and snapshot id cannot be the same\n"); 7645 cb_fn(cb_arg, -EINVAL); 7646 return; 7647 } 7648 7649 if (spdk_blob_get_parent_snapshot(bs, blob_id) == snapshot_id) { 7650 SPDK_NOTICELOG("snapshot is already the parent of blob\n"); 7651 cb_fn(cb_arg, -EEXIST); 7652 return; 7653 } 7654 7655 ctx = calloc(1, sizeof(*ctx)); 7656 if (!ctx) { 7657 cb_fn(cb_arg, -ENOMEM); 7658 return; 7659 } 7660 7661 ctx->bs = bs; 7662 ctx->parent.u.snapshot.id = snapshot_id; 7663 ctx->cb_fn = cb_fn; 7664 ctx->cb_arg = cb_arg; 7665 ctx->bserrno = 0; 7666 7667 spdk_bs_open_blob(bs, blob_id, bs_set_parent_blob_open_cpl, ctx); 7668 } 7669 /* END spdk_bs_blob_set_parent */ 7670 7671 /* START spdk_bs_blob_set_external_parent */ 7672 7673 static void 7674 bs_set_external_parent_cleanup_finish(void *cb_arg, int bserrno) 7675 { 7676 struct set_parent_ctx *ctx = cb_arg; 7677 7678 if (bserrno != 0) { 7679 SPDK_ERRLOG("blob set external parent finish error %d\n", bserrno); 7680 if (ctx->bserrno == 0) { 7681 ctx->bserrno = bserrno; 7682 } 7683 } 7684 7685 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7686 7687 free(ctx->parent.u.esnap.id); 7688 free(ctx); 7689 } 7690 7691 static void 7692 bs_set_external_parent_close_blob(void *cb_arg, int bserrno) 7693 { 7694 struct set_parent_ctx *ctx = cb_arg; 7695 struct spdk_blob *blob = ctx->blob; 7696 7697 if (bserrno != 0 && ctx->bserrno == 0) { 7698 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7699 ctx->bserrno = bserrno; 7700 } 7701 7702 /* Revert md_ro to original state */ 7703 blob->md_ro = ctx->blob_md_ro; 7704 7705 blob->locked_operation_in_progress = false; 7706 7707 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7708 } 7709 7710 static void 7711 bs_set_external_parent_unfrozen(void *cb_arg, int bserrno) 7712 { 7713 struct set_parent_ctx *ctx = cb_arg; 7714 struct spdk_blob *blob = ctx->blob; 7715 7716 if (bserrno != 0) { 7717 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7718 ctx->bserrno = bserrno; 7719 bs_set_external_parent_close_blob(ctx, bserrno); 7720 return; 7721 } 7722 7723 spdk_blob_sync_md(blob, bs_set_external_parent_close_blob, ctx); 7724 } 7725 7726 static int 7727 bs_set_external_parent_refs(struct spdk_blob *blob, struct blob_parent *parent) 7728 { 7729 int rc; 7730 7731 bs_blob_list_remove(blob); 7732 7733 if (spdk_blob_is_clone(blob)) { 7734 /* Remove the xattr that references the snapshot */ 7735 blob->parent_id = SPDK_BLOBID_INVALID; 7736 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 7737 } 7738 7739 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, parent->u.esnap.id, 7740 parent->u.esnap.id_len, true); 7741 if (rc != 0) { 7742 SPDK_ERRLOG("error %d setting external snapshot xattr\n", rc); 7743 return rc; 7744 } 7745 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 7746 blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 7747 7748 bs_blob_list_add(blob); 7749 7750 return 0; 7751 } 7752 7753 static void 7754 bs_set_external_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7755 { 7756 struct set_parent_ctx *ctx = cb_arg; 7757 const void *esnap_id; 7758 size_t esnap_id_len; 7759 int rc; 7760 7761 if (bserrno != 0) { 7762 SPDK_ERRLOG("blob open error %d\n", bserrno); 7763 ctx->bserrno = bserrno; 7764 bs_set_parent_cleanup_finish(ctx, 0); 7765 return; 7766 } 7767 7768 ctx->blob = blob; 7769 ctx->blob_md_ro = blob->md_ro; 7770 7771 rc = spdk_blob_get_esnap_id(blob, &esnap_id, &esnap_id_len); 7772 if (rc == 0 && 
esnap_id != NULL && esnap_id_len == ctx->parent.u.esnap.id_len && 7773 memcmp(esnap_id, ctx->parent.u.esnap.id, esnap_id_len) == 0) { 7774 SPDK_ERRLOG("external snapshot is already the parent of blob\n"); 7775 ctx->bserrno = -EEXIST; 7776 goto error; 7777 } 7778 7779 if (!spdk_blob_is_thin_provisioned(blob)) { 7780 SPDK_ERRLOG("blob is not thin-provisioned\n"); 7781 ctx->bserrno = -EINVAL; 7782 goto error; 7783 } 7784 7785 if (blob->locked_operation_in_progress) { 7786 SPDK_ERRLOG("cannot set external parent of blob, another operation in progress\n"); 7787 ctx->bserrno = -EBUSY; 7788 goto error; 7789 } 7790 7791 blob->locked_operation_in_progress = true; 7792 7793 /* Temporarily override md_ro flag for MD modification */ 7794 blob->md_ro = false; 7795 7796 blob_set_back_bs_dev(blob, ctx->parent.u.esnap.back_bs_dev, bs_set_external_parent_refs, 7797 &ctx->parent, bs_set_external_parent_unfrozen, ctx); 7798 return; 7799 7800 error: 7801 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7802 } 7803 7804 void 7805 spdk_bs_blob_set_external_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id, 7806 struct spdk_bs_dev *esnap_bs_dev, const void *esnap_id, 7807 uint32_t esnap_id_len, spdk_blob_op_complete cb_fn, void *cb_arg) 7808 { 7809 struct set_parent_ctx *ctx; 7810 uint64_t esnap_dev_size, cluster_sz; 7811 7812 if (sizeof(blob_id) == esnap_id_len && memcmp(&blob_id, esnap_id, sizeof(blob_id)) == 0) { 7813 SPDK_ERRLOG("blob id and external snapshot id cannot be the same\n"); 7814 cb_fn(cb_arg, -EINVAL); 7815 return; 7816 } 7817 7818 esnap_dev_size = esnap_bs_dev->blockcnt * esnap_bs_dev->blocklen; 7819 cluster_sz = spdk_bs_get_cluster_size(bs); 7820 if ((esnap_dev_size % cluster_sz) != 0) { 7821 SPDK_ERRLOG("Esnap device size %" PRIu64 " is not an integer multiple of " 7822 "cluster size %" PRIu64 "\n", esnap_dev_size, cluster_sz); 7823 cb_fn(cb_arg, -EINVAL); 7824 return; 7825 } 7826 7827 ctx = calloc(1, sizeof(*ctx)); 7828 if (!ctx) { 7829 cb_fn(cb_arg, -ENOMEM); 7830 return; 7831 } 7832 7833 ctx->parent.u.esnap.id = calloc(1, esnap_id_len); 7834 if (!ctx->parent.u.esnap.id) { 7835 free(ctx); 7836 cb_fn(cb_arg, -ENOMEM); 7837 return; 7838 } 7839 7840 ctx->bs = bs; 7841 ctx->parent.u.esnap.back_bs_dev = esnap_bs_dev; 7842 memcpy(ctx->parent.u.esnap.id, esnap_id, esnap_id_len); 7843 ctx->parent.u.esnap.id_len = esnap_id_len; 7844 ctx->cb_fn = cb_fn; 7845 ctx->cb_arg = cb_arg; 7846 ctx->bserrno = 0; 7847 7848 spdk_bs_open_blob(bs, blob_id, bs_set_external_parent_blob_open_cpl, ctx); 7849 } 7850 /* END spdk_bs_blob_set_external_parent */ 7851 7852 /* START spdk_blob_resize */ 7853 struct spdk_bs_resize_ctx { 7854 spdk_blob_op_complete cb_fn; 7855 void *cb_arg; 7856 struct spdk_blob *blob; 7857 uint64_t sz; 7858 int rc; 7859 }; 7860 7861 static void 7862 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 7863 { 7864 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7865 7866 if (rc != 0) { 7867 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 7868 } 7869 7870 if (ctx->rc != 0) { 7871 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 7872 rc = ctx->rc; 7873 } 7874 7875 ctx->blob->locked_operation_in_progress = false; 7876 7877 ctx->cb_fn(ctx->cb_arg, rc); 7878 free(ctx); 7879 } 7880 7881 static void 7882 bs_resize_freeze_cpl(void *cb_arg, int rc) 7883 { 7884 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7885 7886 if (rc != 0) { 7887 ctx->blob->locked_operation_in_progress = false; 7888 ctx->cb_fn(ctx->cb_arg, rc); 7889 free(ctx); 7890 
return; 7891 } 7892 7893 ctx->rc = blob_resize(ctx->blob, ctx->sz); 7894 7895 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 7896 } 7897 7898 void 7899 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 7900 { 7901 struct spdk_bs_resize_ctx *ctx; 7902 7903 blob_verify_md_op(blob); 7904 7905 SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz); 7906 7907 if (blob->md_ro) { 7908 cb_fn(cb_arg, -EPERM); 7909 return; 7910 } 7911 7912 if (sz == blob->active.num_clusters) { 7913 cb_fn(cb_arg, 0); 7914 return; 7915 } 7916 7917 if (blob->locked_operation_in_progress) { 7918 cb_fn(cb_arg, -EBUSY); 7919 return; 7920 } 7921 7922 ctx = calloc(1, sizeof(*ctx)); 7923 if (!ctx) { 7924 cb_fn(cb_arg, -ENOMEM); 7925 return; 7926 } 7927 7928 blob->locked_operation_in_progress = true; 7929 ctx->cb_fn = cb_fn; 7930 ctx->cb_arg = cb_arg; 7931 ctx->blob = blob; 7932 ctx->sz = sz; 7933 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 7934 } 7935 7936 /* END spdk_blob_resize */ 7937 7938 7939 /* START spdk_bs_delete_blob */ 7940 7941 static void 7942 bs_delete_close_cpl(void *cb_arg, int bserrno) 7943 { 7944 spdk_bs_sequence_t *seq = cb_arg; 7945 7946 bs_sequence_finish(seq, bserrno); 7947 } 7948 7949 static void 7950 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7951 { 7952 struct spdk_blob *blob = cb_arg; 7953 7954 if (bserrno != 0) { 7955 /* 7956 * We already removed this blob from the blobstore tailq, so 7957 * we need to free it here since this is the last reference 7958 * to it. 7959 */ 7960 blob_free(blob); 7961 bs_delete_close_cpl(seq, bserrno); 7962 return; 7963 } 7964 7965 /* 7966 * This will immediately decrement the ref_count and call 7967 * the completion routine since the metadata state is clean. 7968 * By calling spdk_blob_close, we reduce the number of call 7969 * points into code that touches the blob->open_ref count 7970 * and the blobstore's blob list. 
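 * Note that for deleted blobs (active.num_pages == 0) blob_close_cpl() skips
 * the open_blobs tree removal, since the delete path already removed the blob.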
7971 */ 7972 spdk_blob_close(blob, bs_delete_close_cpl, seq); 7973 } 7974 7975 struct delete_snapshot_ctx { 7976 struct spdk_blob_list *parent_snapshot_entry; 7977 struct spdk_blob *snapshot; 7978 struct spdk_blob_md_page *page; 7979 bool snapshot_md_ro; 7980 struct spdk_blob *clone; 7981 bool clone_md_ro; 7982 spdk_blob_op_with_handle_complete cb_fn; 7983 void *cb_arg; 7984 int bserrno; 7985 uint32_t next_extent_page; 7986 }; 7987 7988 static void 7989 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 7990 { 7991 struct delete_snapshot_ctx *ctx = cb_arg; 7992 7993 if (bserrno != 0) { 7994 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 7995 } 7996 7997 assert(ctx != NULL); 7998 7999 if (bserrno != 0 && ctx->bserrno == 0) { 8000 ctx->bserrno = bserrno; 8001 } 8002 8003 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 8004 spdk_free(ctx->page); 8005 free(ctx); 8006 } 8007 8008 static void 8009 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 8010 { 8011 struct delete_snapshot_ctx *ctx = cb_arg; 8012 8013 if (bserrno != 0) { 8014 ctx->bserrno = bserrno; 8015 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 8016 } 8017 8018 if (ctx->bserrno != 0) { 8019 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 8020 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 8021 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 8022 } 8023 8024 ctx->snapshot->locked_operation_in_progress = false; 8025 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8026 8027 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 8028 } 8029 8030 static void 8031 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 8032 { 8033 struct delete_snapshot_ctx *ctx = cb_arg; 8034 8035 ctx->clone->locked_operation_in_progress = false; 8036 ctx->clone->md_ro = ctx->clone_md_ro; 8037 8038 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8039 } 8040 8041 static void 8042 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 8043 { 8044 struct delete_snapshot_ctx *ctx = cb_arg; 8045 8046 if (bserrno) { 8047 ctx->bserrno = bserrno; 8048 delete_snapshot_cleanup_clone(ctx, 0); 8049 return; 8050 } 8051 8052 ctx->clone->locked_operation_in_progress = false; 8053 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 8054 } 8055 8056 static void 8057 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 8058 { 8059 struct delete_snapshot_ctx *ctx = cb_arg; 8060 struct spdk_blob_list *parent_snapshot_entry = NULL; 8061 struct spdk_blob_list *snapshot_entry = NULL; 8062 struct spdk_blob_list *clone_entry = NULL; 8063 struct spdk_blob_list *snapshot_clone_entry = NULL; 8064 8065 if (bserrno) { 8066 SPDK_ERRLOG("Failed to sync MD on blob\n"); 8067 ctx->bserrno = bserrno; 8068 delete_snapshot_cleanup_clone(ctx, 0); 8069 return; 8070 } 8071 8072 /* Get snapshot entry for the snapshot we want to remove */ 8073 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 8074 8075 assert(snapshot_entry != NULL); 8076 8077 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 8078 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8079 assert(clone_entry != NULL); 8080 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 8081 snapshot_entry->clone_count--; 8082 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 8083 8084 switch (ctx->snapshot->parent_id) { 8085 case SPDK_BLOBID_INVALID: 8086 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 8087 /* No parent snapshot - just remove clone 
entry */ 8088 free(clone_entry); 8089 break; 8090 default: 8091 /* This snapshot is at the same time a clone of another snapshot - we need to 8092 * update parent snapshot (remove current clone, add new one inherited from 8093 * the snapshot that is being removed) */ 8094 8095 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8096 * snapshot that we are removing */ 8097 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 8098 &snapshot_clone_entry); 8099 8100 /* Switch clone entry in parent snapshot */ 8101 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 8102 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 8103 free(snapshot_clone_entry); 8104 } 8105 8106 /* Restore md_ro flags */ 8107 ctx->clone->md_ro = ctx->clone_md_ro; 8108 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8109 8110 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 8111 } 8112 8113 static void 8114 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 8115 { 8116 struct delete_snapshot_ctx *ctx = cb_arg; 8117 uint64_t i; 8118 8119 ctx->snapshot->md_ro = false; 8120 8121 if (bserrno) { 8122 SPDK_ERRLOG("Failed to sync MD on clone\n"); 8123 ctx->bserrno = bserrno; 8124 8125 /* Restore snapshot to previous state */ 8126 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8127 if (bserrno != 0) { 8128 delete_snapshot_cleanup_clone(ctx, bserrno); 8129 return; 8130 } 8131 8132 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8133 return; 8134 } 8135 8136 /* Clear cluster map entries for snapshot */ 8137 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8138 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 8139 if (ctx->snapshot->active.clusters[i] != 0) { 8140 ctx->snapshot->active.num_allocated_clusters--; 8141 } 8142 ctx->snapshot->active.clusters[i] = 0; 8143 } 8144 } 8145 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 8146 i < ctx->clone->active.num_extent_pages; i++) { 8147 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 8148 ctx->snapshot->active.extent_pages[i] = 0; 8149 } 8150 } 8151 8152 blob_set_thin_provision(ctx->snapshot); 8153 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 8154 8155 if (ctx->parent_snapshot_entry != NULL) { 8156 ctx->snapshot->back_bs_dev = NULL; 8157 } 8158 8159 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 8160 } 8161 8162 static void 8163 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 8164 { 8165 int bserrno; 8166 8167 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 8168 blob_back_bs_destroy(ctx->clone); 8169 8170 /* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... 
*/ 8171 if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 8172 bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot, 8173 BLOB_EXTERNAL_SNAPSHOT_ID); 8174 if (bserrno != 0) { 8175 ctx->bserrno = bserrno; 8176 8177 /* Restore snapshot to previous state */ 8178 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8179 if (bserrno != 0) { 8180 delete_snapshot_cleanup_clone(ctx, bserrno); 8181 return; 8182 } 8183 8184 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8185 return; 8186 } 8187 ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 8188 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8189 /* Do not delete the external snapshot along with this snapshot */ 8190 ctx->snapshot->back_bs_dev = NULL; 8191 ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 8192 } else if (ctx->parent_snapshot_entry != NULL) { 8193 /* ...to parent snapshot */ 8194 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 8195 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8196 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 8197 sizeof(spdk_blob_id), 8198 true); 8199 } else { 8200 /* ...to blobid invalid and zeroes dev */ 8201 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 8202 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 8203 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 8204 } 8205 8206 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 8207 } 8208 8209 static void 8210 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 8211 { 8212 struct delete_snapshot_ctx *ctx = cb_arg; 8213 uint32_t *extent_page; 8214 uint64_t i; 8215 8216 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 8217 i < ctx->clone->active.num_extent_pages; i++) { 8218 if (ctx->snapshot->active.extent_pages[i] == 0) { 8219 /* No extent page to use from snapshot */ 8220 continue; 8221 } 8222 8223 extent_page = &ctx->clone->active.extent_pages[i]; 8224 if (*extent_page == 0) { 8225 /* Copy extent page from snapshot when clone did not have a matching one */ 8226 *extent_page = ctx->snapshot->active.extent_pages[i]; 8227 continue; 8228 } 8229 8230 /* Clone and snapshot both contain partially filled matching extent pages. 8231 * Update the clone extent page in place with cluster map containing the mix of both. 
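 * blob_write_extent_page() completes asynchronously and re-enters
 * delete_snapshot_update_extent_pages(), which resumes the scan from
 * next_extent_page.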
*/ 8232 ctx->next_extent_page = i + 1; 8233 memset(ctx->page, 0, SPDK_BS_PAGE_SIZE); 8234 8235 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page, 8236 delete_snapshot_update_extent_pages, ctx); 8237 return; 8238 } 8239 delete_snapshot_update_extent_pages_cpl(ctx); 8240 } 8241 8242 static void 8243 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 8244 { 8245 struct delete_snapshot_ctx *ctx = cb_arg; 8246 uint64_t i; 8247 8248 /* Temporarily override md_ro flag for clone for MD modification */ 8249 ctx->clone_md_ro = ctx->clone->md_ro; 8250 ctx->clone->md_ro = false; 8251 8252 if (bserrno) { 8253 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 8254 ctx->bserrno = bserrno; 8255 delete_snapshot_cleanup_clone(ctx, 0); 8256 return; 8257 } 8258 8259 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 8260 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8261 if (ctx->clone->active.clusters[i] == 0) { 8262 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 8263 if (ctx->clone->active.clusters[i] != 0) { 8264 ctx->clone->active.num_allocated_clusters++; 8265 } 8266 } 8267 } 8268 ctx->next_extent_page = 0; 8269 delete_snapshot_update_extent_pages(ctx, 0); 8270 } 8271 8272 static void 8273 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno) 8274 { 8275 struct delete_snapshot_ctx *ctx = cb_arg; 8276 8277 if (bserrno != 0) { 8278 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n", 8279 blob->id, bserrno); 8280 /* That error should not stop us from syncing metadata. */ 8281 } 8282 8283 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8284 } 8285 8286 static void 8287 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 8288 { 8289 struct delete_snapshot_ctx *ctx = cb_arg; 8290 8291 if (bserrno) { 8292 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 8293 ctx->bserrno = bserrno; 8294 delete_snapshot_cleanup_clone(ctx, 0); 8295 return; 8296 } 8297 8298 /* Temporarily override md_ro flag for snapshot for MD modification */ 8299 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 8300 ctx->snapshot->md_ro = false; 8301 8302 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 8303 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 8304 sizeof(spdk_blob_id), true); 8305 if (ctx->bserrno != 0) { 8306 delete_snapshot_cleanup_clone(ctx, 0); 8307 return; 8308 } 8309 8310 if (blob_is_esnap_clone(ctx->snapshot)) { 8311 blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false, 8312 delete_snapshot_esnap_channels_destroyed_cb, 8313 ctx); 8314 return; 8315 } 8316 8317 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8318 } 8319 8320 static void 8321 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 8322 { 8323 struct delete_snapshot_ctx *ctx = cb_arg; 8324 8325 if (bserrno) { 8326 SPDK_ERRLOG("Failed to open clone\n"); 8327 ctx->bserrno = bserrno; 8328 delete_snapshot_cleanup_snapshot(ctx, 0); 8329 return; 8330 } 8331 8332 ctx->clone = clone; 8333 8334 if (clone->locked_operation_in_progress) { 8335 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 8336 ctx->bserrno = -EBUSY; 8337 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8338 return; 8339 } 8340 8341 
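/* Lock the clone and freeze its I/O while the snapshot's cluster map and
 * extent pages are merged into it. */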
clone->locked_operation_in_progress = true; 8342 8343 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 8344 } 8345 8346 static void 8347 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 8348 { 8349 struct spdk_blob_list *snapshot_entry = NULL; 8350 struct spdk_blob_list *clone_entry = NULL; 8351 struct spdk_blob_list *snapshot_clone_entry = NULL; 8352 8353 /* Get snapshot entry for the snapshot we want to remove */ 8354 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 8355 8356 assert(snapshot_entry != NULL); 8357 8358 /* Get clone of the snapshot (at this point there can be only one clone) */ 8359 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8360 assert(snapshot_entry->clone_count == 1); 8361 assert(clone_entry != NULL); 8362 8363 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8364 * snapshot that we are removing */ 8365 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 8366 &snapshot_clone_entry); 8367 8368 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 8369 } 8370 8371 static void 8372 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 8373 { 8374 spdk_bs_sequence_t *seq = cb_arg; 8375 struct spdk_blob_list *snapshot_entry = NULL; 8376 uint32_t page_num; 8377 8378 if (bserrno) { 8379 SPDK_ERRLOG("Failed to remove blob\n"); 8380 bs_sequence_finish(seq, bserrno); 8381 return; 8382 } 8383 8384 /* Remove snapshot from the list */ 8385 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8386 if (snapshot_entry != NULL) { 8387 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 8388 free(snapshot_entry); 8389 } 8390 8391 page_num = bs_blobid_to_page(blob->id); 8392 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 8393 blob->state = SPDK_BLOB_STATE_DIRTY; 8394 blob->active.num_pages = 0; 8395 blob_resize(blob, 0); 8396 8397 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 8398 } 8399 8400 static int 8401 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 8402 { 8403 struct spdk_blob_list *snapshot_entry = NULL; 8404 struct spdk_blob_list *clone_entry = NULL; 8405 struct spdk_blob *clone = NULL; 8406 bool has_one_clone = false; 8407 8408 /* Check if this is a snapshot with clones */ 8409 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8410 if (snapshot_entry != NULL) { 8411 if (snapshot_entry->clone_count > 1) { 8412 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 8413 return -EBUSY; 8414 } else if (snapshot_entry->clone_count == 1) { 8415 has_one_clone = true; 8416 } 8417 } 8418 8419 /* Check if someone has this blob open (besides this delete context): 8420 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 8421 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 8422 * and that is ok, because we will update it accordingly */ 8423 if (blob->open_ref <= 2 && has_one_clone) { 8424 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8425 assert(clone_entry != NULL); 8426 clone = blob_lookup(blob->bs, clone_entry->id); 8427 8428 if (blob->open_ref == 2 && clone == NULL) { 8429 /* Clone is closed and someone else opened this blob */ 8430 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8431 return -EBUSY; 8432 } 8433 8434 *update_clone = true; 8435 return 0; 8436 } 8437 8438 if (blob->open_ref > 1) { 8439 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8440 return 
-EBUSY; 8441 } 8442 8443 assert(has_one_clone == false); 8444 *update_clone = false; 8445 return 0; 8446 } 8447 8448 static void 8449 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 8450 { 8451 spdk_bs_sequence_t *seq = cb_arg; 8452 8453 bs_sequence_finish(seq, -ENOMEM); 8454 } 8455 8456 static void 8457 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 8458 { 8459 spdk_bs_sequence_t *seq = cb_arg; 8460 struct delete_snapshot_ctx *ctx; 8461 bool update_clone = false; 8462 8463 if (bserrno != 0) { 8464 bs_sequence_finish(seq, bserrno); 8465 return; 8466 } 8467 8468 blob_verify_md_op(blob); 8469 8470 ctx = calloc(1, sizeof(*ctx)); 8471 if (ctx == NULL) { 8472 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 8473 return; 8474 } 8475 8476 ctx->snapshot = blob; 8477 ctx->cb_fn = bs_delete_blob_finish; 8478 ctx->cb_arg = seq; 8479 8480 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 8481 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 8482 if (ctx->bserrno) { 8483 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8484 return; 8485 } 8486 8487 if (blob->locked_operation_in_progress) { 8488 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 8489 ctx->bserrno = -EBUSY; 8490 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8491 return; 8492 } 8493 8494 blob->locked_operation_in_progress = true; 8495 8496 /* 8497 * Remove the blob from the blob_store list now, to ensure it does not 8498 * get returned after this point by blob_lookup(). 8499 */ 8500 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 8501 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 8502 8503 if (update_clone) { 8504 ctx->page = spdk_zmalloc(blob->bs->md_page_size, 0, NULL, SPDK_ENV_NUMA_ID_ANY, 8505 SPDK_MALLOC_DMA); 8506 if (!ctx->page) { 8507 ctx->bserrno = -ENOMEM; 8508 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8509 return; 8510 } 8511 /* This blob is a snapshot with active clone - update clone first */ 8512 update_clone_on_snapshot_deletion(blob, ctx); 8513 } else { 8514 /* This blob does not have any clones - just remove it */ 8515 bs_blob_list_remove(blob); 8516 bs_delete_blob_finish(seq, blob, 0); 8517 free(ctx); 8518 } 8519 } 8520 8521 void 8522 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8523 spdk_blob_op_complete cb_fn, void *cb_arg) 8524 { 8525 struct spdk_bs_cpl cpl; 8526 spdk_bs_sequence_t *seq; 8527 8528 SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid); 8529 8530 assert(spdk_get_thread() == bs->md_thread); 8531 8532 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8533 cpl.u.blob_basic.cb_fn = cb_fn; 8534 cpl.u.blob_basic.cb_arg = cb_arg; 8535 8536 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8537 if (!seq) { 8538 cb_fn(cb_arg, -ENOMEM); 8539 return; 8540 } 8541 8542 spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq); 8543 } 8544 8545 /* END spdk_bs_delete_blob */ 8546 8547 /* START spdk_bs_open_blob */ 8548 8549 static void 8550 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8551 { 8552 struct spdk_blob *blob = cb_arg; 8553 struct spdk_blob *existing; 8554 8555 if (bserrno != 0) { 8556 blob_free(blob); 8557 seq->cpl.u.blob_handle.blob = NULL; 8558 bs_sequence_finish(seq, bserrno); 8559 return; 8560 } 8561 8562 existing = blob_lookup(blob->bs, blob->id); 8563 if (existing) { 8564 blob_free(blob); 8565 existing->open_ref++; 8566 seq->cpl.u.blob_handle.blob = existing; 8567 bs_sequence_finish(seq, 0); 8568 return; 8569 } 
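/* This load won the race (if any): take a reference and publish the blob so
 * subsequent blob_lookup() calls return this instance. */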
8570 8571 blob->open_ref++; 8572 8573 spdk_bit_array_set(blob->bs->open_blobids, blob->id); 8574 RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob); 8575 8576 bs_sequence_finish(seq, bserrno); 8577 } 8578 8579 static inline void 8580 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst) 8581 { 8582 #define FIELD_OK(field) \ 8583 offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size 8584 8585 #define SET_FIELD(field) \ 8586 if (FIELD_OK(field)) { \ 8587 dst->field = src->field; \ 8588 } \ 8589 8590 SET_FIELD(clear_method); 8591 SET_FIELD(esnap_ctx); 8592 8593 dst->opts_size = src->opts_size; 8594 8595 /* You should not remove this statement, but need to update the assert statement 8596 * if you add a new field, and also add a corresponding SET_FIELD statement */ 8597 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size"); 8598 8599 #undef FIELD_OK 8600 #undef SET_FIELD 8601 } 8602 8603 static void 8604 bs_open_blob(struct spdk_blob_store *bs, 8605 spdk_blob_id blobid, 8606 struct spdk_blob_open_opts *opts, 8607 spdk_blob_op_with_handle_complete cb_fn, 8608 void *cb_arg) 8609 { 8610 struct spdk_blob *blob; 8611 struct spdk_bs_cpl cpl; 8612 struct spdk_blob_open_opts opts_local; 8613 spdk_bs_sequence_t *seq; 8614 uint32_t page_num; 8615 8616 SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid); 8617 assert(spdk_get_thread() == bs->md_thread); 8618 8619 page_num = bs_blobid_to_page(blobid); 8620 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 8621 /* Invalid blobid */ 8622 cb_fn(cb_arg, NULL, -ENOENT); 8623 return; 8624 } 8625 8626 blob = blob_lookup(bs, blobid); 8627 if (blob) { 8628 blob->open_ref++; 8629 cb_fn(cb_arg, blob, 0); 8630 return; 8631 } 8632 8633 blob = blob_alloc(bs, blobid); 8634 if (!blob) { 8635 cb_fn(cb_arg, NULL, -ENOMEM); 8636 return; 8637 } 8638 8639 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 8640 if (opts) { 8641 blob_open_opts_copy(opts, &opts_local); 8642 } 8643 8644 blob->clear_method = opts_local.clear_method; 8645 8646 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 8647 cpl.u.blob_handle.cb_fn = cb_fn; 8648 cpl.u.blob_handle.cb_arg = cb_arg; 8649 cpl.u.blob_handle.blob = blob; 8650 cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx; 8651 8652 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8653 if (!seq) { 8654 blob_free(blob); 8655 cb_fn(cb_arg, NULL, -ENOMEM); 8656 return; 8657 } 8658 8659 blob_load(seq, blob, bs_open_blob_cpl, blob); 8660 } 8661 8662 void 8663 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8664 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8665 { 8666 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 8667 } 8668 8669 void 8670 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 8671 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8672 { 8673 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 8674 } 8675 8676 /* END spdk_bs_open_blob */ 8677 8678 /* START spdk_blob_set_read_only */ 8679 int 8680 spdk_blob_set_read_only(struct spdk_blob *blob) 8681 { 8682 blob_verify_md_op(blob); 8683 8684 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 8685 8686 blob->state = SPDK_BLOB_STATE_DIRTY; 8687 return 0; 8688 } 8689 /* END spdk_blob_set_read_only */ 8690 8691 /* START spdk_blob_sync_md */ 8692 8693 static void 8694 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8695 { 8696 struct spdk_blob *blob = cb_arg; 8697 8698 if (bserrno == 
0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 8699 blob->data_ro = true; 8700 blob->md_ro = true; 8701 } 8702 8703 bs_sequence_finish(seq, bserrno); 8704 } 8705 8706 static void 8707 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8708 { 8709 struct spdk_bs_cpl cpl; 8710 spdk_bs_sequence_t *seq; 8711 8712 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8713 cpl.u.blob_basic.cb_fn = cb_fn; 8714 cpl.u.blob_basic.cb_arg = cb_arg; 8715 8716 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8717 if (!seq) { 8718 cb_fn(cb_arg, -ENOMEM); 8719 return; 8720 } 8721 8722 blob_persist(seq, blob, blob_sync_md_cpl, blob); 8723 } 8724 8725 void 8726 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8727 { 8728 blob_verify_md_op(blob); 8729 8730 SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id); 8731 8732 if (blob->md_ro) { 8733 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 8734 cb_fn(cb_arg, 0); 8735 return; 8736 } 8737 8738 blob_sync_md(blob, cb_fn, cb_arg); 8739 } 8740 8741 /* END spdk_blob_sync_md */ 8742 8743 struct spdk_blob_cluster_op_ctx { 8744 struct spdk_thread *thread; 8745 struct spdk_blob *blob; 8746 uint32_t cluster_num; /* cluster index in blob */ 8747 uint32_t cluster; /* cluster on disk */ 8748 uint32_t extent_page; /* extent page on disk */ 8749 struct spdk_blob_md_page *page; /* preallocated extent page */ 8750 int rc; 8751 spdk_blob_op_complete cb_fn; 8752 void *cb_arg; 8753 }; 8754 8755 static void 8756 blob_op_cluster_msg_cpl(void *arg) 8757 { 8758 struct spdk_blob_cluster_op_ctx *ctx = arg; 8759 8760 ctx->cb_fn(ctx->cb_arg, ctx->rc); 8761 free(ctx); 8762 } 8763 8764 static void 8765 blob_op_cluster_msg_cb(void *arg, int bserrno) 8766 { 8767 struct spdk_blob_cluster_op_ctx *ctx = arg; 8768 8769 ctx->rc = bserrno; 8770 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8771 } 8772 8773 static void 8774 blob_insert_new_ep_cb(void *arg, int bserrno) 8775 { 8776 struct spdk_blob_cluster_op_ctx *ctx = arg; 8777 uint32_t *extent_page; 8778 8779 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8780 *extent_page = ctx->extent_page; 8781 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8782 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8783 } 8784 8785 struct spdk_blob_write_extent_page_ctx { 8786 struct spdk_blob_store *bs; 8787 8788 uint32_t extent; 8789 struct spdk_blob_md_page *page; 8790 }; 8791 8792 static void 8793 blob_free_cluster_msg_cb(void *arg, int bserrno) 8794 { 8795 struct spdk_blob_cluster_op_ctx *ctx = arg; 8796 8797 spdk_spin_lock(&ctx->blob->bs->used_lock); 8798 bs_release_cluster(ctx->blob->bs, ctx->cluster); 8799 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8800 8801 ctx->rc = bserrno; 8802 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8803 } 8804 8805 static void 8806 blob_free_cluster_update_ep_cb(void *arg, int bserrno) 8807 { 8808 struct spdk_blob_cluster_op_ctx *ctx = arg; 8809 8810 if (bserrno != 0 || ctx->blob->bs->clean == 0) { 8811 blob_free_cluster_msg_cb(ctx, bserrno); 8812 return; 8813 } 8814 8815 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8816 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8817 } 8818 8819 static void 8820 blob_free_cluster_free_ep_cb(void *arg, int bserrno) 8821 { 8822 struct spdk_blob_cluster_op_ctx *ctx = arg; 8823 8824 spdk_spin_lock(&ctx->blob->bs->used_lock); 8825 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8826 bs_release_md_page(ctx->blob->bs, 
ctx->extent_page); 8827 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8828 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8829 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8830 } 8831 8832 static void 8833 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8834 { 8835 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8836 8837 free(ctx); 8838 bs_sequence_finish(seq, bserrno); 8839 } 8840 8841 static void 8842 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8843 { 8844 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8845 8846 if (bserrno != 0) { 8847 blob_persist_extent_page_cpl(seq, ctx, bserrno); 8848 return; 8849 } 8850 bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent), 8851 bs_byte_to_lba(ctx->bs, ctx->bs->md_page_size), 8852 blob_persist_extent_page_cpl, ctx); 8853 } 8854 8855 static void 8856 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 8857 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 8858 { 8859 struct spdk_blob_write_extent_page_ctx *ctx; 8860 spdk_bs_sequence_t *seq; 8861 struct spdk_bs_cpl cpl; 8862 8863 ctx = calloc(1, sizeof(*ctx)); 8864 if (!ctx) { 8865 cb_fn(cb_arg, -ENOMEM); 8866 return; 8867 } 8868 ctx->bs = blob->bs; 8869 ctx->extent = extent; 8870 ctx->page = page; 8871 8872 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8873 cpl.u.blob_basic.cb_fn = cb_fn; 8874 cpl.u.blob_basic.cb_arg = cb_arg; 8875 8876 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8877 if (!seq) { 8878 free(ctx); 8879 cb_fn(cb_arg, -ENOMEM); 8880 return; 8881 } 8882 8883 assert(page); 8884 page->next = SPDK_INVALID_MD_PAGE; 8885 page->id = blob->id; 8886 page->sequence_num = 0; 8887 8888 blob_serialize_extent_page(blob, cluster_num, page); 8889 8890 page->crc = blob_md_page_calc_crc(page); 8891 8892 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true); 8893 8894 bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx); 8895 } 8896 8897 static void 8898 blob_insert_cluster_msg(void *arg) 8899 { 8900 struct spdk_blob_cluster_op_ctx *ctx = arg; 8901 uint32_t *extent_page; 8902 8903 ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 8904 if (ctx->rc != 0) { 8905 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8906 return; 8907 } 8908 8909 if (ctx->blob->use_extent_table == false) { 8910 /* Extent table is not used, proceed with sync of md that will only use extents_rle. */ 8911 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8912 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8913 return; 8914 } 8915 8916 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8917 if (*extent_page == 0) { 8918 /* Extent page requires allocation. 8919 * It was already claimed in the used_md_pages map and placed in ctx. */ 8920 assert(ctx->extent_page != 0); 8921 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8922 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 8923 blob_insert_new_ep_cb, ctx); 8924 } else { 8925 /* It is possible for original thread to allocate extent page for 8926 * different cluster in the same extent page. In such case proceed with 8927 * updating the existing extent page, but release the additional one. 
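 * The spare page is returned to the used_md_pages map under used_lock.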
*/
8928 if (ctx->extent_page != 0) {
8929 spdk_spin_lock(&ctx->blob->bs->used_lock);
8930 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8931 bs_release_md_page(ctx->blob->bs, ctx->extent_page);
8932 spdk_spin_unlock(&ctx->blob->bs->used_lock);
8933 ctx->extent_page = 0;
8934 }
8935 /* Extent page already allocated.
8936 * Every cluster allocation requires just an update of a single extent page. */
8937 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
8938 blob_op_cluster_msg_cb, ctx);
8939 }
8940 }
8941
8942 static void
8943 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
8944 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page,
8945 spdk_blob_op_complete cb_fn, void *cb_arg)
8946 {
8947 struct spdk_blob_cluster_op_ctx *ctx;
8948
8949 ctx = calloc(1, sizeof(*ctx));
8950 if (ctx == NULL) {
8951 cb_fn(cb_arg, -ENOMEM);
8952 return;
8953 }
8954
8955 ctx->thread = spdk_get_thread();
8956 ctx->blob = blob;
8957 ctx->cluster_num = cluster_num;
8958 ctx->cluster = cluster;
8959 ctx->extent_page = extent_page;
8960 ctx->page = page;
8961 ctx->cb_fn = cb_fn;
8962 ctx->cb_arg = cb_arg;
8963
8964 spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx);
8965 }
8966
8967 static void
8968 blob_free_cluster_msg(void *arg)
8969 {
8970 struct spdk_blob_cluster_op_ctx *ctx = arg;
8971 uint32_t *extent_page;
8972 uint32_t start_cluster_idx;
8973 bool free_extent_page = true;
8974 size_t i;
8975
8976 ctx->cluster = bs_lba_to_cluster(ctx->blob->bs, ctx->blob->active.clusters[ctx->cluster_num]);
8977
8978 /* There may have been concurrent unmaps of the same cluster; only the first one releases the cluster */
8979 if (ctx->cluster == 0) {
8980 blob_op_cluster_msg_cb(ctx, 0);
8981 return;
8982 }
8983
8984 ctx->blob->active.clusters[ctx->cluster_num] = 0;
8985 if (ctx->cluster != 0) {
8986 ctx->blob->active.num_allocated_clusters--;
8987 }
8988
8989 if (ctx->blob->use_extent_table == false) {
8990 /* Extent table is not used, proceed with sync of md that will only use extents_rle.
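 * The cluster itself is returned to the pool under used_lock before the sync.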
*/
8991 spdk_spin_lock(&ctx->blob->bs->used_lock);
8992 bs_release_cluster(ctx->blob->bs, ctx->cluster);
8993 spdk_spin_unlock(&ctx->blob->bs->used_lock);
8994 ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8995 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8996 return;
8997 }
8998
8999 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
9000
9001 /* There shouldn't be parallel release operations on the same cluster */
9002 assert(*extent_page == ctx->extent_page);
9003
9004 start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
9005 for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) {
9006 if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) {
9007 free_extent_page = false;
9008 break;
9009 }
9010 }
9011
9012 if (free_extent_page) {
9013 assert(ctx->extent_page != 0);
9014 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
9015 ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0;
9016 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
9017 blob_free_cluster_free_ep_cb, ctx);
9018 } else {
9019 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
9020 blob_free_cluster_update_ep_cb, ctx);
9021 }
9022 }
9023
9024
9025 static void
9026 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page,
9027 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
9028 {
9029 struct spdk_blob_cluster_op_ctx *ctx;
9030
9031 ctx = calloc(1, sizeof(*ctx));
9032 if (ctx == NULL) {
9033 cb_fn(cb_arg, -ENOMEM);
9034 return;
9035 }
9036
9037 ctx->thread = spdk_get_thread();
9038 ctx->blob = blob;
9039 ctx->cluster_num = cluster_num;
9040 ctx->extent_page = extent_page;
9041 ctx->page = page;
9042 ctx->cb_fn = cb_fn;
9043 ctx->cb_arg = cb_arg;
9044
9045 spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx);
9046 }
9047
9048 /* START spdk_blob_close */
9049
9050 static void
9051 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9052 {
9053 struct spdk_blob *blob = cb_arg;
9054
9055 if (bserrno == 0) {
9056 blob->open_ref--;
9057 if (blob->open_ref == 0) {
9058 /*
9059 * Blobs with active.num_pages == 0 are deleted blobs.
9060 * These blobs are removed from the blob_store list
9061 * when the deletion process starts - so don't try to
9062 * remove them again.
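 * In either case blob_free() releases the in-memory blob.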
9063 */ 9064 if (blob->active.num_pages > 0) { 9065 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 9066 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 9067 } 9068 blob_free(blob); 9069 } 9070 } 9071 9072 bs_sequence_finish(seq, bserrno); 9073 } 9074 9075 static void 9076 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno) 9077 { 9078 spdk_bs_sequence_t *seq = cb_arg; 9079 9080 if (bserrno != 0) { 9081 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n", 9082 blob->id, bserrno); 9083 bs_sequence_finish(seq, bserrno); 9084 return; 9085 } 9086 9087 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n", 9088 blob->id, spdk_thread_get_name(spdk_get_thread())); 9089 9090 /* Sync metadata */ 9091 blob_persist(seq, blob, blob_close_cpl, blob); 9092 } 9093 9094 void 9095 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 9096 { 9097 struct spdk_bs_cpl cpl; 9098 spdk_bs_sequence_t *seq; 9099 9100 blob_verify_md_op(blob); 9101 9102 SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id); 9103 9104 if (blob->open_ref == 0) { 9105 cb_fn(cb_arg, -EBADF); 9106 return; 9107 } 9108 9109 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 9110 cpl.u.blob_basic.cb_fn = cb_fn; 9111 cpl.u.blob_basic.cb_arg = cb_arg; 9112 9113 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 9114 if (!seq) { 9115 cb_fn(cb_arg, -ENOMEM); 9116 return; 9117 } 9118 9119 if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) { 9120 blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq); 9121 return; 9122 } 9123 9124 /* Sync metadata */ 9125 blob_persist(seq, blob, blob_close_cpl, blob); 9126 } 9127 9128 /* END spdk_blob_close */ 9129 9130 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 9131 { 9132 return spdk_get_io_channel(bs); 9133 } 9134 9135 void 9136 spdk_bs_free_io_channel(struct spdk_io_channel *channel) 9137 { 9138 blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel)); 9139 spdk_put_io_channel(channel); 9140 } 9141 9142 void 9143 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 9144 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9145 { 9146 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9147 SPDK_BLOB_UNMAP); 9148 } 9149 9150 void 9151 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 9152 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9153 { 9154 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9155 SPDK_BLOB_WRITE_ZEROES); 9156 } 9157 9158 void 9159 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 9160 void *payload, uint64_t offset, uint64_t length, 9161 spdk_blob_op_complete cb_fn, void *cb_arg) 9162 { 9163 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9164 SPDK_BLOB_WRITE); 9165 } 9166 9167 void 9168 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 9169 void *payload, uint64_t offset, uint64_t length, 9170 spdk_blob_op_complete cb_fn, void *cb_arg) 9171 { 9172 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9173 SPDK_BLOB_READ); 9174 } 9175 9176 void 9177 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 9178 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9179 spdk_blob_op_complete cb_fn, void *cb_arg) 
9180 { 9181 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 9182 } 9183 9184 void 9185 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 9186 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9187 spdk_blob_op_complete cb_fn, void *cb_arg) 9188 { 9189 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 9190 } 9191 9192 void 9193 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9194 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9195 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9196 { 9197 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 9198 io_opts); 9199 } 9200 9201 void 9202 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9203 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9204 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9205 { 9206 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 9207 io_opts); 9208 } 9209 9210 struct spdk_bs_iter_ctx { 9211 int64_t page_num; 9212 struct spdk_blob_store *bs; 9213 9214 spdk_blob_op_with_handle_complete cb_fn; 9215 void *cb_arg; 9216 }; 9217 9218 static void 9219 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 9220 { 9221 struct spdk_bs_iter_ctx *ctx = cb_arg; 9222 struct spdk_blob_store *bs = ctx->bs; 9223 spdk_blob_id id; 9224 9225 if (bserrno == 0) { 9226 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 9227 free(ctx); 9228 return; 9229 } 9230 9231 ctx->page_num++; 9232 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 9233 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 9234 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 9235 free(ctx); 9236 return; 9237 } 9238 9239 id = bs_page_to_blobid(ctx->page_num); 9240 9241 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 9242 } 9243 9244 void 9245 spdk_bs_iter_first(struct spdk_blob_store *bs, 9246 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9247 { 9248 struct spdk_bs_iter_ctx *ctx; 9249 9250 ctx = calloc(1, sizeof(*ctx)); 9251 if (!ctx) { 9252 cb_fn(cb_arg, NULL, -ENOMEM); 9253 return; 9254 } 9255 9256 ctx->page_num = -1; 9257 ctx->bs = bs; 9258 ctx->cb_fn = cb_fn; 9259 ctx->cb_arg = cb_arg; 9260 9261 bs_iter_cpl(ctx, NULL, -1); 9262 } 9263 9264 static void 9265 bs_iter_close_cpl(void *cb_arg, int bserrno) 9266 { 9267 struct spdk_bs_iter_ctx *ctx = cb_arg; 9268 9269 bs_iter_cpl(ctx, NULL, -1); 9270 } 9271 9272 void 9273 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 9274 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9275 { 9276 struct spdk_bs_iter_ctx *ctx; 9277 9278 assert(blob != NULL); 9279 9280 ctx = calloc(1, sizeof(*ctx)); 9281 if (!ctx) { 9282 cb_fn(cb_arg, NULL, -ENOMEM); 9283 return; 9284 } 9285 9286 ctx->page_num = bs_blobid_to_page(blob->id); 9287 ctx->bs = bs; 9288 ctx->cb_fn = cb_fn; 9289 ctx->cb_arg = cb_arg; 9290 9291 /* Close the existing blob */ 9292 spdk_blob_close(blob, bs_iter_close_cpl, ctx); 9293 } 9294 9295 static int 9296 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 9297 uint16_t value_len, bool internal) 9298 { 9299 struct spdk_xattr_tailq *xattrs; 9300 struct spdk_xattr *xattr; 9301 size_t desc_size; 9302 void *tmp; 9303 9304 blob_verify_md_op(blob); 9305 9306 if 
(blob->md_ro) {
9307 return -EPERM;
9308 }
9309
9310 desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
9311 if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
9312 SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into single page %zu\n", name,
9313 desc_size, SPDK_BS_MAX_DESC_SIZE);
9314 return -ENOMEM;
9315 }
9316
9317 if (internal) {
9318 xattrs = &blob->xattrs_internal;
9319 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
9320 } else {
9321 xattrs = &blob->xattrs;
9322 }
9323
9324 TAILQ_FOREACH(xattr, xattrs, link) {
9325 if (!strcmp(name, xattr->name)) {
9326 tmp = malloc(value_len);
9327 if (!tmp) {
9328 return -ENOMEM;
9329 }
9330
9331 free(xattr->value);
9332 xattr->value_len = value_len;
9333 xattr->value = tmp;
9334 memcpy(xattr->value, value, value_len);
9335
9336 blob->state = SPDK_BLOB_STATE_DIRTY;
9337
9338 return 0;
9339 }
9340 }
9341
9342 xattr = calloc(1, sizeof(*xattr));
9343 if (!xattr) {
9344 return -ENOMEM;
9345 }
9346
9347 xattr->name = strdup(name);
9348 if (!xattr->name) {
9349 free(xattr);
9350 return -ENOMEM;
9351 }
9352
9353 xattr->value_len = value_len;
9354 xattr->value = malloc(value_len);
9355 if (!xattr->value) {
9356 free(xattr->name);
9357 free(xattr);
9358 return -ENOMEM;
9359 }
9360 memcpy(xattr->value, value, value_len);
9361 TAILQ_INSERT_TAIL(xattrs, xattr, link);
9362
9363 blob->state = SPDK_BLOB_STATE_DIRTY;
9364
9365 return 0;
9366 }
9367
9368 int
9369 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
9370 uint16_t value_len)
9371 {
9372 return blob_set_xattr(blob, name, value, value_len, false);
9373 }
9374
9375 static int
9376 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
9377 {
9378 struct spdk_xattr_tailq *xattrs;
9379 struct spdk_xattr *xattr;
9380
9381 blob_verify_md_op(blob);
9382
9383 if (blob->md_ro) {
9384 return -EPERM;
9385 }
9386 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
9387
9388 TAILQ_FOREACH(xattr, xattrs, link) {
9389 if (!strcmp(name, xattr->name)) {
9390 TAILQ_REMOVE(xattrs, xattr, link);
9391 free(xattr->value);
9392 free(xattr->name);
9393 free(xattr);
9394
9395 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
9396 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
9397 }
9398 blob->state = SPDK_BLOB_STATE_DIRTY;
9399
9400 return 0;
9401 }
9402 }
9403
9404 return -ENOENT;
9405 }
9406
9407 int
9408 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
9409 {
9410 return blob_remove_xattr(blob, name, false);
9411 }
9412
9413 static int
9414 blob_get_xattr_value(struct spdk_blob *blob, const char *name,
9415 const void **value, size_t *value_len, bool internal)
9416 {
9417 struct spdk_xattr *xattr;
9418 struct spdk_xattr_tailq *xattrs;
9419
9420 xattrs = internal ?
&blob->xattrs_internal : &blob->xattrs; 9421 9422 TAILQ_FOREACH(xattr, xattrs, link) { 9423 if (!strcmp(name, xattr->name)) { 9424 *value = xattr->value; 9425 *value_len = xattr->value_len; 9426 return 0; 9427 } 9428 } 9429 return -ENOENT; 9430 } 9431 9432 int 9433 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 9434 const void **value, size_t *value_len) 9435 { 9436 blob_verify_md_op(blob); 9437 9438 return blob_get_xattr_value(blob, name, value, value_len, false); 9439 } 9440 9441 struct spdk_xattr_names { 9442 uint32_t count; 9443 const char *names[0]; 9444 }; 9445 9446 static int 9447 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 9448 { 9449 struct spdk_xattr *xattr; 9450 int count = 0; 9451 9452 TAILQ_FOREACH(xattr, xattrs, link) { 9453 count++; 9454 } 9455 9456 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 9457 if (*names == NULL) { 9458 return -ENOMEM; 9459 } 9460 9461 TAILQ_FOREACH(xattr, xattrs, link) { 9462 (*names)->names[(*names)->count++] = xattr->name; 9463 } 9464 9465 return 0; 9466 } 9467 9468 int 9469 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 9470 { 9471 blob_verify_md_op(blob); 9472 9473 return blob_get_xattr_names(&blob->xattrs, names); 9474 } 9475 9476 uint32_t 9477 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 9478 { 9479 assert(names != NULL); 9480 9481 return names->count; 9482 } 9483 9484 const char * 9485 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 9486 { 9487 if (index >= names->count) { 9488 return NULL; 9489 } 9490 9491 return names->names[index]; 9492 } 9493 9494 void 9495 spdk_xattr_names_free(struct spdk_xattr_names *names) 9496 { 9497 free(names); 9498 } 9499 9500 struct spdk_bs_type 9501 spdk_bs_get_bstype(struct spdk_blob_store *bs) 9502 { 9503 return bs->bstype; 9504 } 9505 9506 void 9507 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 9508 { 9509 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 9510 } 9511 9512 bool 9513 spdk_blob_is_read_only(struct spdk_blob *blob) 9514 { 9515 assert(blob != NULL); 9516 return (blob->data_ro || blob->md_ro); 9517 } 9518 9519 bool 9520 spdk_blob_is_snapshot(struct spdk_blob *blob) 9521 { 9522 struct spdk_blob_list *snapshot_entry; 9523 9524 assert(blob != NULL); 9525 9526 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 9527 if (snapshot_entry == NULL) { 9528 return false; 9529 } 9530 9531 return true; 9532 } 9533 9534 bool 9535 spdk_blob_is_clone(struct spdk_blob *blob) 9536 { 9537 assert(blob != NULL); 9538 9539 if (blob->parent_id != SPDK_BLOBID_INVALID && 9540 blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 9541 assert(spdk_blob_is_thin_provisioned(blob)); 9542 return true; 9543 } 9544 9545 return false; 9546 } 9547 9548 bool 9549 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 9550 { 9551 assert(blob != NULL); 9552 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 9553 } 9554 9555 bool 9556 spdk_blob_is_esnap_clone(const struct spdk_blob *blob) 9557 { 9558 return blob_is_esnap_clone(blob); 9559 } 9560 9561 static void 9562 blob_update_clear_method(struct spdk_blob *blob) 9563 { 9564 enum blob_clear_method stored_cm; 9565 9566 assert(blob != NULL); 9567 9568 /* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored 9569 * in metadata previously. If something other than the default was 9570 * specified, ignore the stored value and use what was passed in.
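 * For example (illustrative): a blob created with BLOB_CLEAR_WITH_UNMAP keeps using unmap when it is reopened with BLOB_CLEAR_WITH_DEFAULT, while reopening it with BLOB_CLEAR_WITH_WRITE_ZEROES overrides the stored method and logs a warning.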
9571 */ 9572 stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT); 9573 9574 if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) { 9575 blob->clear_method = stored_cm; 9576 } else if (blob->clear_method != stored_cm) { 9577 SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n", 9578 blob->clear_method, stored_cm); 9579 } 9580 } 9581 9582 spdk_blob_id 9583 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id) 9584 { 9585 struct spdk_blob_list *snapshot_entry = NULL; 9586 struct spdk_blob_list *clone_entry = NULL; 9587 9588 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 9589 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 9590 if (clone_entry->id == blob_id) { 9591 return snapshot_entry->id; 9592 } 9593 } 9594 } 9595 9596 return SPDK_BLOBID_INVALID; 9597 } 9598 9599 int 9600 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, 9601 size_t *count) 9602 { 9603 struct spdk_blob_list *snapshot_entry, *clone_entry; 9604 size_t n; 9605 9606 snapshot_entry = bs_get_snapshot_entry(bs, blobid); 9607 if (snapshot_entry == NULL) { 9608 *count = 0; 9609 return 0; 9610 } 9611 9612 if (ids == NULL || *count < snapshot_entry->clone_count) { 9613 *count = snapshot_entry->clone_count; 9614 return -ENOMEM; 9615 } 9616 *count = snapshot_entry->clone_count; 9617 9618 n = 0; 9619 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 9620 ids[n++] = clone_entry->id; 9621 } 9622 9623 return 0; 9624 } 9625 9626 static void 9627 bs_load_grow_continue(struct spdk_bs_load_ctx *ctx) 9628 { 9629 int rc; 9630 9631 if (ctx->super->size == 0) { 9632 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9633 } 9634 9635 if (ctx->super->io_unit_size == 0) { 9636 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 9637 } 9638 if (ctx->super->md_page_size == 0) { 9639 ctx->super->md_page_size = SPDK_BS_PAGE_SIZE; 9640 } 9641 9642 /* Parse the super block */ 9643 ctx->bs->clean = 1; 9644 ctx->bs->cluster_sz = ctx->super->cluster_size; 9645 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 9646 ctx->bs->md_page_size = ctx->super->md_page_size; 9647 ctx->bs->io_unit_size = ctx->super->io_unit_size; 9648 bs_init_per_cluster_fields(ctx->bs); 9649 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 9650 if (rc < 0) { 9651 bs_load_ctx_fail(ctx, -ENOMEM); 9652 return; 9653 } 9654 ctx->bs->md_start = ctx->super->md_start; 9655 ctx->bs->md_len = ctx->super->md_len; 9656 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 9657 if (rc < 0) { 9658 bs_load_ctx_fail(ctx, -ENOMEM); 9659 return; 9660 } 9661 9662 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 9663 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 9664 ctx->bs->super_blob = ctx->super->super_blob; 9665 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 9666 9667 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) { 9668 SPDK_ERRLOG("Cannot grow an unclean blobstore, please load it normally to clean it.\n"); 9669 bs_load_ctx_fail(ctx, -EIO); 9670 return; 9671 } else { 9672 bs_load_read_used_pages(ctx); 9673 } 9674 } 9675 9676 static void 9677 bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9678 { 9679 struct spdk_bs_load_ctx *ctx = cb_arg; 9680 9681 if (bserrno != 0) { 9682 bs_load_ctx_fail(ctx, bserrno); 9683 return; 9684 } 9685
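/* The grown super block is now on disk; continue with the normal load sequence. */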
bs_load_grow_continue(ctx); 9686 } 9687 9688 static void 9689 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9690 { 9691 struct spdk_bs_load_ctx *ctx = cb_arg; 9692 9693 if (bserrno != 0) { 9694 bs_load_ctx_fail(ctx, bserrno); 9695 return; 9696 } 9697 9698 spdk_free(ctx->mask); 9699 9700 bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 9701 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 9702 bs_load_grow_super_write_cpl, ctx); 9703 } 9704 9705 static void 9706 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9707 { 9708 struct spdk_bs_load_ctx *ctx = cb_arg; 9709 uint64_t lba, lba_count; 9710 uint64_t dev_size; 9711 uint64_t total_clusters; 9712 9713 if (bserrno != 0) { 9714 bs_load_ctx_fail(ctx, bserrno); 9715 return; 9716 } 9717 9718 /* The type must be correct */ 9719 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 9720 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 9721 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 9722 struct spdk_blob_md_page) * 8)); 9723 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9724 total_clusters = dev_size / ctx->super->cluster_size; 9725 ctx->mask->length = total_clusters; 9726 9727 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 9728 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 9729 bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count, 9730 bs_load_grow_used_clusters_write_cpl, ctx); 9731 } 9732 9733 static void 9734 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx) 9735 { 9736 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 9737 uint64_t lba, lba_count, mask_size; 9738 9739 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9740 total_clusters = dev_size / ctx->super->cluster_size; 9741 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 9742 spdk_divide_round_up(total_clusters, 8), 9743 ctx->super->md_page_size); 9744 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 9745 /* No need to grow, or no space to grow */ 9746 if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) { 9747 SPDK_DEBUGLOG(blob, "No grow\n"); 9748 bs_load_grow_continue(ctx); 9749 return; 9750 } 9751 9752 SPDK_DEBUGLOG(blob, "Resize blobstore\n"); 9753 9754 ctx->super->size = dev_size; 9755 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 9756 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 9757 9758 mask_size = used_cluster_mask_len * ctx->super->md_page_size; 9759 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 9760 SPDK_MALLOC_DMA); 9761 if (!ctx->mask) { 9762 bs_load_ctx_fail(ctx, -ENOMEM); 9763 return; 9764 } 9765 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 9766 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 9767 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 9768 bs_load_grow_used_clusters_read_cpl, ctx); 9769 } 9770 9771 static void 9772 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9773 { 9774 struct spdk_bs_load_ctx *ctx = cb_arg; 9775 int rc; 9776 9777 if (bserrno != 0) { bs_load_ctx_fail(ctx, bserrno); return; } rc = bs_super_validate(ctx->super, ctx->bs); 9778 if (rc != 0) { 9779 bs_load_ctx_fail(ctx, rc); 9780 return; 9781 } 9782 9783 bs_load_try_to_grow(ctx); 9784 } 9785 9786 struct
spdk_bs_grow_ctx { 9787 struct spdk_blob_store *bs; 9788 struct spdk_bs_super_block *super; 9789 9790 struct spdk_bit_pool *new_used_clusters; 9791 struct spdk_bs_md_mask *new_used_clusters_mask; 9792 9793 spdk_bs_sequence_t *seq; 9794 }; 9795 9796 static void 9797 bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno) 9798 { 9799 if (bserrno != 0) { 9800 spdk_bit_pool_free(&ctx->new_used_clusters); 9801 } 9802 9803 bs_sequence_finish(ctx->seq, bserrno); 9804 free(ctx->new_used_clusters_mask); 9805 spdk_free(ctx->super); 9806 free(ctx); 9807 } 9808 9809 static void 9810 bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9811 { 9812 struct spdk_bs_grow_ctx *ctx = cb_arg; 9813 struct spdk_blob_store *bs = ctx->bs; 9814 uint64_t total_clusters; 9815 9816 if (bserrno != 0) { 9817 bs_grow_live_done(ctx, bserrno); 9818 return; 9819 } 9820 9821 /* 9822 * Blobstore is not clean until unload; for now only the super block is up to date. 9823 * This is similar to the state right after blobstore init, when bs_write_used_md() has not 9824 * yet executed. 9825 * When cleanly unloaded, the used md pages will be written out. 9826 * In case of unclean shutdown, loading the blobstore will go through the recovery path, 9827 * correctly filling out used_clusters with the new size and writing it out. 9828 */ 9829 bs->clean = 0; 9830 9831 /* Reverting the super->size past this point is complex, so avoid any error paths 9832 * that require doing so. */ 9833 spdk_spin_lock(&bs->used_lock); 9834 9835 total_clusters = ctx->super->size / ctx->super->cluster_size; 9836 9837 assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters)); 9838 spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask); 9839 9840 assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters)); 9841 spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask); 9842 9843 spdk_bit_pool_free(&bs->used_clusters); 9844 bs->used_clusters = ctx->new_used_clusters; 9845 9846 bs->total_clusters = total_clusters; 9847 bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up( 9848 bs->md_start + bs->md_len, bs->pages_per_cluster); 9849 9850 bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters); 9851 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 9852 spdk_spin_unlock(&bs->used_lock); 9853 9854 bs_grow_live_done(ctx, 0); 9855 } 9856 9857 static void 9858 bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9859 { 9860 struct spdk_bs_grow_ctx *ctx = cb_arg; 9861 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 9862 int rc; 9863 9864 if (bserrno != 0) { 9865 bs_grow_live_done(ctx, bserrno); 9866 return; 9867 } 9868 9869 rc = bs_super_validate(ctx->super, ctx->bs); 9870 if (rc != 0) { 9871 bs_grow_live_done(ctx, rc); 9872 return; 9873 } 9874 9875 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9876 total_clusters = dev_size / ctx->super->cluster_size; 9877 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 9878 spdk_divide_round_up(total_clusters, 8), 9879 ctx->super->md_page_size); 9880 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 9881 /* Only check dev_size here, since it can change while total_clusters stays the same.
*/ 9882 if (dev_size == ctx->super->size) { 9883 SPDK_DEBUGLOG(blob, "No need to grow blobstore\n"); 9884 bs_grow_live_done(ctx, 0); 9885 return; 9886 } 9887 /* 9888 * Blobstore cannot be shrunk, so fail the grow if: 9889 * - the new size of the device is smaller than the size in the super_block 9890 * - the new total number of clusters is smaller than the used_clusters bit_pool 9891 * - there is not enough space in the metadata for the used_cluster_mask to be written out 9892 */ 9893 if (dev_size < ctx->super->size || 9894 total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) || 9895 used_cluster_mask_len > max_used_cluster_mask) { 9896 SPDK_DEBUGLOG(blob, "No space to grow blobstore\n"); 9897 bs_grow_live_done(ctx, -ENOSPC); 9898 return; 9899 } 9900 9901 SPDK_DEBUGLOG(blob, "Resizing blobstore\n"); 9902 9903 ctx->new_used_clusters_mask = calloc(1, total_clusters); 9904 if (!ctx->new_used_clusters_mask) { 9905 bs_grow_live_done(ctx, -ENOMEM); 9906 return; 9907 } 9908 ctx->new_used_clusters = spdk_bit_pool_create(total_clusters); 9909 if (!ctx->new_used_clusters) { 9910 bs_grow_live_done(ctx, -ENOMEM); 9911 return; 9912 } 9913 9914 ctx->super->clean = 0; 9915 ctx->super->size = dev_size; 9916 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 9917 bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx); 9918 } 9919 9920 void 9921 spdk_bs_grow_live(struct spdk_blob_store *bs, 9922 spdk_bs_op_complete cb_fn, void *cb_arg) 9923 { 9924 struct spdk_bs_cpl cpl; 9925 struct spdk_bs_grow_ctx *ctx; 9926 9927 assert(spdk_get_thread() == bs->md_thread); 9928 9929 SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev); 9930 9931 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 9932 cpl.u.bs_basic.cb_fn = cb_fn; 9933 cpl.u.bs_basic.cb_arg = cb_arg; 9934 9935 ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx)); 9936 if (!ctx) { 9937 cb_fn(cb_arg, -ENOMEM); 9938 return; 9939 } 9940 ctx->bs = bs; 9941 9942 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 9943 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 9944 if (!ctx->super) { 9945 free(ctx); 9946 cb_fn(cb_arg, -ENOMEM); 9947 return; 9948 } 9949 9950 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 9951 if (!ctx->seq) { 9952 spdk_free(ctx->super); 9953 free(ctx); 9954 cb_fn(cb_arg, -ENOMEM); 9955 return; 9956 } 9957 9958 /* Read the super block */ 9959 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 9960 bs_byte_to_lba(bs, sizeof(*ctx->super)), 9961 bs_grow_live_load_super_cpl, ctx); 9962 } 9963 9964 void 9965 spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 9966 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 9967 { 9968 struct spdk_blob_store *bs; 9969 struct spdk_bs_cpl cpl; 9970 struct spdk_bs_load_ctx *ctx; 9971 struct spdk_bs_opts opts = {}; 9972 int err; 9973 9974 SPDK_DEBUGLOG(blob, "Growing blobstore from dev %p\n", dev); 9975 9976 if ((dev->phys_blocklen % dev->blocklen) != 0) { 9977 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 9978 dev->destroy(dev); 9979 cb_fn(cb_arg, NULL, -EINVAL); 9980 return; 9981 } 9982 9983 spdk_bs_opts_init(&opts, sizeof(opts)); 9984 if (o) { 9985 if (bs_opts_copy(o, &opts)) { dev->destroy(dev); cb_fn(cb_arg, NULL, -EINVAL); 9986 return; 9987 } 9988 } 9989 9990 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 9991 dev->destroy(dev); 9992 cb_fn(cb_arg, NULL, -EINVAL); 9993 return; 9994 } 9995 9996 err = bs_alloc(dev, &opts, &bs, &ctx); 9997 if (err) { 9998 dev->destroy(dev); 9999 cb_fn(cb_arg, NULL, err); 10000 return; 10001 } 10002 10003 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 10004
cpl.u.bs_handle.cb_fn = cb_fn; 10005 cpl.u.bs_handle.cb_arg = cb_arg; 10006 cpl.u.bs_handle.bs = bs; 10007 10008 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 10009 if (!ctx->seq) { 10010 spdk_free(ctx->super); 10011 free(ctx); 10012 bs_free(bs); 10013 cb_fn(cb_arg, NULL, -ENOMEM); 10014 return; 10015 } 10016 10017 /* Read the super block */ 10018 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 10019 bs_byte_to_lba(bs, sizeof(*ctx->super)), 10020 bs_grow_load_super_cpl, ctx); 10021 } 10022 10023 int 10024 spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len) 10025 { 10026 if (!blob_is_esnap_clone(blob)) { 10027 return -EINVAL; 10028 } 10029 10030 return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true); 10031 } 10032 10033 struct spdk_io_channel * 10034 blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob) 10035 { 10036 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(ch); 10037 struct spdk_bs_dev *bs_dev = blob->back_bs_dev; 10038 struct blob_esnap_channel find = {}; 10039 struct blob_esnap_channel *esnap_channel, *existing; 10040 10041 find.blob_id = blob->id; 10042 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 10043 if (spdk_likely(esnap_channel != NULL)) { 10044 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n", 10045 blob->id, spdk_thread_get_name(spdk_get_thread())); 10046 return esnap_channel->channel; 10047 } 10048 10049 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n", 10050 blob->id, spdk_thread_get_name(spdk_get_thread())); 10051 10052 esnap_channel = calloc(1, sizeof(*esnap_channel)); 10053 if (esnap_channel == NULL) { 10054 SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n", 10055 find.blob_id); 10056 return NULL; 10057 } 10058 esnap_channel->channel = bs_dev->create_channel(bs_dev); 10059 if (esnap_channel->channel == NULL) { 10060 SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id); 10061 free(esnap_channel); 10062 return NULL; 10063 } 10064 esnap_channel->blob_id = find.blob_id; 10065 existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 10066 if (spdk_unlikely(existing != NULL)) { 10067 /* 10068 * This should be unreachable: all modifications to this tree happen on this thread. 10069 */ 10070 SPDK_ERRLOG("blob 0x%" PRIx64 ": lost race to allocate a channel\n", find.blob_id); 10071 assert(false); 10072 10073 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 10074 free(esnap_channel); 10075 10076 return existing->channel; 10077 } 10078 10079 return esnap_channel->channel; 10080 } 10081 10082 static int 10083 blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2) 10084 { 10085 return (c1->blob_id < c2->blob_id ?
-1 : c1->blob_id > c2->blob_id); 10086 } 10087 10088 struct blob_esnap_destroy_ctx { 10089 spdk_blob_op_with_handle_complete cb_fn; 10090 void *cb_arg; 10091 struct spdk_blob *blob; 10092 struct spdk_bs_dev *back_bs_dev; 10093 bool abort_io; 10094 }; 10095 10096 static void 10097 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status) 10098 { 10099 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 10100 struct spdk_blob *blob = ctx->blob; 10101 struct spdk_blob_store *bs = blob->bs; 10102 10103 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n", 10104 blob->id); 10105 10106 if (ctx->cb_fn != NULL) { 10107 ctx->cb_fn(ctx->cb_arg, blob, status); 10108 } 10109 free(ctx); 10110 10111 bs->esnap_channels_unloading--; 10112 if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) { 10113 spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg); 10114 } 10115 } 10116 10117 static void 10118 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i) 10119 { 10120 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 10121 struct spdk_blob *blob = ctx->blob; 10122 struct spdk_bs_dev *bs_dev = ctx->back_bs_dev; 10123 struct spdk_io_channel *channel = spdk_io_channel_iter_get_channel(i); 10124 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(channel); 10125 struct blob_esnap_channel *esnap_channel; 10126 struct blob_esnap_channel find = {}; 10127 10128 assert(spdk_get_thread() == spdk_io_channel_get_thread(channel)); 10129 10130 find.blob_id = blob->id; 10131 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 10132 if (esnap_channel != NULL) { 10133 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n", 10134 blob->id, spdk_thread_get_name(spdk_get_thread())); 10135 RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 10136 10137 if (ctx->abort_io) { 10138 spdk_bs_user_op_t *op, *tmp; 10139 10140 TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) { 10141 if (op->back_channel == esnap_channel->channel) { 10142 TAILQ_REMOVE(&bs_channel->queued_io, op, link); 10143 bs_user_op_abort(op, -EIO); 10144 } 10145 } 10146 } 10147 10148 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 10149 free(esnap_channel); 10150 } 10151 10152 spdk_for_each_channel_continue(i, 0); 10153 } 10154 10155 /* 10156 * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be 10157 * used when closing an esnap clone blob and after decoupling from the parent. 
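 * The destruction is fanned out with spdk_for_each_channel(): blob_esnap_destroy_one_channel runs on each thread that owns a bs_channel, and blob_esnap_destroy_channels_done runs once after all of them have finished.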
10158 */ 10159 static void 10160 blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io, 10161 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 10162 { 10163 struct blob_esnap_destroy_ctx *ctx; 10164 10165 if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) { 10166 if (cb_fn != NULL) { 10167 cb_fn(cb_arg, blob, 0); 10168 } 10169 return; 10170 } 10171 10172 ctx = calloc(1, sizeof(*ctx)); 10173 if (ctx == NULL) { 10174 if (cb_fn != NULL) { 10175 cb_fn(cb_arg, blob, -ENOMEM); 10176 } 10177 return; 10178 } 10179 ctx->cb_fn = cb_fn; 10180 ctx->cb_arg = cb_arg; 10181 ctx->blob = blob; 10182 ctx->back_bs_dev = blob->back_bs_dev; 10183 ctx->abort_io = abort_io; 10184 10185 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n", 10186 blob->id); 10187 10188 blob->bs->esnap_channels_unloading++; 10189 spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx, 10190 blob_esnap_destroy_channels_done); 10191 } 10192 10193 /* 10194 * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a 10195 * bs_channel is destroyed. 10196 */ 10197 static void 10198 blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch) 10199 { 10200 struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp; 10201 10202 assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch))); 10203 10204 SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n", 10205 spdk_thread_get_name(spdk_get_thread())); 10206 RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels, 10207 esnap_channel_tmp) { 10208 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 10209 ": destroying one channel in thread %s\n", 10210 esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread())); 10211 RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel); 10212 spdk_put_io_channel(esnap_channel->channel); 10213 free(esnap_channel); 10214 } 10215 SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n", 10216 spdk_thread_get_name(spdk_get_thread())); 10217 } 10218 10219 static void 10220 blob_set_back_bs_dev_done(void *_ctx, int bserrno) 10221 { 10222 struct set_bs_dev_ctx *ctx = _ctx; 10223 10224 if (bserrno != 0) { 10225 /* Even though the unfreeze failed, the update may have succeeded.
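 * Report ctx->bserrno, which holds the result of the update itself, rather than the unfreeze error.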
*/ 10226 SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id, 10227 bserrno); 10228 } 10229 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 10230 free(ctx); 10231 } 10232 10233 static void 10234 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno) 10235 { 10236 struct set_bs_dev_ctx *ctx = _ctx; 10237 int rc; 10238 10239 if (bserrno != 0) { 10240 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n", 10241 blob->id, bserrno); 10242 ctx->bserrno = bserrno; 10243 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10244 return; 10245 } 10246 10247 if (blob->back_bs_dev != NULL) { 10248 blob_unref_back_bs_dev(blob); 10249 } 10250 10251 if (ctx->parent_refs_cb_fn) { 10252 rc = ctx->parent_refs_cb_fn(blob, ctx->parent_refs_cb_arg); 10253 if (rc != 0) { 10254 ctx->bserrno = rc; 10255 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10256 return; 10257 } 10258 } 10259 10260 SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id); 10261 blob->back_bs_dev = ctx->back_bs_dev; 10262 ctx->bserrno = 0; 10263 10264 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 10265 } 10266 10267 static void 10268 blob_set_back_bs_dev_frozen(void *_ctx, int bserrno) 10269 { 10270 struct set_bs_dev_ctx *ctx = _ctx; 10271 struct spdk_blob *blob = ctx->blob; 10272 10273 if (bserrno != 0) { 10274 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id, 10275 bserrno); 10276 ctx->cb_fn(ctx->cb_arg, bserrno); 10277 free(ctx); 10278 return; 10279 } 10280 10281 /* 10282 * This does not prevent future reads from the esnap device because any future IO will 10283 * lazily create a new esnap IO channel. 10284 */ 10285 blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx); 10286 } 10287 10288 void 10289 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev, 10290 spdk_blob_op_complete cb_fn, void *cb_arg) 10291 { 10292 if (!blob_is_esnap_clone(blob)) { 10293 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 10294 cb_fn(cb_arg, -EINVAL); 10295 return; 10296 } 10297 10298 blob_set_back_bs_dev(blob, back_bs_dev, NULL, NULL, cb_fn, cb_arg); 10299 } 10300 10301 struct spdk_bs_dev * 10302 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob) 10303 { 10304 if (!blob_is_esnap_clone(blob)) { 10305 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 10306 return NULL; 10307 } 10308 10309 return blob->back_bs_dev; 10310 } 10311 10312 bool 10313 spdk_blob_is_degraded(const struct spdk_blob *blob) 10314 { 10315 if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) { 10316 return true; 10317 } 10318 if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) { 10319 return false; 10320 } 10321 10322 return blob->back_bs_dev->is_degraded(blob->back_bs_dev); 10323 } 10324 10325 SPDK_LOG_REGISTER_COMPONENT(blob) 10326 SPDK_LOG_REGISTER_COMPONENT(blob_esnap) 10327 10328 static void 10329 blob_trace(void) 10330 { 10331 struct spdk_trace_tpoint_opts opts[] = { 10332 { 10333 "BLOB_REQ_SET_START", TRACE_BLOB_REQ_SET_START, 10334 OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 1, 10335 { 10336 { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 } 10337 } 10338 }, 10339 { 10340 "BLOB_REQ_SET_COMPLETE", TRACE_BLOB_REQ_SET_COMPLETE, 10341 OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 0, 10342 { 10343 { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 } 10344 } 10345 }, 10346 }; 10347 10348 spdk_trace_register_object(OBJECT_BLOB_CB_ARG, 'a'); 
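	/* Register the tracepoint descriptions above and relate the bdev IO start/done tracepoints to OBJECT_BLOB_CB_ARG so that blob and bdev traces can be correlated. */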
10349 spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts)); 10350 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_START, OBJECT_BLOB_CB_ARG, 1); 10351 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_DONE, OBJECT_BLOB_CB_ARG, 0); 10352 } 10353 SPDK_TRACE_REGISTER_FN(blob_trace, "blob", TRACE_GROUP_BLOB) 10354
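/*
 * Usage sketch (illustrative only, not part of this file): walking every blob
 * in a blobstore with spdk_bs_iter_first()/spdk_bs_iter_next() and reading a
 * user xattr from each one. Assumes "bs" was obtained from spdk_bs_load() and
 * that this runs on the blobstore's metadata thread; the xattr name "name" is
 * a hypothetical example.
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *		const void *value;
 *		size_t len;
 *
 *		if (bserrno != 0) {
 *			return;	(-ENOENT simply means the iteration is complete)
 *		}
 *		if (spdk_blob_get_xattr_value(blob, "name", &value, &len) == 0) {
 *			printf("blob 0x%" PRIx64 ": name=%.*s\n",
 *			       spdk_blob_get_id(blob), (int)len, (const char *)value);
 *		}
 *		(spdk_bs_iter_next() closes "blob" before opening the next one)
 *		spdk_bs_iter_next(bs, blob, iter_cb, bs);
 *	}
 *
 *	...
 *	spdk_bs_iter_first(bs, iter_cb, bs);
 */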