/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/trace.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/trace_defs.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg);

static void bs_shallow_copy_cluster_find_next(void *cb_arg);

/*
 * External snapshots require a channel per thread per esnap bdev. The tree
 * is populated lazily as blob IOs are handled by the back_bs_dev. When this
 * channel is destroyed, all the channels in the tree are destroyed.
 */

struct blob_esnap_channel {
	RB_ENTRY(blob_esnap_channel) node;
	spdk_blob_id	blob_id;
	struct spdk_io_channel *channel;
};

static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
static void blob_set_back_bs_dev_frozen(void *_ctx, int bserrno);
RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)

static inline bool
blob_is_esnap_clone(const struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
}

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	assert(blob1 != NULL && blob2 != NULL);
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

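/*
 * Illustrative sketch (not part of this file's API): with the comparator above,
 * an open blob can be looked up by id using a stack key, e.g.:
 *
 *	struct spdk_blob find;
 *
 *	find.id = blobid;
 *	blob = RB_FIND(spdk_blob_tree, &bs->open_blobs, &find);
 *
 * The open_blobs member name is an assumption here; only the comparator and
 * the RB_GENERATE_STATIC below are defined in this file.
 */
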
RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	assert(spdk_spin_held(&bs->used_lock));

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	blob->active.num_allocated_clusters++;

	return 0;
}

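/*
 * Usage note (illustrative): the claim/release helpers above assume the caller
 * already holds bs->used_lock, matching their asserts, e.g.:
 *
 *	spdk_spin_lock(&bs->used_lock);
 *	cluster_num = bs_claim_cluster(bs);
 *	spdk_spin_unlock(&bs->used_lock);
 *
 * bs_allocate_cluster() below follows this pattern and additionally claims an
 * extent page when the extent table is in use.
 */
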
static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = 0;

	assert(spdk_spin_held(&blob->bs->used_lock));

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* Extent page shall never occupy md_page so start the search from 1 */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
		      blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}

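/*
 * Illustrative caller-side sketch: opts are sized explicitly so that older
 * callers remain compatible when new fields are appended to the struct. The
 * completion callback name is hypothetical; spdk_bs_create_blob_ext() is the
 * public API this initializer pairs with.
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 4;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_create_done, cb_arg);
 */
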
void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}

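/*
 * Illustrative caller-side sketch, mirroring the create path above; the
 * completion callback name is hypothetical:
 *
 *	struct spdk_blob_open_opts opts;
 *
 *	spdk_blob_open_opts_init(&opts, sizeof(opts));
 *	opts.clear_method = BLOB_CLEAR_WITH_UNMAP;
 *	spdk_bs_open_blob_ext(bs, blobid, &opts, blob_open_done, cb_arg);
 */
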
static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

static void
blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_dev *bs_dev = ctx;

	if (bserrno != 0) {
		/*
		 * This is probably due to a memory allocation failure when creating the
		 * blob_esnap_destroy_ctx before iterating threads.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
			    blob->id, bserrno);
		assert(false);
	}

	if (bs_dev == NULL) {
		/*
		 * This check exists to make scanbuild happy.
		 *
		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
		 * the blobstore is being loaded. It could also be NULL if there was an error
		 * opening the esnap device. In each of these cases, no channels could have been
		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
		 * deref.
		 */
		assert(false);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
	bs_dev->destroy(bs_dev);
}

static void
blob_back_bs_destroy(struct spdk_blob *blob)
{
	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
		      blob->id);

	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
					   blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

struct blob_parent {
	union {
		struct {
			spdk_blob_id id;
			struct spdk_blob *blob;
		} snapshot;

		struct {
			void *id;
			uint32_t id_len;
			struct spdk_bs_dev *back_bs_dev;
		} esnap;
	} u;
};

typedef int (*set_parent_refs_cb)(struct spdk_blob *blob, struct blob_parent *parent);

struct set_bs_dev_ctx {
	struct spdk_blob *blob;
	struct spdk_bs_dev *back_bs_dev;

	/*
	 * This callback is used during a set parent operation to change the references
	 * to the parent of the blob.
	 */
	set_parent_refs_cb parent_refs_cb_fn;
	struct blob_parent *parent_refs_cb_arg;

	spdk_blob_op_complete cb_fn;
	void *cb_arg;
	int bserrno;
};

static void
blob_set_back_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
		     set_parent_refs_cb parent_refs_cb_fn, struct blob_parent *parent_refs_cb_arg,
		     spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct set_bs_dev_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n",
			    blob->id);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->parent_refs_cb_fn = parent_refs_cb_fn;
	ctx->parent_refs_cb_arg = parent_refs_cb_arg;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->back_bs_dev = back_bs_dev;
	ctx->blob = blob;

	blob_freeze_io(blob, blob_set_back_bs_dev_frozen, ctx);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
}

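/*
 * Usage note (illustrative): freeze/unfreeze calls nest via frozen_refcnt, so
 * every blob_freeze_io() must eventually be paired with a blob_unfreeze_io().
 * While frozen, new I/O is parked on each channel's queued_io list; unfreezing
 * replays it through blob_execute_queued_io() above. A typical md-thread
 * sequence (callback names hypothetical):
 *
 *	blob_freeze_io(blob, do_md_change, ctx);
 *	... mutate blob metadata inside do_md_change ...
 *	blob_unfreeze_io(blob, md_change_done, ctx);
 */
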
static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_allocated_clusters = blob->active.num_allocated_clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}

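/*
 * On-disk layout sketch for reference: an xattr descriptor is a
 * spdk_blob_md_descriptor header followed by name_length, value_length, the
 * unterminated name bytes, then the value bytes. For the name "foo" and a
 * 4-byte value, the payload validated above is
 *
 *	sizeof(name_length) + sizeof(value_length) + 3 + 4
 *
 * which is exactly what desc_xattr->length must equal.
 */
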
static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* An Extent Table is already present in the md;
				 * the two descriptors should never be present at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
						blob->active.num_allocated_clusters++;
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in the MD;
				 * the two descriptors should never be present at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match number
				 * from previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			unsigned int i;
			unsigned int cluster_count = 0;
			size_t cluster_idx_length;

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in the MD;
				 * the two descriptors should never be present at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should
			 * match the current size of the blob.
			 * If changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
					blob->active.num_allocated_clusters++;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify whether
			 * this blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

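/*
 * Example of the descriptor stream parsed above (illustrative): a freshly
 * created two-cluster thick blob typically serializes its md page as
 *
 *	FLAGS | EXTENT_TABLE (num_clusters = 2) | PADDING...
 *
 * or, with use_extent_table disabled,
 *
 *	FLAGS | EXTENT_RLE { cluster_idx, length = 2 } | PADDING...
 *
 * A zero-length PADDING descriptor terminates the page early; otherwise the
 * walk stops when the next header would not fit in page->descriptors.
 */
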
static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD, this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

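/*
 * Note on the page chain (illustrative): pages link through page->next, which
 * holds the md page index of the following page, or SPDK_INVALID_MD_PAGE at
 * the tail. After a second page is added one would expect, e.g.:
 *
 *	pages[0].sequence_num == 0, pages[0].next == <md page of pages[1]>
 *	pages[1].sequence_num == 1, pages[1].next == SPDK_INVALID_MD_PAGE
 *
 * blob_serialize_add_page() itself always initializes next to
 * SPDK_INVALID_MD_PAGE; the real next pointers are filled in later, when the
 * persist path assigns md page slots.
 */
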
/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr *desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}

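/*
 * Worked example (illustrative): extent_pages = { 37, 0, 0, 0, 41 } serializes
 * to three extent_page entries, with the zero run collapsed:
 *
 *	{ page_idx = 37, num_pages = 1 }
 *	{ page_idx = 0,  num_pages = 3 }
 *	{ page_idx = 41, num_pages = 1 }
 *
 * Only unallocated (zero) entries are run-length encoded; allocated entries
 * always carry num_pages == 1, matching the assert in the parser above.
 */
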
static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least a single extent table entry always has to be persisted.
	 * Such a case occurs with num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
	/* Assert for scan-build false positive */
	assert(lba_per_cluster > 0);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr *xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob *blob;

	struct spdk_blob_md_page *pages;
	uint32_t num_pages;
	uint32_t next_extent_page;
	spdk_bs_sequence_t *seq;

	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
};

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot fail\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);

static int
blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
{
	struct spdk_blob_store *bs = blob->bs;
	struct spdk_bs_dev *bs_dev = NULL;
	const void *esnap_id = NULL;
	size_t id_len = 0;
	int rc;

	if (bs->esnap_bs_dev_create == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
			       "without support for esnap clones\n", blob->id);
		return -ENOTSUP;
	}
	assert(blob->back_bs_dev == NULL);

	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
	if (rc != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
		return -EINVAL;
	}
	assert(id_len > 0 && id_len < UINT32_MAX);

	SPDK_INFOLOG(blob, "Creating external snapshot device\n");

	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
				     &bs_dev);
	if (rc != 0) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
			      "with error %d\n", blob->id, rc);
		return rc;
	}

	/*
	 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot.
	 * This especially might happen during spdk_bs_load() iteration.
	 */
	if (bs_dev != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
				       "is not compatible with blobstore block size %u\n",
				       blob->id, bs_dev->blocklen, bs->io_unit_size);
			bs_dev->destroy(bs_dev);
			return -EINVAL;
		}
	}

	blob->back_bs_dev = bs_dev;
	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;

	return 0;
}

static void
blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	const void *value;
	size_t len;
	int rc;

	if (blob_is_esnap_clone(blob)) {
		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
		blob_load_final(ctx, rc);
		return;
	}

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}

static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	uint64_t i;
	uint32_t crc;
	uint64_t lba;
	void *tmp;
	uint64_t sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case blob size should be increased by up to the amount left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(seq, ctx);
}

static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	int rc;
	uint32_t crc;
	uint32_t current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE was found, or no extent_* descriptor at all, disable support
		 * for the extent table. Having no extent_* descriptors means that the blob
		 * has a length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in the metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(seq, ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob *blob;

	struct spdk_blob_md_page *pages;
	uint32_t next_extent_page;
	struct spdk_blob_md_page *extent_page;

	spdk_bs_sequence_t *seq;
	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

static void
bs_batch_clear_dev(struct spdk_blob *blob, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

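/*
 * Mapping note (illustrative): the clear_method selected at create/open time
 * decides what "clearing" a cluster means on the device:
 *
 *	BLOB_CLEAR_WITH_DEFAULT / BLOB_CLEAR_WITH_UNMAP  -> unmap
 *	BLOB_CLEAR_WITH_WRITE_ZEROES                     -> write_zeroes
 *	BLOB_CLEAR_WITH_NONE                             -> no device I/O
 *
 * so a blob deleted with BLOB_CLEAR_WITH_NONE gives its clusters back to the
 * allocator without touching their old contents.
 */
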
static int
bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
{
	uint32_t crc;
	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];

	if (super->version > SPDK_BS_VERSION ||
	    super->version < SPDK_BS_INITIAL_VERSION) {
		return -EILSEQ;
	}

	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(super->signature)) != 0) {
		return -EILSEQ;
	}

	crc = blob_md_page_calc_crc(super);
	if (crc != super->crc) {
		return -EILSEQ;
	}

	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless of bstype\n");
	} else {
		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		return -ENXIO;
	}

	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
		return -EILSEQ;
	}

	return 0;
}

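/*
 * Writer-side counterpart (illustrative): the super block is checksummed with
 * the same page CRC helper before it is written, along the lines of:
 *
 *	super->crc = blob_md_page_calc_crc(super);
 *
 * so a stale or torn super block fails the crc comparison above and the load
 * is rejected with -EILSEQ.
 */
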
static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx *next_persist, *tmp;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
}

spdk_blob_store *bs = blob->bs; 1952 size_t i; 1953 uint64_t lba; 1954 uint64_t lba_count; 1955 spdk_bs_batch_t *batch; 1956 1957 batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx); 1958 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 1959 1960 /* Clear all extent_pages that were truncated */ 1961 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1962 /* Nothing to clear if it was not allocated */ 1963 if (blob->active.extent_pages[i] != 0) { 1964 lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]); 1965 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1966 } 1967 } 1968 1969 bs_batch_close(batch); 1970 } 1971 1972 static void 1973 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1974 { 1975 struct spdk_blob_persist_ctx *ctx = cb_arg; 1976 struct spdk_blob *blob = ctx->blob; 1977 struct spdk_blob_store *bs = blob->bs; 1978 size_t i; 1979 1980 if (bserrno != 0) { 1981 blob_persist_complete(seq, ctx, bserrno); 1982 return; 1983 } 1984 1985 spdk_spin_lock(&bs->used_lock); 1986 /* Release all clusters that were truncated */ 1987 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1988 uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]); 1989 1990 /* Nothing to release if it was not allocated */ 1991 if (blob->active.clusters[i] != 0) { 1992 bs_release_cluster(bs, cluster_num); 1993 } 1994 } 1995 spdk_spin_unlock(&bs->used_lock); 1996 1997 if (blob->active.num_clusters == 0) { 1998 free(blob->active.clusters); 1999 blob->active.clusters = NULL; 2000 blob->active.cluster_array_size = 0; 2001 } else if (blob->active.num_clusters != blob->active.cluster_array_size) { 2002 #ifndef __clang_analyzer__ 2003 void *tmp; 2004 2005 /* scan-build really can't figure reallocs, workaround it */ 2006 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters); 2007 assert(tmp != NULL); 2008 blob->active.clusters = tmp; 2009 2010 #endif 2011 blob->active.cluster_array_size = blob->active.num_clusters; 2012 } 2013 2014 /* Move on to clearing extent pages */ 2015 blob_persist_clear_extents(seq, ctx); 2016 } 2017 2018 static void 2019 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2020 { 2021 struct spdk_blob *blob = ctx->blob; 2022 struct spdk_blob_store *bs = blob->bs; 2023 spdk_bs_batch_t *batch; 2024 size_t i; 2025 uint64_t lba; 2026 uint64_t lba_count; 2027 2028 /* Clusters don't move around in blobs. The list shrinks or grows 2029 * at the end, but no changes ever occur in the middle of the list. 2030 */ 2031 2032 batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx); 2033 2034 /* Clear all clusters that were truncated */ 2035 lba = 0; 2036 lba_count = 0; 2037 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 2038 uint64_t next_lba = blob->active.clusters[i]; 2039 uint64_t next_lba_count = bs_cluster_to_lba(bs, 1); 2040 2041 if (next_lba > 0 && (lba + lba_count) == next_lba) { 2042 /* This cluster is contiguous with the previous one. */ 2043 lba_count += next_lba_count; 2044 continue; 2045 } else if (next_lba == 0) { 2046 continue; 2047 } 2048 2049 /* This cluster is not contiguous with the previous one. 
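* Contiguous truncated clusters were coalesced into a single run above; flushing one run at a time keeps the number of clear I/Os down.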
*/ 2050 2051 /* If a run of LBAs was previously accumulated, clear it now */ 2052 if (lba_count > 0) { 2053 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2054 } 2055 2056 /* Start building the next batch */ 2057 lba = next_lba; 2058 if (next_lba > 0) { 2059 lba_count = next_lba_count; 2060 } else { 2061 lba_count = 0; 2062 } 2063 } 2064 2065 /* If we ended with a contiguous set of LBAs, clear them now */ 2066 if (lba_count > 0) { 2067 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2068 } 2069 2070 bs_batch_close(batch); 2071 } 2072 2073 static void 2074 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2075 { 2076 struct spdk_blob_persist_ctx *ctx = cb_arg; 2077 struct spdk_blob *blob = ctx->blob; 2078 struct spdk_blob_store *bs = blob->bs; 2079 size_t i; 2080 2081 if (bserrno != 0) { 2082 blob_persist_complete(seq, ctx, bserrno); 2083 return; 2084 } 2085 2086 spdk_spin_lock(&bs->used_lock); 2087 2088 /* This loop starts at 1 because the first page is special and handled 2089 * below. The pages (except the first) are never written in place, 2090 * so any pages in the clean list must be zeroed. 2091 */ 2092 for (i = 1; i < blob->clean.num_pages; i++) { 2093 bs_release_md_page(bs, blob->clean.pages[i]); 2094 } 2095 2096 if (blob->active.num_pages == 0) { 2097 uint32_t page_num; 2098 2099 page_num = bs_blobid_to_page(blob->id); 2100 bs_release_md_page(bs, page_num); 2101 } 2102 2103 spdk_spin_unlock(&bs->used_lock); 2104 2105 /* Move on to clearing clusters */ 2106 blob_persist_clear_clusters(seq, ctx); 2107 } 2108 2109 static void 2110 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2111 { 2112 struct spdk_blob_persist_ctx *ctx = cb_arg; 2113 struct spdk_blob *blob = ctx->blob; 2114 struct spdk_blob_store *bs = blob->bs; 2115 uint64_t lba; 2116 uint64_t lba_count; 2117 spdk_bs_batch_t *batch; 2118 size_t i; 2119 2120 if (bserrno != 0) { 2121 blob_persist_complete(seq, ctx, bserrno); 2122 return; 2123 } 2124 2125 batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx); 2126 2127 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 2128 2129 /* This loop starts at 1 because the first page is special and handled 2130 * below. The pages (except the first) are never written in place, 2131 * so any pages in the clean list must be zeroed. 2132 */ 2133 for (i = 1; i < blob->clean.num_pages; i++) { 2134 lba = bs_md_page_to_lba(bs, blob->clean.pages[i]); 2135 2136 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2137 } 2138 2139 /* The first page will only be zeroed if this is a delete. 
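* (blob_persist_start treats active.num_pages == 0 as the deletion signal, so zeroing the root page is what makes the delete durable on disk.)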
*/ 2140 if (blob->active.num_pages == 0) { 2141 uint32_t page_num; 2142 2143 /* The first page in the metadata goes where the blobid indicates */ 2144 page_num = bs_blobid_to_page(blob->id); 2145 lba = bs_md_page_to_lba(bs, page_num); 2146 2147 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2148 } 2149 2150 bs_batch_close(batch); 2151 } 2152 2153 static void 2154 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2155 { 2156 struct spdk_blob_persist_ctx *ctx = cb_arg; 2157 struct spdk_blob *blob = ctx->blob; 2158 struct spdk_blob_store *bs = blob->bs; 2159 uint64_t lba; 2160 uint32_t lba_count; 2161 struct spdk_blob_md_page *page; 2162 2163 if (bserrno != 0) { 2164 blob_persist_complete(seq, ctx, bserrno); 2165 return; 2166 } 2167 2168 if (blob->active.num_pages == 0) { 2169 /* Move on to the next step */ 2170 blob_persist_zero_pages(seq, ctx, 0); 2171 return; 2172 } 2173 2174 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2175 2176 page = &ctx->pages[0]; 2177 /* The first page in the metadata goes where the blobid indicates */ 2178 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 2179 2180 bs_sequence_write_dev(seq, page, lba, lba_count, 2181 blob_persist_zero_pages, ctx); 2182 } 2183 2184 static void 2185 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2186 { 2187 struct spdk_blob *blob = ctx->blob; 2188 struct spdk_blob_store *bs = blob->bs; 2189 uint64_t lba; 2190 uint32_t lba_count; 2191 struct spdk_blob_md_page *page; 2192 spdk_bs_batch_t *batch; 2193 size_t i; 2194 2195 /* Clusters don't move around in blobs. The list shrinks or grows 2196 * at the end, but no changes ever occur in the middle of the list. 2197 */ 2198 2199 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2200 2201 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 2202 2203 /* This starts at 1. The root page is not written until 2204 * all of the others are finished 2205 */ 2206 for (i = 1; i < blob->active.num_pages; i++) { 2207 page = &ctx->pages[i]; 2208 assert(page->sequence_num == i); 2209 2210 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 2211 2212 bs_batch_write_dev(batch, page, lba, lba_count); 2213 } 2214 2215 bs_batch_close(batch); 2216 } 2217 2218 static int 2219 blob_resize(struct spdk_blob *blob, uint64_t sz) 2220 { 2221 uint64_t i; 2222 uint64_t *tmp; 2223 uint64_t cluster; 2224 uint32_t lfmd; /* lowest free md page */ 2225 uint64_t num_clusters; 2226 uint32_t *ep_tmp; 2227 uint64_t new_num_ep = 0, current_num_ep = 0; 2228 struct spdk_blob_store *bs; 2229 int rc; 2230 2231 bs = blob->bs; 2232 2233 blob_verify_md_op(blob); 2234 2235 if (blob->active.num_clusters == sz) { 2236 return 0; 2237 } 2238 2239 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2240 /* If this blob was resized to be larger, then smaller, then 2241 * larger without syncing, then the cluster array already 2242 * contains spare assigned clusters we can use. 2243 */ 2244 num_clusters = spdk_min(blob->active.cluster_array_size, 2245 sz); 2246 } else { 2247 num_clusters = blob->active.num_clusters; 2248 } 2249 2250 if (blob->use_extent_table) { 2251 /* Round up since every cluster beyond current Extent Table size, 2252 * requires new extent page. 
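* Each extent page maps SPDK_EXTENTS_PER_EP clusters, hence the round-up divisions below.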
*/ 2253 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2254 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2255 } 2256 2257 assert(!spdk_spin_held(&bs->used_lock)); 2258 2259 /* Check first that we have enough clusters and md pages before we start claiming them. 2260 * bs->used_lock is held to ensure that clusters we think are free are still free when we go 2261 * to claim them later in this function. 2262 */ 2263 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2264 spdk_spin_lock(&bs->used_lock); 2265 if ((sz - num_clusters) > bs->num_free_clusters) { 2266 rc = -ENOSPC; 2267 goto out; 2268 } 2269 lfmd = 0; 2270 for (i = current_num_ep; i < new_num_ep ; i++) { 2271 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2272 if (lfmd == UINT32_MAX) { 2273 /* No more free md pages. Cannot satisfy the request */ 2274 rc = -ENOSPC; 2275 goto out; 2276 } 2277 } 2278 } 2279 2280 if (sz > num_clusters) { 2281 /* Expand the cluster array if necessary. 2282 * We only shrink the array when persisting. 2283 */ 2284 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2285 if (sz > 0 && tmp == NULL) { 2286 rc = -ENOMEM; 2287 goto out; 2288 } 2289 memset(tmp + blob->active.cluster_array_size, 0, 2290 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2291 blob->active.clusters = tmp; 2292 blob->active.cluster_array_size = sz; 2293 2294 /* Expand the extents table, only if enough clusters were added */ 2295 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2296 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2297 if (new_num_ep > 0 && ep_tmp == NULL) { 2298 rc = -ENOMEM; 2299 goto out; 2300 } 2301 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2302 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2303 blob->active.extent_pages = ep_tmp; 2304 blob->active.extent_pages_array_size = new_num_ep; 2305 } 2306 } 2307 2308 blob->state = SPDK_BLOB_STATE_DIRTY; 2309 2310 if (spdk_blob_is_thin_provisioned(blob) == false) { 2311 cluster = 0; 2312 lfmd = 0; 2313 for (i = num_clusters; i < sz; i++) { 2314 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2315 /* Do not increment lfmd here. lfmd will get updated 2316 * to the md_page allocated (if any) when a new extent 2317 * page is needed. Just pass that value again, 2318 * bs_allocate_cluster will just start at that index 2319 * to find the next free md_page when needed. 
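* Note that bs->used_lock has been held since the free-space check above, so the allocations in this loop should not fail with -ENOSPC.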
2320 */ 2321 } 2322 } 2323 2324 /* If we are shrinking the blob, we must adjust num_allocated_clusters */ 2325 for (i = sz; i < num_clusters; i++) { 2326 if (blob->active.clusters[i] != 0) { 2327 blob->active.num_allocated_clusters--; 2328 } 2329 } 2330 2331 blob->active.num_clusters = sz; 2332 blob->active.num_extent_pages = new_num_ep; 2333 2334 rc = 0; 2335 out: 2336 if (spdk_spin_held(&bs->used_lock)) { 2337 spdk_spin_unlock(&bs->used_lock); 2338 } 2339 2340 return rc; 2341 } 2342 2343 static void 2344 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2345 { 2346 spdk_bs_sequence_t *seq = ctx->seq; 2347 struct spdk_blob *blob = ctx->blob; 2348 struct spdk_blob_store *bs = blob->bs; 2349 uint64_t i; 2350 uint32_t page_num; 2351 void *tmp; 2352 int rc; 2353 2354 /* Generate the new metadata */ 2355 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2356 if (rc < 0) { 2357 blob_persist_complete(seq, ctx, rc); 2358 return; 2359 } 2360 2361 assert(blob->active.num_pages >= 1); 2362 2363 /* Resize the cache of page indices */ 2364 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2365 if (!tmp) { 2366 blob_persist_complete(seq, ctx, -ENOMEM); 2367 return; 2368 } 2369 blob->active.pages = tmp; 2370 2371 /* Assign this metadata to pages. This requires two passes - one to verify that there are 2372 * enough pages and a second to actually claim them. The used_lock is held across 2373 * both passes to ensure things don't change in the middle. 2374 */ 2375 spdk_spin_lock(&bs->used_lock); 2376 page_num = 0; 2377 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 2378 for (i = 1; i < blob->active.num_pages; i++) { 2379 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2380 if (page_num == UINT32_MAX) { 2381 spdk_spin_unlock(&bs->used_lock); 2382 blob_persist_complete(seq, ctx, -ENOMEM); 2383 return; 2384 } 2385 page_num++; 2386 } 2387 2388 page_num = 0; 2389 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2390 for (i = 1; i < blob->active.num_pages; i++) { 2391 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2392 ctx->pages[i - 1].next = page_num; 2393 /* Now that previous metadata page is complete, calculate the crc for it. */ 2394 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2395 blob->active.pages[i] = page_num; 2396 bs_claim_md_page(bs, page_num); 2397 SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num, 2398 blob->id); 2399 page_num++; 2400 } 2401 spdk_spin_unlock(&bs->used_lock); 2402 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2403 /* Start writing the metadata from last page to first */ 2404 blob->state = SPDK_BLOB_STATE_CLEAN; 2405 blob_persist_write_page_chain(seq, ctx); 2406 } 2407 2408 static void 2409 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2410 { 2411 struct spdk_blob_persist_ctx *ctx = cb_arg; 2412 struct spdk_blob *blob = ctx->blob; 2413 size_t i; 2414 uint32_t extent_page_id; 2415 uint32_t page_count = 0; 2416 int rc; 2417 2418 if (ctx->extent_page != NULL) { 2419 spdk_free(ctx->extent_page); 2420 ctx->extent_page = NULL; 2421 } 2422 2423 if (bserrno != 0) { 2424 blob_persist_complete(seq, ctx, bserrno); 2425 return; 2426 } 2427 2428 /* Only write out Extent Pages when blob was resized. 
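* ctx->next_extent_page was primed in blob_persist_start; each pass writes at most one extent page, re-entering this function as the write completion until the array is exhausted.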
*/ 2429 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2430 extent_page_id = blob->active.extent_pages[i]; 2431 if (extent_page_id == 0) { 2432 /* No Extent Page to persist */ 2433 assert(spdk_blob_is_thin_provisioned(blob)); 2434 continue; 2435 } 2436 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2437 ctx->next_extent_page = i + 1; 2438 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2439 if (rc < 0) { 2440 blob_persist_complete(seq, ctx, rc); 2441 return; 2442 } 2443 2444 blob->state = SPDK_BLOB_STATE_DIRTY; 2445 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2446 2447 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2448 2449 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2450 bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 2451 blob_persist_write_extent_pages, ctx); 2452 return; 2453 } 2454 2455 blob_persist_generate_new_md(ctx); 2456 } 2457 2458 static void 2459 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2460 { 2461 struct spdk_blob_persist_ctx *ctx = cb_arg; 2462 struct spdk_blob *blob = ctx->blob; 2463 2464 if (bserrno != 0) { 2465 blob_persist_complete(seq, ctx, bserrno); 2466 return; 2467 } 2468 2469 if (blob->active.num_pages == 0) { 2470 /* This is the signal that the blob should be deleted. 2471 * Immediately jump to the clean up routine. */ 2472 assert(blob->clean.num_pages > 0); 2473 blob->state = SPDK_BLOB_STATE_CLEAN; 2474 blob_persist_zero_pages(seq, ctx, 0); 2475 return; 2476 2477 } 2478 2479 if (blob->clean.num_clusters < blob->active.num_clusters) { 2480 /* Blob was resized up */ 2481 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2482 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2483 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2484 /* Blob was resized down */ 2485 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2486 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2487 } else { 2488 /* No change in size occurred */ 2489 blob_persist_generate_new_md(ctx); 2490 return; 2491 } 2492 2493 blob_persist_write_extent_pages(seq, ctx, 0); 2494 } 2495 2496 struct spdk_bs_mark_dirty { 2497 struct spdk_blob_store *bs; 2498 struct spdk_bs_super_block *super; 2499 spdk_bs_sequence_cpl cb_fn; 2500 void *cb_arg; 2501 }; 2502 2503 static void 2504 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2505 { 2506 struct spdk_bs_mark_dirty *ctx = cb_arg; 2507 2508 if (bserrno == 0) { 2509 ctx->bs->clean = 0; 2510 } 2511 2512 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 2513 2514 spdk_free(ctx->super); 2515 free(ctx); 2516 } 2517 2518 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2519 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2520 2521 2522 static void 2523 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2524 { 2525 struct spdk_bs_mark_dirty *ctx = cb_arg; 2526 int rc; 2527 2528 if (bserrno != 0) { 2529 bs_mark_dirty_write_cpl(seq, ctx, bserrno); 2530 return; 2531 } 2532 2533 rc = bs_super_validate(ctx->super, ctx->bs); 2534 if (rc != 0) { 2535 bs_mark_dirty_write_cpl(seq, ctx, rc); 2536 return; 2537 } 2538 2539 ctx->super->clean = 0; 2540 if (ctx->super->size == 0) { 2541 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 2542 } 
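/* Super block is now validated and updated in memory; persist it before the caller's metadata writes proceed. */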
2543 2544 bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx); 2545 } 2546 2547 static void 2548 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2549 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2550 { 2551 struct spdk_bs_mark_dirty *ctx; 2552 2553 /* Blobstore is already marked dirty */ 2554 if (bs->clean == 0) { 2555 cb_fn(seq, cb_arg, 0); 2556 return; 2557 } 2558 2559 ctx = calloc(1, sizeof(*ctx)); 2560 if (!ctx) { 2561 cb_fn(seq, cb_arg, -ENOMEM); 2562 return; 2563 } 2564 ctx->bs = bs; 2565 ctx->cb_fn = cb_fn; 2566 ctx->cb_arg = cb_arg; 2567 2568 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2569 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2570 if (!ctx->super) { 2571 free(ctx); 2572 cb_fn(seq, cb_arg, -ENOMEM); 2573 return; 2574 } 2575 2576 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 2577 bs_byte_to_lba(bs, sizeof(*ctx->super)), 2578 bs_mark_dirty_write, ctx); 2579 } 2580 2581 /* Write a blob to disk */ 2582 static void 2583 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2584 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2585 { 2586 struct spdk_blob_persist_ctx *ctx; 2587 2588 blob_verify_md_op(blob); 2589 2590 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2591 cb_fn(seq, cb_arg, 0); 2592 return; 2593 } 2594 2595 ctx = calloc(1, sizeof(*ctx)); 2596 if (!ctx) { 2597 cb_fn(seq, cb_arg, -ENOMEM); 2598 return; 2599 } 2600 ctx->blob = blob; 2601 ctx->seq = seq; 2602 ctx->cb_fn = cb_fn; 2603 ctx->cb_arg = cb_arg; 2604 2605 /* Multiple blob persists can affect one another, via blob->state or 2606 * blob mutable data changes. To prevent it, queue up the persists. */ 2607 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2608 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2609 return; 2610 } 2611 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2612 2613 bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx); 2614 } 2615 2616 struct spdk_blob_copy_cluster_ctx { 2617 struct spdk_blob *blob; 2618 uint8_t *buf; 2619 uint64_t page; 2620 uint64_t new_cluster; 2621 uint32_t new_extent_page; 2622 spdk_bs_sequence_t *seq; 2623 struct spdk_blob_md_page *new_cluster_page; 2624 }; 2625 2626 struct spdk_blob_free_cluster_ctx { 2627 struct spdk_blob *blob; 2628 uint64_t page; 2629 struct spdk_blob_md_page *md_page; 2630 uint64_t cluster_num; 2631 uint32_t extent_page; 2632 spdk_bs_sequence_t *seq; 2633 }; 2634 2635 static void 2636 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2637 { 2638 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2639 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2640 TAILQ_HEAD(, spdk_bs_request_set) requests; 2641 spdk_bs_user_op_t *op; 2642 2643 TAILQ_INIT(&requests); 2644 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2645 2646 while (!TAILQ_EMPTY(&requests)) { 2647 op = TAILQ_FIRST(&requests); 2648 TAILQ_REMOVE(&requests, op, link); 2649 if (bserrno == 0) { 2650 bs_user_op_execute(op); 2651 } else { 2652 bs_user_op_abort(op, bserrno); 2653 } 2654 } 2655 2656 spdk_free(ctx->buf); 2657 free(ctx); 2658 } 2659 2660 static void 2661 blob_free_cluster_cpl(void *cb_arg, int bserrno) 2662 { 2663 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 2664 spdk_bs_sequence_t *seq = ctx->seq; 2665 2666 bs_sequence_finish(seq, bserrno); 2667 2668 free(ctx); 2669 } 2670 2671 static void 2672 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx) 2673 { 2674 
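/* Hand the speculatively claimed cluster (and extent page, if one was claimed) back to the free pools under used_lock. */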
spdk_spin_lock(&ctx->blob->bs->used_lock); 2675 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2676 if (ctx->new_extent_page != 0) { 2677 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2678 } 2679 spdk_spin_unlock(&ctx->blob->bs->used_lock); 2680 } 2681 2682 static void 2683 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno) 2684 { 2685 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2686 2687 if (bserrno) { 2688 SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno); 2689 } 2690 2691 blob_insert_cluster_revert(ctx); 2692 bs_sequence_finish(ctx->seq, bserrno); 2693 } 2694 2695 static void 2696 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx) 2697 { 2698 struct spdk_bs_cpl cpl; 2699 spdk_bs_batch_t *batch; 2700 struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel); 2701 2702 /* 2703 * We allocated a cluster and we copied data to it. But now, we realized that we don't need 2704 * this cluster and we want to release it. We must ensure that we clear the data on this 2705 * cluster. 2706 * The cluster may later be re-allocated by a thick-provisioned blob for example. When 2707 * reading from this thick-provisioned blob before writing data, we should read zeroes. 2708 */ 2709 2710 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2711 cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl; 2712 cpl.u.blob_basic.cb_arg = ctx; 2713 2714 batch = bs_batch_open(ch, &cpl, ctx->blob); 2715 if (!batch) { 2716 blob_insert_cluster_clear_cpl(ctx, -ENOMEM); 2717 return; 2718 } 2719 2720 bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2721 bs_cluster_to_lba(ctx->blob->bs, 1)); 2722 bs_batch_close(batch); 2723 } 2724 2725 static void 2726 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2727 { 2728 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2729 2730 if (bserrno) { 2731 if (bserrno == -EEXIST) { 2732 /* The metadata insert failed because another thread 2733 * allocated the cluster first. Clear and free our cluster 2734 * but continue without error. 
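* The competing thread's cluster already holds the copied data, so pending I/O can proceed against it once ours is cleared and released.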
*/ 2735 blob_insert_cluster_clear(ctx); 2736 return; 2737 } 2738 2739 blob_insert_cluster_revert(ctx); 2740 } 2741 2742 bs_sequence_finish(ctx->seq, bserrno); 2743 } 2744 2745 static void 2746 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2747 { 2748 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2749 uint32_t cluster_number; 2750 2751 if (bserrno) { 2752 /* The write failed, so jump to the final completion handler */ 2753 bs_sequence_finish(seq, bserrno); 2754 return; 2755 } 2756 2757 cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page); 2758 2759 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2760 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2761 } 2762 2763 static void 2764 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2765 { 2766 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2767 2768 if (bserrno != 0) { 2769 /* The read failed, so jump to the final completion handler */ 2770 bs_sequence_finish(seq, bserrno); 2771 return; 2772 } 2773 2774 /* Write whole cluster */ 2775 bs_sequence_write_dev(seq, ctx->buf, 2776 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2777 bs_cluster_to_lba(ctx->blob->bs, 1), 2778 blob_write_copy_cpl, ctx); 2779 } 2780 2781 static bool 2782 blob_can_copy(struct spdk_blob *blob, uint32_t cluster_start_page, uint64_t *base_lba) 2783 { 2784 uint64_t lba = bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page); 2785 2786 return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) && 2787 blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba); 2788 } 2789 2790 static void 2791 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba) 2792 { 2793 struct spdk_blob *blob = ctx->blob; 2794 uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz); 2795 2796 bs_sequence_copy_dev(ctx->seq, 2797 bs_cluster_to_lba(blob->bs, ctx->new_cluster), 2798 src_lba, 2799 lba_count, 2800 blob_write_copy_cpl, ctx); 2801 } 2802 2803 static void 2804 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2805 struct spdk_io_channel *_ch, 2806 uint64_t io_unit, spdk_bs_user_op_t *op) 2807 { 2808 struct spdk_bs_cpl cpl; 2809 struct spdk_bs_channel *ch; 2810 struct spdk_blob_copy_cluster_ctx *ctx; 2811 uint32_t cluster_start_page; 2812 uint32_t cluster_number; 2813 bool is_zeroes; 2814 bool can_copy; 2815 bool is_valid_range; 2816 uint64_t copy_src_lba; 2817 int rc; 2818 2819 ch = spdk_io_channel_get_ctx(_ch); 2820 2821 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2822 /* There are already operations pending. Queue this user op 2823 * and return because it will be re-executed when the outstanding 2824 * cluster allocation completes. */ 2825 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2826 return; 2827 } 2828 2829 /* Round the io_unit offset down to the first page in the cluster */ 2830 cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit); 2831 2832 /* Calculate which index in the metadata cluster array the corresponding 2833 * cluster is supposed to be at. 
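* This cluster index, not the io_unit offset, is what the later metadata insert keys on.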
*/ 2834 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2835 2836 ctx = calloc(1, sizeof(*ctx)); 2837 if (!ctx) { 2838 bs_user_op_abort(op, -ENOMEM); 2839 return; 2840 } 2841 2842 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2843 2844 ctx->blob = blob; 2845 ctx->page = cluster_start_page; 2846 ctx->new_cluster_page = ch->new_cluster_page; 2847 memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE); 2848 2849 /* Check if the cluster that we intend to do CoW for is valid for 2850 * the backing dev. For zeroes backing dev, it'll be always valid. 2851 * For other backing dev e.g. a snapshot, it could be invalid if 2852 * the blob has been resized after snapshot was taken. */ 2853 is_valid_range = blob->back_bs_dev->is_range_valid(blob->back_bs_dev, 2854 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2855 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2856 2857 can_copy = is_valid_range && blob_can_copy(blob, cluster_start_page, &copy_src_lba); 2858 2859 is_zeroes = is_valid_range && blob->back_bs_dev->is_zeroes(blob->back_bs_dev, 2860 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2861 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2862 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) { 2863 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2864 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 2865 if (!ctx->buf) { 2866 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2867 blob->bs->cluster_sz); 2868 free(ctx); 2869 bs_user_op_abort(op, -ENOMEM); 2870 return; 2871 } 2872 } 2873 2874 spdk_spin_lock(&blob->bs->used_lock); 2875 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2876 false); 2877 spdk_spin_unlock(&blob->bs->used_lock); 2878 if (rc != 0) { 2879 spdk_free(ctx->buf); 2880 free(ctx); 2881 bs_user_op_abort(op, rc); 2882 return; 2883 } 2884 2885 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2886 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2887 cpl.u.blob_basic.cb_arg = ctx; 2888 2889 ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob); 2890 if (!ctx->seq) { 2891 spdk_spin_lock(&blob->bs->used_lock); 2892 bs_release_cluster(blob->bs, ctx->new_cluster); 2893 spdk_spin_unlock(&blob->bs->used_lock); 2894 spdk_free(ctx->buf); 2895 free(ctx); 2896 bs_user_op_abort(op, -ENOMEM); 2897 return; 2898 } 2899 2900 /* Queue the user op to block other incoming operations */ 2901 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2902 2903 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) { 2904 if (can_copy) { 2905 blob_copy(ctx, op, copy_src_lba); 2906 } else { 2907 /* Read cluster from backing device */ 2908 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2909 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2910 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2911 blob_write_copy, ctx); 2912 } 2913 2914 } else { 2915 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2916 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2917 } 2918 } 2919 2920 static inline bool 2921 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2922 uint64_t *lba, uint64_t *lba_count) 2923 { 2924 *lba_count = length; 2925 2926 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2927 assert(blob->back_bs_dev != NULL); 2928 *lba = bs_io_unit_to_back_dev_lba(blob, io_unit); 2929 *lba_count = 
bs_io_unit_to_back_dev_lba(blob, *lba_count); 2930 return false; 2931 } else { 2932 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2933 return true; 2934 } 2935 } 2936 2937 struct op_split_ctx { 2938 struct spdk_blob *blob; 2939 struct spdk_io_channel *channel; 2940 uint64_t io_unit_offset; 2941 uint64_t io_units_remaining; 2942 void *curr_payload; 2943 enum spdk_blob_op_type op_type; 2944 spdk_bs_sequence_t *seq; 2945 bool in_submit_ctx; 2946 bool completed_in_submit_ctx; 2947 bool done; 2948 }; 2949 2950 static void 2951 blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2952 { 2953 struct op_split_ctx *ctx = cb_arg; 2954 struct spdk_blob *blob = ctx->blob; 2955 struct spdk_io_channel *ch = ctx->channel; 2956 enum spdk_blob_op_type op_type = ctx->op_type; 2957 uint8_t *buf; 2958 uint64_t offset; 2959 uint64_t length; 2960 uint64_t op_length; 2961 2962 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2963 bs_sequence_finish(ctx->seq, bserrno); 2964 if (ctx->in_submit_ctx) { 2965 /* Defer freeing of the ctx object, since it will be 2966 * accessed when this unwinds back to the submission 2967 * context. 2968 */ 2969 ctx->done = true; 2970 } else { 2971 free(ctx); 2972 } 2973 return; 2974 } 2975 2976 if (ctx->in_submit_ctx) { 2977 /* If this split operation completed in the context 2978 * of its submission, mark the flag and return immediately 2979 * to avoid recursion. 2980 */ 2981 ctx->completed_in_submit_ctx = true; 2982 return; 2983 } 2984 2985 while (true) { 2986 ctx->completed_in_submit_ctx = false; 2987 2988 offset = ctx->io_unit_offset; 2989 length = ctx->io_units_remaining; 2990 buf = ctx->curr_payload; 2991 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob, 2992 offset)); 2993 2994 /* Update length and payload for next operation */ 2995 ctx->io_units_remaining -= op_length; 2996 ctx->io_unit_offset += op_length; 2997 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 2998 ctx->curr_payload += op_length * blob->bs->io_unit_size; 2999 } 3000 3001 assert(!ctx->in_submit_ctx); 3002 ctx->in_submit_ctx = true; 3003 3004 switch (op_type) { 3005 case SPDK_BLOB_READ: 3006 spdk_blob_io_read(blob, ch, buf, offset, op_length, 3007 blob_request_submit_op_split_next, ctx); 3008 break; 3009 case SPDK_BLOB_WRITE: 3010 spdk_blob_io_write(blob, ch, buf, offset, op_length, 3011 blob_request_submit_op_split_next, ctx); 3012 break; 3013 case SPDK_BLOB_UNMAP: 3014 spdk_blob_io_unmap(blob, ch, offset, op_length, 3015 blob_request_submit_op_split_next, ctx); 3016 break; 3017 case SPDK_BLOB_WRITE_ZEROES: 3018 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 3019 blob_request_submit_op_split_next, ctx); 3020 break; 3021 case SPDK_BLOB_READV: 3022 case SPDK_BLOB_WRITEV: 3023 SPDK_ERRLOG("readv/write not valid\n"); 3024 bs_sequence_finish(ctx->seq, -EINVAL); 3025 free(ctx); 3026 return; 3027 } 3028 3029 #ifndef __clang_analyzer__ 3030 /* scan-build reports a false positive around accessing the ctx here. It 3031 * forms a path that recursively calls this function, but then says 3032 * "assuming ctx->in_submit_ctx is false", when that isn't possible. 3033 * This path does free(ctx), returns to here, and reports a use-after-free 3034 * bug. Wrapping this bit of code so that scan-build doesn't see it 3035 * works around the scan-build bug. 3036 */ 3037 assert(ctx->in_submit_ctx); 3038 ctx->in_submit_ctx = false; 3039 3040 /* If the operation completed immediately, loop back and submit the 3041 * next operation. 
Otherwise we can return and the next split 3042 * operation will get submitted when this current operation is 3043 * later completed asynchronously. 3044 */ 3045 if (ctx->completed_in_submit_ctx) { 3046 continue; 3047 } else if (ctx->done) { 3048 free(ctx); 3049 } 3050 #endif 3051 break; 3052 } 3053 } 3054 3055 static void 3056 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 3057 void *payload, uint64_t offset, uint64_t length, 3058 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3059 { 3060 struct op_split_ctx *ctx; 3061 spdk_bs_sequence_t *seq; 3062 struct spdk_bs_cpl cpl; 3063 3064 assert(blob != NULL); 3065 3066 ctx = calloc(1, sizeof(struct op_split_ctx)); 3067 if (ctx == NULL) { 3068 cb_fn(cb_arg, -ENOMEM); 3069 return; 3070 } 3071 3072 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3073 cpl.u.blob_basic.cb_fn = cb_fn; 3074 cpl.u.blob_basic.cb_arg = cb_arg; 3075 3076 seq = bs_sequence_start_blob(ch, &cpl, blob); 3077 if (!seq) { 3078 free(ctx); 3079 cb_fn(cb_arg, -ENOMEM); 3080 return; 3081 } 3082 3083 ctx->blob = blob; 3084 ctx->channel = ch; 3085 ctx->curr_payload = payload; 3086 ctx->io_unit_offset = offset; 3087 ctx->io_units_remaining = length; 3088 ctx->op_type = op_type; 3089 ctx->seq = seq; 3090 3091 blob_request_submit_op_split_next(ctx, 0); 3092 } 3093 3094 static void 3095 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno) 3096 { 3097 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 3098 3099 if (bserrno) { 3100 bs_sequence_finish(ctx->seq, bserrno); 3101 free(ctx); 3102 return; 3103 } 3104 3105 blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num, 3106 ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx); 3107 } 3108 3109 static void 3110 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 3111 void *payload, uint64_t offset, uint64_t length, 3112 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3113 { 3114 struct spdk_bs_cpl cpl; 3115 uint64_t lba; 3116 uint64_t lba_count; 3117 bool is_allocated; 3118 3119 assert(blob != NULL); 3120 3121 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3122 cpl.u.blob_basic.cb_fn = cb_fn; 3123 cpl.u.blob_basic.cb_arg = cb_arg; 3124 3125 if (blob->frozen_refcnt) { 3126 /* This blob I/O is frozen */ 3127 spdk_bs_user_op_t *op; 3128 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3129 3130 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3131 if (!op) { 3132 cb_fn(cb_arg, -ENOMEM); 3133 return; 3134 } 3135 3136 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3137 3138 return; 3139 } 3140 3141 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3142 3143 switch (op_type) { 3144 case SPDK_BLOB_READ: { 3145 spdk_bs_batch_t *batch; 3146 3147 batch = bs_batch_open(_ch, &cpl, blob); 3148 if (!batch) { 3149 cb_fn(cb_arg, -ENOMEM); 3150 return; 3151 } 3152 3153 if (is_allocated) { 3154 /* Read from the blob */ 3155 bs_batch_read_dev(batch, payload, lba, lba_count); 3156 } else { 3157 /* Read from the backing block device */ 3158 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 3159 } 3160 3161 bs_batch_close(batch); 3162 break; 3163 } 3164 case SPDK_BLOB_WRITE: 3165 case SPDK_BLOB_WRITE_ZEROES: { 3166 if (is_allocated) { 3167 /* Write to the blob */ 3168 spdk_bs_batch_t *batch; 3169 3170 if (lba_count == 0) { 3171 cb_fn(cb_arg, 0); 3172 return; 3173 } 3174 3175 batch = bs_batch_open(_ch, &cpl, blob); 3176 if (!batch) 
{ 3177 cb_fn(cb_arg, -ENOMEM); 3178 return; 3179 } 3180 3181 if (op_type == SPDK_BLOB_WRITE) { 3182 bs_batch_write_dev(batch, payload, lba, lba_count); 3183 } else { 3184 bs_batch_write_zeroes_dev(batch, lba, lba_count); 3185 } 3186 3187 bs_batch_close(batch); 3188 } else { 3189 /* Queue this operation and allocate the cluster */ 3190 spdk_bs_user_op_t *op; 3191 3192 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3193 if (!op) { 3194 cb_fn(cb_arg, -ENOMEM); 3195 return; 3196 } 3197 3198 bs_allocate_and_copy_cluster(blob, _ch, offset, op); 3199 } 3200 break; 3201 } 3202 case SPDK_BLOB_UNMAP: { 3203 struct spdk_blob_free_cluster_ctx *ctx = NULL; 3204 spdk_bs_batch_t *batch; 3205 3206 /* if aligned with cluster release cluster */ 3207 if (spdk_blob_is_thin_provisioned(blob) && is_allocated && 3208 blob_backed_with_zeroes_dev(blob) && 3209 bs_io_units_per_cluster(blob) == length) { 3210 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3211 uint32_t cluster_start_page; 3212 uint32_t cluster_number; 3213 3214 assert(offset % bs_io_units_per_cluster(blob) == 0); 3215 3216 /* Round the io_unit offset down to the first page in the cluster */ 3217 cluster_start_page = bs_io_unit_to_cluster_start(blob, offset); 3218 3219 /* Calculate which index in the metadata cluster array the corresponding 3220 * cluster is supposed to be at. */ 3221 cluster_number = bs_io_unit_to_cluster_number(blob, offset); 3222 3223 ctx = calloc(1, sizeof(*ctx)); 3224 if (!ctx) { 3225 cb_fn(cb_arg, -ENOMEM); 3226 return; 3227 } 3228 /* When freeing a cluster the flow should be (in order): 3229 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak 3230 * old data) 3231 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the 3232 * cluster), update and sync metadata freeing the cluster 3233 * 3. 
Once metadata update is done, complete the user unmap request 3234 */ 3235 ctx->blob = blob; 3236 ctx->page = cluster_start_page; 3237 ctx->cluster_num = cluster_number; 3238 ctx->md_page = bs_channel->new_cluster_page; 3239 ctx->seq = bs_sequence_start_bs(_ch, &cpl); 3240 if (!ctx->seq) { 3241 free(ctx); 3242 cb_fn(cb_arg, -ENOMEM); 3243 return; 3244 } 3245 3246 if (blob->use_extent_table) { 3247 ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number); 3248 } 3249 3250 cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete; 3251 cpl.u.blob_basic.cb_arg = ctx; 3252 } 3253 3254 batch = bs_batch_open(_ch, &cpl, blob); 3255 if (!batch) { 3256 free(ctx); 3257 cb_fn(cb_arg, -ENOMEM); 3258 return; 3259 } 3260 3261 if (is_allocated) { 3262 bs_batch_unmap_dev(batch, lba, lba_count); 3263 } 3264 3265 bs_batch_close(batch); 3266 break; 3267 } 3268 case SPDK_BLOB_READV: 3269 case SPDK_BLOB_WRITEV: 3270 SPDK_ERRLOG("readv/write not valid\n"); 3271 cb_fn(cb_arg, -EINVAL); 3272 break; 3273 } 3274 } 3275 3276 static void 3277 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3278 void *payload, uint64_t offset, uint64_t length, 3279 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3280 { 3281 assert(blob != NULL); 3282 3283 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 3284 cb_fn(cb_arg, -EPERM); 3285 return; 3286 } 3287 3288 if (length == 0) { 3289 cb_fn(cb_arg, 0); 3290 return; 3291 } 3292 3293 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3294 cb_fn(cb_arg, -EINVAL); 3295 return; 3296 } 3297 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) { 3298 blob_request_submit_op_single(_channel, blob, payload, offset, length, 3299 cb_fn, cb_arg, op_type); 3300 } else { 3301 blob_request_submit_op_split(_channel, blob, payload, offset, length, 3302 cb_fn, cb_arg, op_type); 3303 } 3304 } 3305 3306 struct rw_iov_ctx { 3307 struct spdk_blob *blob; 3308 struct spdk_io_channel *channel; 3309 spdk_blob_op_complete cb_fn; 3310 void *cb_arg; 3311 bool read; 3312 int iovcnt; 3313 struct iovec *orig_iov; 3314 uint64_t io_unit_offset; 3315 uint64_t io_units_remaining; 3316 uint64_t io_units_done; 3317 struct spdk_blob_ext_io_opts *ext_io_opts; 3318 struct iovec iov[0]; 3319 }; 3320 3321 static void 3322 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3323 { 3324 assert(cb_arg == NULL); 3325 bs_sequence_finish(seq, bserrno); 3326 } 3327 3328 static void 3329 rw_iov_split_next(void *cb_arg, int bserrno) 3330 { 3331 struct rw_iov_ctx *ctx = cb_arg; 3332 struct spdk_blob *blob = ctx->blob; 3333 struct iovec *iov, *orig_iov; 3334 int iovcnt; 3335 size_t orig_iovoff; 3336 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 3337 uint64_t byte_count; 3338 3339 if (bserrno != 0 || ctx->io_units_remaining == 0) { 3340 ctx->cb_fn(ctx->cb_arg, bserrno); 3341 free(ctx); 3342 return; 3343 } 3344 3345 io_unit_offset = ctx->io_unit_offset; 3346 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 3347 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 3348 /* 3349 * Get index and offset into the original iov array for our current position in the I/O sequence. 3350 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 3351 * point to the current position in the I/O sequence. 
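* For example (hypothetical sizes): with two 4096-byte iovs and io_units_done worth 6144 bytes, the walk ends at the second iov with orig_iovoff == 2048.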
3352 */ 3353 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 3354 orig_iov = &ctx->orig_iov[0]; 3355 orig_iovoff = 0; 3356 while (byte_count > 0) { 3357 if (byte_count >= orig_iov->iov_len) { 3358 byte_count -= orig_iov->iov_len; 3359 orig_iov++; 3360 } else { 3361 orig_iovoff = byte_count; 3362 byte_count = 0; 3363 } 3364 } 3365 3366 /* 3367 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 3368 * bytes of this next I/O remain to be accounted for in the new iov array. 3369 */ 3370 byte_count = io_units_count * blob->bs->io_unit_size; 3371 iov = &ctx->iov[0]; 3372 iovcnt = 0; 3373 while (byte_count > 0) { 3374 assert(iovcnt < ctx->iovcnt); 3375 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 3376 iov->iov_base = orig_iov->iov_base + orig_iovoff; 3377 byte_count -= iov->iov_len; 3378 orig_iovoff = 0; 3379 orig_iov++; 3380 iov++; 3381 iovcnt++; 3382 } 3383 3384 ctx->io_unit_offset += io_units_count; 3385 ctx->io_units_remaining -= io_units_count; 3386 ctx->io_units_done += io_units_count; 3387 iov = &ctx->iov[0]; 3388 3389 if (ctx->read) { 3390 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3391 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3392 } else { 3393 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3394 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3395 } 3396 } 3397 3398 static void 3399 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3400 struct iovec *iov, int iovcnt, 3401 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 3402 struct spdk_blob_ext_io_opts *ext_io_opts) 3403 { 3404 struct spdk_bs_cpl cpl; 3405 3406 assert(blob != NULL); 3407 3408 if (!read && blob->data_ro) { 3409 cb_fn(cb_arg, -EPERM); 3410 return; 3411 } 3412 3413 if (length == 0) { 3414 cb_fn(cb_arg, 0); 3415 return; 3416 } 3417 3418 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3419 cb_fn(cb_arg, -EINVAL); 3420 return; 3421 } 3422 3423 /* 3424 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 3425 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 3426 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 3427 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 3428 * to allocate a separate iov array and split the I/O such that none of the resulting 3429 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 3430 * but since this case happens very infrequently, any performance impact will be negligible. 3431 * 3432 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 3433 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 3434 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 3435 * when the batch was completed, to allow for freeing the memory for the iov arrays. 
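* Until such an optimization exists, the sequential fallback below bounds the extra memory to a single iov array per request.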
3436 */ 3437 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 3438 uint64_t lba_count; 3439 uint64_t lba; 3440 bool is_allocated; 3441 3442 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3443 cpl.u.blob_basic.cb_fn = cb_fn; 3444 cpl.u.blob_basic.cb_arg = cb_arg; 3445 3446 if (blob->frozen_refcnt) { 3447 /* This blob I/O is frozen */ 3448 enum spdk_blob_op_type op_type; 3449 spdk_bs_user_op_t *op; 3450 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 3451 3452 op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 3453 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 3454 if (!op) { 3455 cb_fn(cb_arg, -ENOMEM); 3456 return; 3457 } 3458 3459 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3460 3461 return; 3462 } 3463 3464 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3465 3466 if (read) { 3467 spdk_bs_sequence_t *seq; 3468 3469 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3470 if (!seq) { 3471 cb_fn(cb_arg, -ENOMEM); 3472 return; 3473 } 3474 3475 seq->ext_io_opts = ext_io_opts; 3476 3477 if (is_allocated) { 3478 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3479 } else { 3480 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3481 rw_iov_done, NULL); 3482 } 3483 } else { 3484 if (is_allocated) { 3485 spdk_bs_sequence_t *seq; 3486 3487 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3488 if (!seq) { 3489 cb_fn(cb_arg, -ENOMEM); 3490 return; 3491 } 3492 3493 seq->ext_io_opts = ext_io_opts; 3494 3495 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3496 } else { 3497 /* Queue this operation and allocate the cluster */ 3498 spdk_bs_user_op_t *op; 3499 3500 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3501 length); 3502 if (!op) { 3503 cb_fn(cb_arg, -ENOMEM); 3504 return; 3505 } 3506 3507 op->ext_io_opts = ext_io_opts; 3508 3509 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3510 } 3511 } 3512 } else { 3513 struct rw_iov_ctx *ctx; 3514 3515 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3516 if (ctx == NULL) { 3517 cb_fn(cb_arg, -ENOMEM); 3518 return; 3519 } 3520 3521 ctx->blob = blob; 3522 ctx->channel = _channel; 3523 ctx->cb_fn = cb_fn; 3524 ctx->cb_arg = cb_arg; 3525 ctx->read = read; 3526 ctx->orig_iov = iov; 3527 ctx->iovcnt = iovcnt; 3528 ctx->io_unit_offset = offset; 3529 ctx->io_units_remaining = length; 3530 ctx->io_units_done = 0; 3531 ctx->ext_io_opts = ext_io_opts; 3532 3533 rw_iov_split_next(ctx, 0); 3534 } 3535 } 3536 3537 static struct spdk_blob * 3538 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3539 { 3540 struct spdk_blob find; 3541 3542 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3543 return NULL; 3544 } 3545 3546 find.id = blobid; 3547 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3548 } 3549 3550 static void 3551 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3552 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3553 { 3554 assert(blob != NULL); 3555 *snapshot_entry = NULL; 3556 *clone_entry = NULL; 3557 3558 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3559 return; 3560 } 3561 3562 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3563 if ((*snapshot_entry)->id == blob->parent_id) { 3564 break; 3565 } 3566 } 3567 3568 if (*snapshot_entry != NULL) { 3569 TAILQ_FOREACH(*clone_entry, 
&(*snapshot_entry)->clones, link) { 3570 if ((*clone_entry)->id == blob->id) { 3571 break; 3572 } 3573 } 3574 3575 assert(*clone_entry != NULL); 3576 } 3577 } 3578 3579 static int 3580 bs_channel_create(void *io_device, void *ctx_buf) 3581 { 3582 struct spdk_blob_store *bs = io_device; 3583 struct spdk_bs_channel *channel = ctx_buf; 3584 struct spdk_bs_dev *dev; 3585 uint32_t max_ops = bs->max_channel_ops; 3586 uint32_t i; 3587 3588 dev = bs->dev; 3589 3590 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3591 if (!channel->req_mem) { 3592 return -1; 3593 } 3594 3595 TAILQ_INIT(&channel->reqs); 3596 3597 for (i = 0; i < max_ops; i++) { 3598 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3599 } 3600 3601 channel->bs = bs; 3602 channel->dev = dev; 3603 channel->dev_channel = dev->create_channel(dev); 3604 3605 if (!channel->dev_channel) { 3606 SPDK_ERRLOG("Failed to create device channel.\n"); 3607 free(channel->req_mem); 3608 return -1; 3609 } 3610 3611 channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_NUMA_ID_ANY, 3612 SPDK_MALLOC_DMA); 3613 if (!channel->new_cluster_page) { 3614 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3615 free(channel->req_mem); 3616 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3617 return -1; 3618 } 3619 3620 TAILQ_INIT(&channel->need_cluster_alloc); 3621 TAILQ_INIT(&channel->queued_io); 3622 RB_INIT(&channel->esnap_channels); 3623 3624 return 0; 3625 } 3626 3627 static void 3628 bs_channel_destroy(void *io_device, void *ctx_buf) 3629 { 3630 struct spdk_bs_channel *channel = ctx_buf; 3631 spdk_bs_user_op_t *op; 3632 3633 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3634 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3635 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3636 bs_user_op_abort(op, -EIO); 3637 } 3638 3639 while (!TAILQ_EMPTY(&channel->queued_io)) { 3640 op = TAILQ_FIRST(&channel->queued_io); 3641 TAILQ_REMOVE(&channel->queued_io, op, link); 3642 bs_user_op_abort(op, -EIO); 3643 } 3644 3645 blob_esnap_destroy_bs_channel(channel); 3646 3647 free(channel->req_mem); 3648 spdk_free(channel->new_cluster_page); 3649 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3650 } 3651 3652 static void 3653 bs_dev_destroy(void *io_device) 3654 { 3655 struct spdk_blob_store *bs = io_device; 3656 struct spdk_blob *blob, *blob_tmp; 3657 3658 bs->dev->destroy(bs->dev); 3659 3660 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3661 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3662 spdk_bit_array_clear(bs->open_blobids, blob->id); 3663 blob_free(blob); 3664 } 3665 3666 spdk_spin_destroy(&bs->used_lock); 3667 3668 spdk_bit_array_free(&bs->open_blobids); 3669 spdk_bit_array_free(&bs->used_blobids); 3670 spdk_bit_array_free(&bs->used_md_pages); 3671 spdk_bit_pool_free(&bs->used_clusters); 3672 /* 3673 * If this function is called for any reason except a successful unload, 3674 * the unload_cpl type will be NONE and this will be a nop. 
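* (e.g. when a failed load tears the blobstore down before any unload was requested).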
3675 */ 3676 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3677 3678 free(bs); 3679 } 3680 3681 static int 3682 bs_blob_list_add(struct spdk_blob *blob) 3683 { 3684 spdk_blob_id snapshot_id; 3685 struct spdk_blob_list *snapshot_entry = NULL; 3686 struct spdk_blob_list *clone_entry = NULL; 3687 3688 assert(blob != NULL); 3689 3690 snapshot_id = blob->parent_id; 3691 if (snapshot_id == SPDK_BLOBID_INVALID || 3692 snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 3693 return 0; 3694 } 3695 3696 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3697 if (snapshot_entry == NULL) { 3698 /* Snapshot not found */ 3699 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3700 if (snapshot_entry == NULL) { 3701 return -ENOMEM; 3702 } 3703 snapshot_entry->id = snapshot_id; 3704 TAILQ_INIT(&snapshot_entry->clones); 3705 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3706 } else { 3707 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3708 if (clone_entry->id == blob->id) { 3709 break; 3710 } 3711 } 3712 } 3713 3714 if (clone_entry == NULL) { 3715 /* Clone not found */ 3716 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3717 if (clone_entry == NULL) { 3718 return -ENOMEM; 3719 } 3720 clone_entry->id = blob->id; 3721 TAILQ_INIT(&clone_entry->clones); 3722 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3723 snapshot_entry->clone_count++; 3724 } 3725 3726 return 0; 3727 } 3728 3729 static void 3730 bs_blob_list_remove(struct spdk_blob *blob) 3731 { 3732 struct spdk_blob_list *snapshot_entry = NULL; 3733 struct spdk_blob_list *clone_entry = NULL; 3734 3735 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3736 3737 if (snapshot_entry == NULL) { 3738 return; 3739 } 3740 3741 blob->parent_id = SPDK_BLOBID_INVALID; 3742 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3743 free(clone_entry); 3744 3745 snapshot_entry->clone_count--; 3746 } 3747 3748 static int 3749 bs_blob_list_free(struct spdk_blob_store *bs) 3750 { 3751 struct spdk_blob_list *snapshot_entry; 3752 struct spdk_blob_list *snapshot_entry_tmp; 3753 struct spdk_blob_list *clone_entry; 3754 struct spdk_blob_list *clone_entry_tmp; 3755 3756 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3757 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3758 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3759 free(clone_entry); 3760 } 3761 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3762 free(snapshot_entry); 3763 } 3764 3765 return 0; 3766 } 3767 3768 static void 3769 bs_free(struct spdk_blob_store *bs) 3770 { 3771 bs_blob_list_free(bs); 3772 3773 bs_unregister_md_thread(bs); 3774 spdk_io_device_unregister(bs, bs_dev_destroy); 3775 } 3776 3777 void 3778 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3779 { 3780 3781 if (!opts) { 3782 SPDK_ERRLOG("opts should not be NULL\n"); 3783 return; 3784 } 3785 3786 if (!opts_size) { 3787 SPDK_ERRLOG("opts_size should not be zero value\n"); 3788 return; 3789 } 3790 3791 memset(opts, 0, opts_size); 3792 opts->opts_size = opts_size; 3793 3794 #define FIELD_OK(field) \ 3795 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3796 3797 #define SET_FIELD(field, value) \ 3798 if (FIELD_OK(field)) { \ 3799 opts->field = value; \ 3800 } \ 3801 3802 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3803 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3804 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3805 
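/* max_md_ops above takes the same default as num_md_pages (SPDK_BLOB_OPTS_NUM_MD_PAGES). */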
SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3806 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3807 3808 if (FIELD_OK(bstype)) { 3809 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3810 } 3811 3812 SET_FIELD(iter_cb_fn, NULL); 3813 SET_FIELD(iter_cb_arg, NULL); 3814 SET_FIELD(force_recover, false); 3815 SET_FIELD(esnap_bs_dev_create, NULL); 3816 SET_FIELD(esnap_ctx, NULL); 3817 3818 #undef FIELD_OK 3819 #undef SET_FIELD 3820 } 3821 3822 static int 3823 bs_opts_verify(struct spdk_bs_opts *opts) 3824 { 3825 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3826 opts->max_channel_ops == 0) { 3827 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3828 return -1; 3829 } 3830 3831 return 0; 3832 } 3833 3834 /* START spdk_bs_load */ 3835 3836 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3837 3838 struct spdk_bs_load_ctx { 3839 struct spdk_blob_store *bs; 3840 struct spdk_bs_super_block *super; 3841 3842 struct spdk_bs_md_mask *mask; 3843 bool in_page_chain; 3844 uint32_t page_index; 3845 uint32_t cur_page; 3846 struct spdk_blob_md_page *page; 3847 3848 uint64_t num_extent_pages; 3849 uint32_t *extent_page_num; 3850 struct spdk_blob_md_page *extent_pages; 3851 struct spdk_bit_array *used_clusters; 3852 3853 spdk_bs_sequence_t *seq; 3854 spdk_blob_op_with_handle_complete iter_cb_fn; 3855 void *iter_cb_arg; 3856 struct spdk_blob *blob; 3857 spdk_blob_id blobid; 3858 3859 bool force_recover; 3860 3861 /* These fields are used in the spdk_bs_dump path. */ 3862 bool dumping; 3863 FILE *fp; 3864 spdk_bs_dump_print_xattr print_xattr_fn; 3865 char xattr_name[4096]; 3866 }; 3867 3868 static int 3869 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3870 struct spdk_bs_load_ctx **_ctx) 3871 { 3872 struct spdk_blob_store *bs; 3873 struct spdk_bs_load_ctx *ctx; 3874 uint64_t dev_size; 3875 int rc; 3876 3877 dev_size = dev->blocklen * dev->blockcnt; 3878 if (dev_size < opts->cluster_sz) { 3879 /* Device size cannot be smaller than cluster size of blobstore */ 3880 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3881 dev_size, opts->cluster_sz); 3882 return -ENOSPC; 3883 } 3884 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 3885 /* Cluster size cannot be smaller than page size */ 3886 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3887 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3888 return -EINVAL; 3889 } 3890 bs = calloc(1, sizeof(struct spdk_blob_store)); 3891 if (!bs) { 3892 return -ENOMEM; 3893 } 3894 3895 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3896 if (!ctx) { 3897 free(bs); 3898 return -ENOMEM; 3899 } 3900 3901 ctx->bs = bs; 3902 ctx->iter_cb_fn = opts->iter_cb_fn; 3903 ctx->iter_cb_arg = opts->iter_cb_arg; 3904 ctx->force_recover = opts->force_recover; 3905 3906 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3907 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 3908 if (!ctx->super) { 3909 free(ctx); 3910 free(bs); 3911 return -ENOMEM; 3912 } 3913 3914 RB_INIT(&bs->open_blobs); 3915 TAILQ_INIT(&bs->snapshots); 3916 bs->dev = dev; 3917 bs->md_thread = spdk_get_thread(); 3918 assert(bs->md_thread != NULL); 3919 3920 /* 3921 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3922 * even multiple of the cluster size. 
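* The integer division below truncates, deliberately ignoring any partial trailing cluster.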
3923 */ 3924 bs->cluster_sz = opts->cluster_sz; 3925 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3926 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3927 if (!ctx->used_clusters) { 3928 spdk_free(ctx->super); 3929 free(ctx); 3930 free(bs); 3931 return -ENOMEM; 3932 } 3933 3934 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3935 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3936 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3937 } 3938 bs->num_free_clusters = bs->total_clusters; 3939 bs->io_unit_size = dev->blocklen; 3940 3941 bs->max_channel_ops = opts->max_channel_ops; 3942 bs->super_blob = SPDK_BLOBID_INVALID; 3943 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3944 bs->esnap_bs_dev_create = opts->esnap_bs_dev_create; 3945 bs->esnap_ctx = opts->esnap_ctx; 3946 3947 /* The metadata is assumed to be at least 1 page */ 3948 bs->used_md_pages = spdk_bit_array_create(1); 3949 bs->used_blobids = spdk_bit_array_create(0); 3950 bs->open_blobids = spdk_bit_array_create(0); 3951 3952 spdk_spin_init(&bs->used_lock); 3953 3954 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3955 sizeof(struct spdk_bs_channel), "blobstore"); 3956 rc = bs_register_md_thread(bs); 3957 if (rc == -1) { 3958 spdk_io_device_unregister(bs, NULL); 3959 spdk_spin_destroy(&bs->used_lock); 3960 spdk_bit_array_free(&bs->open_blobids); 3961 spdk_bit_array_free(&bs->used_blobids); 3962 spdk_bit_array_free(&bs->used_md_pages); 3963 spdk_bit_array_free(&ctx->used_clusters); 3964 spdk_free(ctx->super); 3965 free(ctx); 3966 free(bs); 3967 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3968 return -ENOMEM; 3969 } 3970 3971 *_ctx = ctx; 3972 *_bs = bs; 3973 return 0; 3974 } 3975 3976 static void 3977 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3978 { 3979 assert(bserrno != 0); 3980 3981 spdk_free(ctx->super); 3982 bs_sequence_finish(ctx->seq, bserrno); 3983 bs_free(ctx->bs); 3984 spdk_bit_array_free(&ctx->used_clusters); 3985 free(ctx); 3986 } 3987 3988 static void 3989 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3990 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3991 { 3992 /* Update the values in the super block */ 3993 super->super_blob = bs->super_blob; 3994 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 3995 super->crc = blob_md_page_calc_crc(super); 3996 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 3997 bs_byte_to_lba(bs, sizeof(*super)), 3998 cb_fn, cb_arg); 3999 } 4000 4001 static void 4002 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4003 { 4004 struct spdk_bs_load_ctx *ctx = arg; 4005 uint64_t mask_size, lba, lba_count; 4006 4007 /* Write out the used clusters mask */ 4008 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 4009 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4010 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4011 if (!ctx->mask) { 4012 bs_load_ctx_fail(ctx, -ENOMEM); 4013 return; 4014 } 4015 4016 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 4017 ctx->mask->length = ctx->bs->total_clusters; 4018 /* We could get here through the normal unload path, or through dirty 4019 * shutdown recovery. For the normal unload path, we use the mask from 4020 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 4021 * only the bit array from the load ctx. 
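* Either way the same on-disk mask layout is produced, since both spdk_bit_pool_store_mask() and spdk_bit_array_store_mask() serialize into ctx->mask->mask below.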
4022 */ 4023 if (ctx->bs->used_clusters) { 4024 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 4025 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 4026 } else { 4027 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 4028 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 4029 } 4030 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4031 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4032 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4033 } 4034 4035 static void 4036 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4037 { 4038 struct spdk_bs_load_ctx *ctx = arg; 4039 uint64_t mask_size, lba, lba_count; 4040 4041 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 4042 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4043 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4044 if (!ctx->mask) { 4045 bs_load_ctx_fail(ctx, -ENOMEM); 4046 return; 4047 } 4048 4049 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 4050 ctx->mask->length = ctx->super->md_len; 4051 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 4052 4053 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4054 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4055 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4056 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4057 } 4058 4059 static void 4060 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4061 { 4062 struct spdk_bs_load_ctx *ctx = arg; 4063 uint64_t mask_size, lba, lba_count; 4064 4065 if (ctx->super->used_blobid_mask_len == 0) { 4066 /* 4067 * This is a pre-v3 on-disk format where the blobid mask does not get 4068 * written to disk. 
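* Such blobstores are still loadable: bs_load_super_cpl() sees used_blobid_mask_len == 0 and takes the bs_recover() path, which rebuilds the blobid mask by replaying the md pages.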
4069 */ 4070 cb_fn(seq, arg, 0); 4071 return; 4072 } 4073 4074 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 4075 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4076 SPDK_MALLOC_DMA); 4077 if (!ctx->mask) { 4078 bs_load_ctx_fail(ctx, -ENOMEM); 4079 return; 4080 } 4081 4082 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 4083 ctx->mask->length = ctx->super->md_len; 4084 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 4085 4086 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 4087 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4088 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4089 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4090 } 4091 4092 static void 4093 blob_set_thin_provision(struct spdk_blob *blob) 4094 { 4095 blob_verify_md_op(blob); 4096 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4097 blob->state = SPDK_BLOB_STATE_DIRTY; 4098 } 4099 4100 static void 4101 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 4102 { 4103 blob_verify_md_op(blob); 4104 blob->clear_method = clear_method; 4105 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 4106 blob->state = SPDK_BLOB_STATE_DIRTY; 4107 } 4108 4109 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 4110 4111 static void 4112 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 4113 { 4114 struct spdk_bs_load_ctx *ctx = cb_arg; 4115 spdk_blob_id id; 4116 int64_t page_num; 4117 4118 /* Iterate to the next blob (we can't use the spdk_bs_iter_next function, as the 4119 * last blob has been removed) */ 4120 page_num = bs_blobid_to_page(ctx->blobid); 4121 page_num++; 4122 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 4123 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 4124 bs_load_iter(ctx, NULL, -ENOENT); 4125 return; 4126 } 4127 4128 id = bs_page_to_blobid(page_num); 4129 4130 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 4131 } 4132 4133 static void 4134 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 4135 { 4136 struct spdk_bs_load_ctx *ctx = cb_arg; 4137 4138 if (bserrno != 0) { 4139 SPDK_ERRLOG("Failed to close corrupted blob\n"); 4140 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4141 return; 4142 } 4143 4144 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 4145 } 4146 4147 static void 4148 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 4149 { 4150 struct spdk_bs_load_ctx *ctx = cb_arg; 4151 uint64_t i; 4152 4153 if (bserrno != 0) { 4154 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4155 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4156 return; 4157 } 4158 4159 /* Snapshot and clone have the same copy of cluster map and extent pages 4160 * at this point. Let's clear both for snapshot now, 4161 * so that it won't be cleared for clone later when we remove snapshot.
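* Otherwise deleting the snapshot would release clusters that the clone still references.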
4162 * Also set thin provision to pass data corruption check */ 4163 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 4164 ctx->blob->active.clusters[i] = 0; 4165 } 4166 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 4167 ctx->blob->active.extent_pages[i] = 0; 4168 } 4169 4170 ctx->blob->active.num_allocated_clusters = 0; 4171 4172 ctx->blob->md_ro = false; 4173 4174 blob_set_thin_provision(ctx->blob); 4175 4176 ctx->blobid = ctx->blob->id; 4177 4178 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 4179 } 4180 4181 static void 4182 bs_update_corrupted_blob(void *cb_arg, int bserrno) 4183 { 4184 struct spdk_bs_load_ctx *ctx = cb_arg; 4185 4186 if (bserrno != 0) { 4187 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4188 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4189 return; 4190 } 4191 4192 ctx->blob->md_ro = false; 4193 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 4194 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 4195 spdk_blob_set_read_only(ctx->blob); 4196 4197 if (ctx->iter_cb_fn) { 4198 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 4199 } 4200 bs_blob_list_add(ctx->blob); 4201 4202 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4203 } 4204 4205 static void 4206 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 4207 { 4208 struct spdk_bs_load_ctx *ctx = cb_arg; 4209 4210 if (bserrno != 0) { 4211 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 4212 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4213 return; 4214 } 4215 4216 if (blob->parent_id == ctx->blob->id) { 4217 /* Power failure occurred before updating clone (snapshot delete case) 4218 * or after updating clone (creating snapshot case) - keep snapshot */ 4219 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 4220 } else { 4221 /* Power failure occurred after updating clone (snapshot delete case) 4222 * or before updating clone (creating snapshot case) - remove snapshot */ 4223 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 4224 } 4225 } 4226 4227 static void 4228 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 4229 { 4230 struct spdk_bs_load_ctx *ctx = arg; 4231 const void *value; 4232 size_t len; 4233 int rc = 0; 4234 4235 if (bserrno == 0) { 4236 /* Examine blob if it is corrupted after power failure. Fix 4237 * the ones that can be fixed and remove any other corrupted 4238 * ones. If it is not corrupted just process it */ 4239 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 4240 if (rc != 0) { 4241 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 4242 if (rc != 0) { 4243 /* Not corrupted - process it and continue with iterating through blobs */ 4244 if (ctx->iter_cb_fn) { 4245 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 4246 } 4247 bs_blob_list_add(blob); 4248 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 4249 return; 4250 } 4251 4252 } 4253 4254 assert(len == sizeof(spdk_blob_id)); 4255 4256 ctx->blob = blob; 4257 4258 /* Open clone to check if we are able to fix this blob or should we remove it */ 4259 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 4260 return; 4261 } else if (bserrno == -ENOENT) { 4262 bserrno = 0; 4263 } else { 4264 /* 4265 * This case needs to be looked at further. Same problem 4266 * exists with applications that rely on explicit blob 4267 * iteration. We should just skip the blob that failed 4268 * to load and continue on to the next one. 
4269 */ 4270 SPDK_ERRLOG("Error iterating blobs\n"); 4271 } 4272 4273 ctx->iter_cb_fn = NULL; 4274 4275 spdk_free(ctx->super); 4276 spdk_free(ctx->mask); 4277 bs_sequence_finish(ctx->seq, bserrno); 4278 free(ctx); 4279 } 4280 4281 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 4282 4283 static void 4284 bs_load_complete(struct spdk_bs_load_ctx *ctx) 4285 { 4286 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters); 4287 if (ctx->dumping) { 4288 bs_dump_read_md_page(ctx->seq, ctx); 4289 return; 4290 } 4291 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx); 4292 } 4293 4294 static void 4295 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4296 { 4297 struct spdk_bs_load_ctx *ctx = cb_arg; 4298 int rc; 4299 4300 /* The type must be correct */ 4301 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 4302 4303 /* The length of the mask (in bits) must not be greater than 4304 * the length of the buffer (converted to bits) */ 4305 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 4306 4307 /* The length of the mask must be exactly equal to the size 4308 * (in pages) of the metadata region */ 4309 assert(ctx->mask->length == ctx->super->md_len); 4310 4311 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length); 4312 if (rc < 0) { 4313 spdk_free(ctx->mask); 4314 bs_load_ctx_fail(ctx, rc); 4315 return; 4316 } 4317 4318 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask); 4319 bs_load_complete(ctx); 4320 } 4321 4322 static void 4323 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4324 { 4325 struct spdk_bs_load_ctx *ctx = cb_arg; 4326 uint64_t lba, lba_count, mask_size; 4327 int rc; 4328 4329 if (bserrno != 0) { 4330 bs_load_ctx_fail(ctx, bserrno); 4331 return; 4332 } 4333 4334 /* The type must be correct */ 4335 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 4336 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4337 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 4338 struct spdk_blob_md_page) * 8)); 4339 /* 4340 * The length of the mask must be equal to or larger than the total number of clusters. It may be 4341 * larger than the total number of clusters due to a failed spdk_bs_grow.
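* (A grow may have enlarged the on-disk mask before completing; the clamp below simply drops the unused tail bits.)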
4342 */ 4343 assert(ctx->mask->length >= ctx->bs->total_clusters); 4344 if (ctx->mask->length > ctx->bs->total_clusters) { 4345 SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n"); 4346 ctx->mask->length = ctx->bs->total_clusters; 4347 } 4348 4349 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length); 4350 if (rc < 0) { 4351 spdk_free(ctx->mask); 4352 bs_load_ctx_fail(ctx, rc); 4353 return; 4354 } 4355 4356 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask); 4357 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters); 4358 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 4359 4360 spdk_free(ctx->mask); 4361 4362 /* Read the used blobids mask */ 4363 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 4364 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4365 SPDK_MALLOC_DMA); 4366 if (!ctx->mask) { 4367 bs_load_ctx_fail(ctx, -ENOMEM); 4368 return; 4369 } 4370 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4371 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4372 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4373 bs_load_used_blobids_cpl, ctx); 4374 } 4375 4376 static void 4377 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4378 { 4379 struct spdk_bs_load_ctx *ctx = cb_arg; 4380 uint64_t lba, lba_count, mask_size; 4381 int rc; 4382 4383 if (bserrno != 0) { 4384 bs_load_ctx_fail(ctx, bserrno); 4385 return; 4386 } 4387 4388 /* The type must be correct */ 4389 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 4390 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4391 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 4392 8)); 4393 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 4394 if (ctx->mask->length != ctx->super->md_len) { 4395 SPDK_ERRLOG("mismatched md_len in used_pages mask: " 4396 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n", 4397 ctx->mask->length, ctx->super->md_len); 4398 assert(false); 4399 } 4400 4401 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length); 4402 if (rc < 0) { 4403 spdk_free(ctx->mask); 4404 bs_load_ctx_fail(ctx, rc); 4405 return; 4406 } 4407 4408 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4409 spdk_free(ctx->mask); 4410 4411 /* Read the used clusters mask */ 4412 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 4413 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 4414 SPDK_MALLOC_DMA); 4415 if (!ctx->mask) { 4416 bs_load_ctx_fail(ctx, -ENOMEM); 4417 return; 4418 } 4419 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4420 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4421 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4422 bs_load_used_clusters_cpl, ctx); 4423 } 4424 4425 static void 4426 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx) 4427 { 4428 uint64_t lba, lba_count, mask_size; 4429 4430 /* Read the used pages mask */ 4431 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 4432 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4433 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4434 if (!ctx->mask) { 4435 bs_load_ctx_fail(ctx, -ENOMEM); 4436 return; 4437 } 4438 4439 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4440 lba_count = bs_page_to_lba(ctx->bs,
ctx->super->used_page_mask_len); 4441 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 4442 bs_load_used_pages_cpl, ctx); 4443 } 4444 4445 static int 4446 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 4447 { 4448 struct spdk_blob_store *bs = ctx->bs; 4449 struct spdk_blob_md_descriptor *desc; 4450 size_t cur_desc = 0; 4451 4452 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4453 while (cur_desc < sizeof(page->descriptors)) { 4454 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4455 if (desc->length == 0) { 4456 /* If padding and length are 0, this terminates the page */ 4457 break; 4458 } 4459 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4460 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4461 unsigned int i, j; 4462 unsigned int cluster_count = 0; 4463 uint32_t cluster_idx; 4464 4465 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4466 4467 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4468 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 4469 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 4470 /* 4471 * cluster_idx = 0 means an unallocated cluster - don't mark that 4472 * in the used cluster map. 4473 */ 4474 if (cluster_idx != 0) { 4475 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 4476 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 4477 if (bs->num_free_clusters == 0) { 4478 return -ENOSPC; 4479 } 4480 bs->num_free_clusters--; 4481 } 4482 cluster_count++; 4483 } 4484 } 4485 if (cluster_count == 0) { 4486 return -EINVAL; 4487 } 4488 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4489 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4490 uint32_t i; 4491 uint32_t cluster_count = 0; 4492 uint32_t cluster_idx; 4493 size_t cluster_idx_length; 4494 4495 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4496 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4497 4498 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4499 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4500 return -EINVAL; 4501 } 4502 4503 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4504 cluster_idx = desc_extent->cluster_idx[i]; 4505 /* 4506 * cluster_idx = 0 means an unallocated cluster - don't mark that 4507 * in the used cluster map. 
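* (Cluster 0 holds the super block, so index 0 can never name a valid data cluster, which is what makes it safe to use as the "unallocated" marker.)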
4508 */ 4509 if (cluster_idx != 0) { 4510 if (cluster_idx < desc_extent->start_cluster_idx && 4511 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4512 return -EINVAL; 4513 } 4514 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4515 if (bs->num_free_clusters == 0) { 4516 return -ENOSPC; 4517 } 4518 bs->num_free_clusters--; 4519 } 4520 cluster_count++; 4521 } 4522 4523 if (cluster_count == 0) { 4524 return -EINVAL; 4525 } 4526 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4527 /* Skip this item */ 4528 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4529 /* Skip this item */ 4530 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4531 /* Skip this item */ 4532 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4533 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4534 uint32_t num_extent_pages = ctx->num_extent_pages; 4535 uint32_t i; 4536 size_t extent_pages_length; 4537 void *tmp; 4538 4539 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4540 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4541 4542 if (desc_extent_table->length == 0 || 4543 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4544 return -EINVAL; 4545 } 4546 4547 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4548 if (desc_extent_table->extent_page[i].page_idx != 0) { 4549 if (desc_extent_table->extent_page[i].num_pages != 1) { 4550 return -EINVAL; 4551 } 4552 num_extent_pages += 1; 4553 } 4554 } 4555 4556 if (num_extent_pages > 0) { 4557 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4558 if (tmp == NULL) { 4559 return -ENOMEM; 4560 } 4561 ctx->extent_page_num = tmp; 4562 4563 /* Extent table entries contain md page numbers for extent pages. 4564 * Zeroes represent unallocated extent pages, those are run-length-encoded. 4565 */ 4566 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4567 if (desc_extent_table->extent_page[i].page_idx != 0) { 4568 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4569 ctx->num_extent_pages += 1; 4570 } 4571 } 4572 } 4573 } else { 4574 /* Error */ 4575 return -EINVAL; 4576 } 4577 /* Advance to the next descriptor */ 4578 cur_desc += sizeof(*desc) + desc->length; 4579 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4580 break; 4581 } 4582 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4583 } 4584 return 0; 4585 } 4586 4587 static bool 4588 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4589 { 4590 uint32_t crc; 4591 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4592 size_t desc_len; 4593 4594 crc = blob_md_page_calc_crc(page); 4595 if (crc != page->crc) { 4596 return false; 4597 } 4598 4599 /* Extent page should always be of sequence num 0. */ 4600 if (page->sequence_num != 0) { 4601 return false; 4602 } 4603 4604 /* Descriptor type must be EXTENT_PAGE. */ 4605 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4606 return false; 4607 } 4608 4609 /* Descriptor length cannot exceed the page. */ 4610 desc_len = sizeof(*desc) + desc->length; 4611 if (desc_len > sizeof(page->descriptors)) { 4612 return false; 4613 } 4614 4615 /* It has to be the only descriptor in the page. 
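* The check below enforces this: if there is still room for another descriptor header after this one, that trailing space must be zeroed padding (length == 0).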
*/ 4616 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 4617 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 4618 if (desc->length != 0) { 4619 return false; 4620 } 4621 } 4622 4623 return true; 4624 } 4625 4626 static bool 4627 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 4628 { 4629 uint32_t crc; 4630 struct spdk_blob_md_page *page = ctx->page; 4631 4632 crc = blob_md_page_calc_crc(page); 4633 if (crc != page->crc) { 4634 return false; 4635 } 4636 4637 /* First page of a sequence should match the blobid. */ 4638 if (page->sequence_num == 0 && 4639 bs_page_to_blobid(ctx->cur_page) != page->id) { 4640 return false; 4641 } 4642 assert(bs_load_cur_extent_page_valid(page) == false); 4643 4644 return true; 4645 } 4646 4647 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx); 4648 4649 static void 4650 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4651 { 4652 struct spdk_bs_load_ctx *ctx = cb_arg; 4653 4654 if (bserrno != 0) { 4655 bs_load_ctx_fail(ctx, bserrno); 4656 return; 4657 } 4658 4659 bs_load_complete(ctx); 4660 } 4661 4662 static void 4663 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4664 { 4665 struct spdk_bs_load_ctx *ctx = cb_arg; 4666 4667 spdk_free(ctx->mask); 4668 ctx->mask = NULL; 4669 4670 if (bserrno != 0) { 4671 bs_load_ctx_fail(ctx, bserrno); 4672 return; 4673 } 4674 4675 bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl); 4676 } 4677 4678 static void 4679 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4680 { 4681 struct spdk_bs_load_ctx *ctx = cb_arg; 4682 4683 spdk_free(ctx->mask); 4684 ctx->mask = NULL; 4685 4686 if (bserrno != 0) { 4687 bs_load_ctx_fail(ctx, bserrno); 4688 return; 4689 } 4690 4691 bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl); 4692 } 4693 4694 static void 4695 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx) 4696 { 4697 bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl); 4698 } 4699 4700 static void 4701 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx) 4702 { 4703 uint64_t num_md_clusters; 4704 uint64_t i; 4705 4706 ctx->in_page_chain = false; 4707 4708 do { 4709 ctx->page_index++; 4710 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 4711 4712 if (ctx->page_index < ctx->super->md_len) { 4713 ctx->cur_page = ctx->page_index; 4714 bs_load_replay_cur_md_page(ctx); 4715 } else { 4716 /* Claim all of the clusters used by the metadata */ 4717 num_md_clusters = spdk_divide_round_up( 4718 ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster); 4719 for (i = 0; i < num_md_clusters; i++) { 4720 spdk_bit_array_set(ctx->used_clusters, i); 4721 } 4722 ctx->bs->num_free_clusters -= num_md_clusters; 4723 spdk_free(ctx->page); 4724 bs_load_write_used_md(ctx); 4725 } 4726 } 4727 4728 static void 4729 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4730 { 4731 struct spdk_bs_load_ctx *ctx = cb_arg; 4732 uint32_t page_num; 4733 uint64_t i; 4734 4735 if (bserrno != 0) { 4736 spdk_free(ctx->extent_pages); 4737 bs_load_ctx_fail(ctx, bserrno); 4738 return; 4739 } 4740 4741 for (i = 0; i < ctx->num_extent_pages; i++) { 4742 /* Extent pages are only read when present within the in-chain md. 4743 * The md integrity is not right if such a page was not a valid extent page.
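* A CRC-valid page that fails this check means the extent table pointed at a stale or reused md page, so replay is aborted with -EILSEQ below.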
*/ 4744 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4745 spdk_free(ctx->extent_pages); 4746 bs_load_ctx_fail(ctx, -EILSEQ); 4747 return; 4748 } 4749 4750 page_num = ctx->extent_page_num[i]; 4751 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4752 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4753 spdk_free(ctx->extent_pages); 4754 bs_load_ctx_fail(ctx, -EILSEQ); 4755 return; 4756 } 4757 } 4758 4759 spdk_free(ctx->extent_pages); 4760 free(ctx->extent_page_num); 4761 ctx->extent_page_num = NULL; 4762 ctx->num_extent_pages = 0; 4763 4764 bs_load_replay_md_chain_cpl(ctx); 4765 } 4766 4767 static void 4768 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4769 { 4770 spdk_bs_batch_t *batch; 4771 uint32_t page; 4772 uint64_t lba; 4773 uint64_t i; 4774 4775 ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0, 4776 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4777 if (!ctx->extent_pages) { 4778 bs_load_ctx_fail(ctx, -ENOMEM); 4779 return; 4780 } 4781 4782 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4783 4784 for (i = 0; i < ctx->num_extent_pages; i++) { 4785 page = ctx->extent_page_num[i]; 4786 assert(page < ctx->super->md_len); 4787 lba = bs_md_page_to_lba(ctx->bs, page); 4788 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4789 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE)); 4790 } 4791 4792 bs_batch_close(batch); 4793 } 4794 4795 static void 4796 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4797 { 4798 struct spdk_bs_load_ctx *ctx = cb_arg; 4799 uint32_t page_num; 4800 struct spdk_blob_md_page *page; 4801 4802 if (bserrno != 0) { 4803 bs_load_ctx_fail(ctx, bserrno); 4804 return; 4805 } 4806 4807 page_num = ctx->cur_page; 4808 page = ctx->page; 4809 if (bs_load_cur_md_page_valid(ctx) == true) { 4810 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4811 spdk_spin_lock(&ctx->bs->used_lock); 4812 bs_claim_md_page(ctx->bs, page_num); 4813 spdk_spin_unlock(&ctx->bs->used_lock); 4814 if (page->sequence_num == 0) { 4815 SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num); 4816 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4817 } 4818 if (bs_load_replay_md_parse_page(ctx, page)) { 4819 bs_load_ctx_fail(ctx, -EILSEQ); 4820 return; 4821 } 4822 if (page->next != SPDK_INVALID_MD_PAGE) { 4823 ctx->in_page_chain = true; 4824 ctx->cur_page = page->next; 4825 bs_load_replay_cur_md_page(ctx); 4826 return; 4827 } 4828 if (ctx->num_extent_pages != 0) { 4829 bs_load_replay_extent_pages(ctx); 4830 return; 4831 } 4832 } 4833 } 4834 bs_load_replay_md_chain_cpl(ctx); 4835 } 4836 4837 static void 4838 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4839 { 4840 uint64_t lba; 4841 4842 assert(ctx->cur_page < ctx->super->md_len); 4843 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4844 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4845 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4846 bs_load_replay_md_cpl, ctx); 4847 } 4848 4849 static void 4850 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4851 { 4852 ctx->page_index = 0; 4853 ctx->cur_page = 0; 4854 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4855 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 4856 if (!ctx->page) { 4857 bs_load_ctx_fail(ctx, -ENOMEM); 4858 return; 4859 } 4860 bs_load_replay_cur_md_page(ctx); 4861 } 4862 4863 static void 4864 bs_recover(struct spdk_bs_load_ctx *ctx) 4865 { 4866 int rc; 4867 4868 SPDK_NOTICELOG("Performing recovery on blobstore\n"); 4869 rc = 
spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4870 if (rc < 0) { 4871 bs_load_ctx_fail(ctx, -ENOMEM); 4872 return; 4873 } 4874 4875 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4876 if (rc < 0) { 4877 bs_load_ctx_fail(ctx, -ENOMEM); 4878 return; 4879 } 4880 4881 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4882 if (rc < 0) { 4883 bs_load_ctx_fail(ctx, -ENOMEM); 4884 return; 4885 } 4886 4887 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4888 if (rc < 0) { 4889 bs_load_ctx_fail(ctx, -ENOMEM); 4890 return; 4891 } 4892 4893 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4894 bs_load_replay_md(ctx); 4895 } 4896 4897 static int 4898 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4899 { 4900 int rc; 4901 4902 if (ctx->super->size == 0) { 4903 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4904 } 4905 4906 if (ctx->super->io_unit_size == 0) { 4907 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4908 } 4909 4910 ctx->bs->clean = 1; 4911 ctx->bs->cluster_sz = ctx->super->cluster_size; 4912 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4913 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 4914 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) { 4915 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster); 4916 } 4917 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4918 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4919 if (rc < 0) { 4920 return -ENOMEM; 4921 } 4922 ctx->bs->md_start = ctx->super->md_start; 4923 ctx->bs->md_len = ctx->super->md_len; 4924 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4925 if (rc < 0) { 4926 return -ENOMEM; 4927 } 4928 4929 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4930 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4931 ctx->bs->super_blob = ctx->super->super_blob; 4932 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4933 4934 return 0; 4935 } 4936 4937 static void 4938 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4939 { 4940 struct spdk_bs_load_ctx *ctx = cb_arg; 4941 int rc; 4942 4943 rc = bs_super_validate(ctx->super, ctx->bs); 4944 if (rc != 0) { 4945 bs_load_ctx_fail(ctx, rc); 4946 return; 4947 } 4948 4949 rc = bs_parse_super(ctx); 4950 if (rc < 0) { 4951 bs_load_ctx_fail(ctx, rc); 4952 return; 4953 } 4954 4955 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4956 bs_recover(ctx); 4957 } else { 4958 bs_load_read_used_pages(ctx); 4959 } 4960 } 4961 4962 static inline int 4963 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4964 { 4965 4966 if (!src->opts_size) { 4967 SPDK_ERRLOG("opts_size should not be zero value\n"); 4968 return -1; 4969 } 4970 4971 #define FIELD_OK(field) \ 4972 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4973 4974 #define SET_FIELD(field) \ 4975 if (FIELD_OK(field)) { \ 4976 dst->field = src->field; \ 4977 } \ 4978 4979 SET_FIELD(cluster_sz); 4980 SET_FIELD(num_md_pages); 4981 SET_FIELD(max_md_ops); 4982 SET_FIELD(max_channel_ops); 4983 SET_FIELD(clear_method); 4984 4985 if (FIELD_OK(bstype)) { 4986 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 4987 } 4988 SET_FIELD(iter_cb_fn); 4989 SET_FIELD(iter_cb_arg); 4990 SET_FIELD(force_recover); 4991 SET_FIELD(esnap_bs_dev_create); 4992 
SET_FIELD(esnap_ctx); 4993 4994 dst->opts_size = src->opts_size; 4995 4996 /* You should not remove this statement, but need to update the assert statement 4997 * if you add a new field, and also add a corresponding SET_FIELD statement */ 4998 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size"); 4999 5000 #undef FIELD_OK 5001 #undef SET_FIELD 5002 5003 return 0; 5004 } 5005 5006 void 5007 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5008 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5009 { 5010 struct spdk_blob_store *bs; 5011 struct spdk_bs_cpl cpl; 5012 struct spdk_bs_load_ctx *ctx; 5013 struct spdk_bs_opts opts = {}; 5014 int err; 5015 5016 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 5017 5018 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5019 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 5020 dev->destroy(dev); 5021 cb_fn(cb_arg, NULL, -EINVAL); 5022 return; 5023 } 5024 5025 spdk_bs_opts_init(&opts, sizeof(opts)); 5026 if (o) { 5027 if (bs_opts_copy(o, &opts)) { 5028 return; 5029 } 5030 } 5031 5032 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 5033 dev->destroy(dev); 5034 cb_fn(cb_arg, NULL, -EINVAL); 5035 return; 5036 } 5037 5038 err = bs_alloc(dev, &opts, &bs, &ctx); 5039 if (err) { 5040 dev->destroy(dev); 5041 cb_fn(cb_arg, NULL, err); 5042 return; 5043 } 5044 5045 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5046 cpl.u.bs_handle.cb_fn = cb_fn; 5047 cpl.u.bs_handle.cb_arg = cb_arg; 5048 cpl.u.bs_handle.bs = bs; 5049 5050 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5051 if (!ctx->seq) { 5052 spdk_free(ctx->super); 5053 free(ctx); 5054 bs_free(bs); 5055 cb_fn(cb_arg, NULL, -ENOMEM); 5056 return; 5057 } 5058 5059 /* Read the super block */ 5060 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5061 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5062 bs_load_super_cpl, ctx); 5063 } 5064 5065 /* END spdk_bs_load */ 5066 5067 /* START spdk_bs_dump */ 5068 5069 static void 5070 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 5071 { 5072 spdk_free(ctx->super); 5073 5074 /* 5075 * We need to defer calling bs_call_cpl() until after 5076 * dev destruction, so tuck these away for later use. 
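* Setting seq->cpl.type to SPDK_BS_CPL_TYPE_NONE below keeps bs_sequence_finish() from firing the completion immediately; the copy saved in bs->unload_cpl is invoked later from bs_dev_destroy(), after the dev has been torn down.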
5077 */ 5078 ctx->bs->unload_err = bserrno; 5079 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5080 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5081 5082 bs_sequence_finish(seq, 0); 5083 bs_free(ctx->bs); 5084 free(ctx); 5085 } 5086 5087 static void 5088 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5089 { 5090 struct spdk_blob_md_descriptor_xattr *desc_xattr; 5091 uint32_t i; 5092 const char *type; 5093 5094 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 5095 5096 if (desc_xattr->length != 5097 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 5098 desc_xattr->name_length + desc_xattr->value_length) { /* A corrupt descriptor's length fields may not add up; note it in the dump output but keep going. */ fprintf(ctx->fp, "Warning: xattr descriptor length mismatch\n"); 5099 } 5100 5101 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 5102 ctx->xattr_name[desc_xattr->name_length] = '\0'; 5103 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5104 type = "XATTR"; 5105 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5106 type = "XATTR_INTERNAL"; 5107 } else { 5108 assert(false); 5109 type = "XATTR_?"; 5110 } 5111 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name); 5112 fprintf(ctx->fp, " value = \""); 5113 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 5114 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 5115 desc_xattr->value_length); 5116 fprintf(ctx->fp, "\"\n"); 5117 for (i = 0; i < desc_xattr->value_length; i++) { 5118 if (i % 16 == 0) { 5119 fprintf(ctx->fp, " "); 5120 } 5121 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 5122 if ((i + 1) % 16 == 0) { 5123 fprintf(ctx->fp, "\n"); 5124 } 5125 } 5126 if (i % 16 != 0) { 5127 fprintf(ctx->fp, "\n"); 5128 } 5129 } 5130 5131 struct type_flag_desc { 5132 uint64_t mask; 5133 uint64_t val; 5134 const char *name; 5135 }; 5136 5137 static void 5138 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags, 5139 struct type_flag_desc *desc, size_t numflags) 5140 { 5141 uint64_t covered = 0; 5142 size_t i; 5143 5144 for (i = 0; i < numflags; i++) { 5145 if ((desc[i].mask & flags) != desc[i].val) { 5146 continue; 5147 } 5148 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name); 5149 if (desc[i].mask != desc[i].val) { 5150 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")", 5151 desc[i].mask, desc[i].val); 5152 } 5153 fprintf(ctx->fp, "\n"); 5154 covered |= desc[i].mask; 5155 } 5156 if ((flags & ~covered) != 0) { 5157 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered); 5158 } 5159 } 5160 5161 static void 5162 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5163 { 5164 struct spdk_blob_md_descriptor_flags *type_desc; 5165 #define ADD_FLAG(f) { f, f, #f } 5166 #define ADD_MASK_VAL(m, v) { m, v, #v } 5167 static struct type_flag_desc invalid[] = { 5168 ADD_FLAG(SPDK_BLOB_THIN_PROV), 5169 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR), 5170 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE), 5171 }; 5172 static struct type_flag_desc data_ro[] = { 5173 ADD_FLAG(SPDK_BLOB_READ_ONLY), 5174 }; 5175 static struct type_flag_desc md_ro[] = { 5176 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT), 5177 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE), 5178 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP), 5179 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES), 5180 }; 5181 #undef ADD_FLAG 5182 #undef ADD_MASK_VAL 5183 5184 type_desc = (struct
spdk_blob_md_descriptor_flags *)desc; 5185 fprintf(ctx->fp, "Flags:\n"); 5186 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 5187 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 5188 SPDK_COUNTOF(invalid)); 5189 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 5190 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 5191 SPDK_COUNTOF(data_ro)); 5192 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 5193 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 5194 SPDK_COUNTOF(md_ro)); 5195 } 5196 5197 static void 5198 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5199 { 5200 struct spdk_blob_md_descriptor_extent_table *et_desc; 5201 uint64_t num_extent_pages; 5202 uint32_t et_idx; 5203 5204 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 5205 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 5206 sizeof(et_desc->extent_page[0]); 5207 5208 fprintf(ctx->fp, "Extent table:\n"); 5209 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 5210 if (et_desc->extent_page[et_idx].page_idx == 0) { 5211 /* Zeroes represent unallocated extent pages. */ 5212 continue; 5213 } 5214 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 5215 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 5216 et_desc->extent_page[et_idx].num_pages, 5217 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 5218 } 5219 } 5220 5221 static void 5222 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 5223 { 5224 uint32_t page_idx = ctx->cur_page; 5225 struct spdk_blob_md_page *page = ctx->page; 5226 struct spdk_blob_md_descriptor *desc; 5227 size_t cur_desc = 0; 5228 uint32_t crc; 5229 5230 fprintf(ctx->fp, "=========\n"); 5231 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 5232 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 5233 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 5234 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 5235 if (page->next == SPDK_INVALID_MD_PAGE) { 5236 fprintf(ctx->fp, "Next: None\n"); 5237 } else { 5238 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 5239 } 5240 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 5241 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 5242 fprintf(ctx->fp, " md"); 5243 } 5244 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 5245 fprintf(ctx->fp, " blob"); 5246 } 5247 fprintf(ctx->fp, "\n"); 5248 5249 crc = blob_md_page_calc_crc(page); 5250 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 5251 5252 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 5253 while (cur_desc < sizeof(page->descriptors)) { 5254 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 5255 if (desc->length == 0) { 5256 /* If padding and length are 0, this terminates the page */ 5257 break; 5258 } 5259 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 5260 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 5261 unsigned int i; 5262 5263 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 5264 5265 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 5266 if (desc_extent_rle->extents[i].cluster_idx != 0) { 5267 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5268 desc_extent_rle->extents[i].cluster_idx); 5269 } else { 5270 fprintf(ctx->fp, "Unallocated Extent - "); 5271 } 5272 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 5273 fprintf(ctx->fp, "\n"); 5274 } 5275 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 5276 struct spdk_blob_md_descriptor_extent_page *desc_extent; 5277 unsigned int i; 5278 5279 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 5280 5281 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 5282 if (desc_extent->cluster_idx[i] != 0) { 5283 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5284 desc_extent->cluster_idx[i]); 5285 } else { 5286 fprintf(ctx->fp, "Unallocated Extent"); 5287 } 5288 fprintf(ctx->fp, "\n"); 5289 } 5290 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5291 bs_dump_print_xattr(ctx, desc); 5292 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5293 bs_dump_print_xattr(ctx, desc); 5294 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 5295 bs_dump_print_type_flags(ctx, desc); 5296 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 5297 bs_dump_print_extent_table(ctx, desc); 5298 } else { 5299 /* Error */ 5300 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 5301 } 5302 /* Advance to the next descriptor */ 5303 cur_desc += sizeof(*desc) + desc->length; 5304 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 5305 break; 5306 } 5307 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 5308 } 5309 } 5310 5311 static void 5312 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5313 { 5314 struct spdk_bs_load_ctx *ctx = cb_arg; 5315 5316 if (bserrno != 0) { 5317 bs_dump_finish(seq, ctx, bserrno); 5318 return; 5319 } 5320 5321 if (ctx->page->id != 0) { 5322 bs_dump_print_md_page(ctx); 5323 } 5324 5325 ctx->cur_page++; 5326 5327 if (ctx->cur_page < ctx->super->md_len) { 5328 bs_dump_read_md_page(seq, ctx); 5329 } else { 5330 spdk_free(ctx->page); 5331 bs_dump_finish(seq, ctx, 0); 5332 } 5333 } 5334 5335 static void 5336 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 5337 { 5338 struct spdk_bs_load_ctx *ctx = cb_arg; 5339 uint64_t lba; 5340 5341 assert(ctx->cur_page < ctx->super->md_len); 5342 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 5343 bs_sequence_read_dev(seq, ctx->page, lba, 5344 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 5345 bs_dump_read_md_page_cpl, ctx); 5346 } 5347 5348 static void 5349 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5350 { 5351 struct spdk_bs_load_ctx *ctx = cb_arg; 5352 int rc; 5353 5354 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 5355 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5356 sizeof(ctx->super->signature)) != 0) { 5357 fprintf(ctx->fp, "(Mismatch)\n"); 5358 bs_dump_finish(seq, ctx, bserrno); 5359 return; 5360 } else { 5361 fprintf(ctx->fp, "(OK)\n"); 5362 } 5363 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 5364 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 5365 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 5366 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 5367 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 5368 fprintf(ctx->fp, "Super Blob ID: "); 5369 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 5370 fprintf(ctx->fp, "(None)\n"); 5371 } else { 5372 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 5373 } 5374 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 5375 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 5376 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 5377 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 5378 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 5379 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 5380 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 5381 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 5382 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 5383 5384 ctx->cur_page = 0; 5385 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 5386 NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5387 if (!ctx->page) { 5388 bs_dump_finish(seq, ctx, -ENOMEM); 5389 return; 5390 } 5391 5392 rc = bs_parse_super(ctx); 5393 if (rc < 0) { 5394 bs_load_ctx_fail(ctx, rc); 5395 return; 5396 } 5397 5398 bs_load_read_used_pages(ctx); 5399 } 5400 5401 void 5402 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 5403 spdk_bs_op_complete cb_fn, void *cb_arg) 5404 { 5405 struct spdk_blob_store *bs; 5406 struct spdk_bs_cpl cpl; 5407 struct spdk_bs_load_ctx *ctx; 5408 struct spdk_bs_opts opts = {}; 5409 int err; 5410 5411 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 5412 5413 spdk_bs_opts_init(&opts, sizeof(opts)); 5414 5415 err = bs_alloc(dev, &opts, &bs, &ctx); 5416 if (err) { 5417 dev->destroy(dev); 5418 cb_fn(cb_arg, err); 5419 return; 5420 } 5421 5422 ctx->dumping = true; 5423 ctx->fp = fp; 5424 ctx->print_xattr_fn = print_xattr_fn; 5425 5426 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5427 cpl.u.bs_basic.cb_fn = cb_fn; 5428 cpl.u.bs_basic.cb_arg = cb_arg; 5429 5430 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5431 if (!ctx->seq) { 5432 spdk_free(ctx->super); 5433 free(ctx); 5434 bs_free(bs); 5435 cb_fn(cb_arg, -ENOMEM); 5436 return; 5437 } 5438 5439 /* Read the super block */ 5440 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5441 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5442 bs_dump_super_cpl, ctx); 5443 } 5444 5445 /* END spdk_bs_dump */ 5446 5447 /* START spdk_bs_init */ 5448 5449 static void 5450 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5451 { 5452 struct spdk_bs_load_ctx *ctx = cb_arg; 5453 5454 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 5455 spdk_free(ctx->super); 5456 free(ctx); 5457 5458 bs_sequence_finish(seq, bserrno); 5459 } 5460 5461 static void 5462 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5463 { 5464 struct spdk_bs_load_ctx *ctx = cb_arg; 5465 5466 /* Write super block */ 5467 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5468 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5469 bs_init_persist_super_cpl, ctx); 5470 } 5471 5472 void 5473 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5474 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5475 { 5476 struct spdk_bs_load_ctx *ctx; 5477 struct spdk_blob_store *bs; 5478 struct spdk_bs_cpl cpl; 5479 spdk_bs_sequence_t *seq; 5480 spdk_bs_batch_t *batch; 5481 uint64_t num_md_lba; 5482 uint64_t num_md_pages; 5483 uint64_t num_md_clusters; 5484 uint64_t max_used_cluster_mask_len; 5485 uint32_t i; 5486 struct spdk_bs_opts opts = {}; 5487 int rc; 5488 uint64_t lba, lba_count; 5489 5490 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5491 5492 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5493 SPDK_ERRLOG("unsupported dev block length of %d\n", 5494 dev->blocklen); 5495 dev->destroy(dev); 5496 cb_fn(cb_arg, NULL, -EINVAL); 5497 return; 5498 } 5499 5500 spdk_bs_opts_init(&opts, sizeof(opts)); 5501 if (o) { 5502 if (bs_opts_copy(o, &opts)) { 5503 return; 5504 } 5505 } 5506 5507 if (bs_opts_verify(&opts) != 0) { 5508 dev->destroy(dev); 5509 cb_fn(cb_arg, NULL, -EINVAL); 5510 return; 5511 } 5512 5513 rc = bs_alloc(dev, &opts, &bs, &ctx); 5514 if (rc) { 5515 dev->destroy(dev); 5516 cb_fn(cb_arg, NULL, rc); 5517 return; 5518 } 5519 5520 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5521 /* By default, allocate 1 page per cluster. 5522 * Technically, this over-allocates metadata 5523 * because more metadata will reduce the number 5524 * of usable clusters. This can be addressed with 5525 * more complex math in the future. 5526 */ 5527 bs->md_len = bs->total_clusters; 5528 } else { 5529 bs->md_len = opts.num_md_pages; 5530 } 5531 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5532 if (rc < 0) { 5533 spdk_free(ctx->super); 5534 free(ctx); 5535 bs_free(bs); 5536 cb_fn(cb_arg, NULL, -ENOMEM); 5537 return; 5538 } 5539 5540 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5541 if (rc < 0) { 5542 spdk_free(ctx->super); 5543 free(ctx); 5544 bs_free(bs); 5545 cb_fn(cb_arg, NULL, -ENOMEM); 5546 return; 5547 } 5548 5549 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5550 if (rc < 0) { 5551 spdk_free(ctx->super); 5552 free(ctx); 5553 bs_free(bs); 5554 cb_fn(cb_arg, NULL, -ENOMEM); 5555 return; 5556 } 5557 5558 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5559 sizeof(ctx->super->signature)); 5560 ctx->super->version = SPDK_BS_VERSION; 5561 ctx->super->length = sizeof(*ctx->super); 5562 ctx->super->super_blob = bs->super_blob; 5563 ctx->super->clean = 0; 5564 ctx->super->cluster_size = bs->cluster_sz; 5565 ctx->super->io_unit_size = bs->io_unit_size; 5566 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5567 5568 /* Calculate how many pages the metadata consumes at the front 5569 * of the disk. 5570 */ 5571 5572 /* The super block uses 1 page */ 5573 num_md_pages = 1; 5574 5575 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5576 * up to the nearest page, plus a header. 
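* For example, with 4096-byte md pages and bs->md_len = 1024, the mask needs 1024 bits = 128 bytes plus the spdk_bs_md_mask header, which still rounds up to a single page.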
5577 */ 5578 ctx->super->used_page_mask_start = num_md_pages; 5579 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5580 spdk_divide_round_up(bs->md_len, 8), 5581 SPDK_BS_PAGE_SIZE); 5582 num_md_pages += ctx->super->used_page_mask_len; 5583 5584 /* The used_clusters mask requires 1 bit per cluster, rounded 5585 * up to the nearest page, plus a header. 5586 */ 5587 ctx->super->used_cluster_mask_start = num_md_pages; 5588 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5589 spdk_divide_round_up(bs->total_clusters, 8), 5590 SPDK_BS_PAGE_SIZE); 5591 /* The blobstore might be extended, then the used_cluster bitmap will need more space. 5592 * Here we calculate the max clusters we can support according to the 5593 * num_md_pages (bs->md_len). 5594 */ 5595 max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5596 spdk_divide_round_up(bs->md_len, 8), 5597 SPDK_BS_PAGE_SIZE); 5598 max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len, 5599 ctx->super->used_cluster_mask_len); 5600 num_md_pages += max_used_cluster_mask_len; 5601 5602 /* The used_blobids mask requires 1 bit per metadata page, rounded 5603 * up to the nearest page, plus a header. 5604 */ 5605 ctx->super->used_blobid_mask_start = num_md_pages; 5606 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5607 spdk_divide_round_up(bs->md_len, 8), 5608 SPDK_BS_PAGE_SIZE); 5609 num_md_pages += ctx->super->used_blobid_mask_len; 5610 5611 /* The metadata region size was chosen above */ 5612 ctx->super->md_start = bs->md_start = num_md_pages; 5613 ctx->super->md_len = bs->md_len; 5614 num_md_pages += bs->md_len; 5615 5616 num_md_lba = bs_page_to_lba(bs, num_md_pages); 5617 5618 ctx->super->size = dev->blockcnt * dev->blocklen; 5619 5620 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 5621 5622 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 5623 if (num_md_clusters > bs->total_clusters) { 5624 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 5625 "please decrease number of pages reserved for metadata " 5626 "or increase cluster size.\n"); 5627 spdk_free(ctx->super); 5628 spdk_bit_array_free(&ctx->used_clusters); 5629 free(ctx); 5630 bs_free(bs); 5631 cb_fn(cb_arg, NULL, -ENOMEM); 5632 return; 5633 } 5634 /* Claim all of the clusters used by the metadata */ 5635 for (i = 0; i < num_md_clusters; i++) { 5636 spdk_bit_array_set(ctx->used_clusters, i); 5637 } 5638 5639 bs->num_free_clusters -= num_md_clusters; 5640 bs->total_data_clusters = bs->num_free_clusters; 5641 5642 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5643 cpl.u.bs_handle.cb_fn = cb_fn; 5644 cpl.u.bs_handle.cb_arg = cb_arg; 5645 cpl.u.bs_handle.bs = bs; 5646 5647 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5648 if (!seq) { 5649 spdk_free(ctx->super); 5650 free(ctx); 5651 bs_free(bs); 5652 cb_fn(cb_arg, NULL, -ENOMEM); 5653 return; 5654 } 5655 5656 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx); 5657 5658 /* Clear metadata space */ 5659 bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 5660 5661 lba = num_md_lba; 5662 lba_count = ctx->bs->dev->blockcnt - lba; 5663 switch (opts.clear_method) { 5664 case BS_CLEAR_WITH_UNMAP: 5665 /* Trim data clusters */ 5666 bs_batch_unmap_dev(batch, lba, lba_count); 5667 break; 5668 case BS_CLEAR_WITH_WRITE_ZEROES: 5669 /* Write_zeroes to data clusters */ 5670 bs_batch_write_zeroes_dev(batch, lba, lba_count); 5671 break; 5672 case 
BS_CLEAR_WITH_NONE: 5673 default: 5674 break; 5675 } 5676 5677 bs_batch_close(batch); 5678 } 5679 5680 /* END spdk_bs_init */ 5681 5682 /* START spdk_bs_destroy */ 5683 5684 static void 5685 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5686 { 5687 struct spdk_bs_load_ctx *ctx = cb_arg; 5688 struct spdk_blob_store *bs = ctx->bs; 5689 5690 /* 5691 * We need to defer calling bs_call_cpl() until after 5692 * dev destruction, so tuck these away for later use. 5693 */ 5694 bs->unload_err = bserrno; 5695 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5696 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5697 5698 bs_sequence_finish(seq, bserrno); 5699 5700 bs_free(bs); 5701 free(ctx); 5702 } 5703 5704 void 5705 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5706 void *cb_arg) 5707 { 5708 struct spdk_bs_cpl cpl; 5709 spdk_bs_sequence_t *seq; 5710 struct spdk_bs_load_ctx *ctx; 5711 5712 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5713 5714 if (!RB_EMPTY(&bs->open_blobs)) { 5715 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5716 cb_fn(cb_arg, -EBUSY); 5717 return; 5718 } 5719 5720 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5721 cpl.u.bs_basic.cb_fn = cb_fn; 5722 cpl.u.bs_basic.cb_arg = cb_arg; 5723 5724 ctx = calloc(1, sizeof(*ctx)); 5725 if (!ctx) { 5726 cb_fn(cb_arg, -ENOMEM); 5727 return; 5728 } 5729 5730 ctx->bs = bs; 5731 5732 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5733 if (!seq) { 5734 free(ctx); 5735 cb_fn(cb_arg, -ENOMEM); 5736 return; 5737 } 5738 5739 /* Write zeroes to the super block */ 5740 bs_sequence_write_zeroes_dev(seq, 5741 bs_page_to_lba(bs, 0), 5742 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5743 bs_destroy_trim_cpl, ctx); 5744 } 5745 5746 /* END spdk_bs_destroy */ 5747 5748 /* START spdk_bs_unload */ 5749 5750 static void 5751 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5752 { 5753 spdk_bs_sequence_t *seq = ctx->seq; 5754 5755 spdk_free(ctx->super); 5756 5757 /* 5758 * We need to defer calling bs_call_cpl() until after 5759 * dev destruction, so tuck these away for later use. 
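* The blobstore keeps unload_err and unload_cpl so that bs_free(), after destroying the dev, can still complete the original request; the completion is expected to be invoked from the dev-destruction path.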
5760 */ 5761 ctx->bs->unload_err = bserrno; 5762 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5763 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5764 5765 bs_sequence_finish(seq, bserrno); 5766 5767 bs_free(ctx->bs); 5768 free(ctx); 5769 } 5770 5771 static void 5772 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5773 { 5774 struct spdk_bs_load_ctx *ctx = cb_arg; 5775 5776 bs_unload_finish(ctx, bserrno); 5777 } 5778 5779 static void 5780 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5781 { 5782 struct spdk_bs_load_ctx *ctx = cb_arg; 5783 5784 spdk_free(ctx->mask); 5785 5786 if (bserrno != 0) { 5787 bs_unload_finish(ctx, bserrno); 5788 return; 5789 } 5790 5791 ctx->super->clean = 1; 5792 5793 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5794 } 5795 5796 static void 5797 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5798 { 5799 struct spdk_bs_load_ctx *ctx = cb_arg; 5800 5801 spdk_free(ctx->mask); 5802 ctx->mask = NULL; 5803 5804 if (bserrno != 0) { 5805 bs_unload_finish(ctx, bserrno); 5806 return; 5807 } 5808 5809 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5810 } 5811 5812 static void 5813 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5814 { 5815 struct spdk_bs_load_ctx *ctx = cb_arg; 5816 5817 spdk_free(ctx->mask); 5818 ctx->mask = NULL; 5819 5820 if (bserrno != 0) { 5821 bs_unload_finish(ctx, bserrno); 5822 return; 5823 } 5824 5825 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5826 } 5827 5828 static void 5829 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5830 { 5831 struct spdk_bs_load_ctx *ctx = cb_arg; 5832 int rc; 5833 5834 if (bserrno != 0) { 5835 bs_unload_finish(ctx, bserrno); 5836 return; 5837 } 5838 5839 rc = bs_super_validate(ctx->super, ctx->bs); 5840 if (rc != 0) { 5841 bs_unload_finish(ctx, rc); 5842 return; 5843 } 5844 5845 bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5846 } 5847 5848 void 5849 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5850 { 5851 struct spdk_bs_cpl cpl; 5852 struct spdk_bs_load_ctx *ctx; 5853 5854 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5855 5856 /* 5857 * If external snapshot channels are being destroyed while the blobstore is unloaded, the 5858 * unload is deferred until after the channel destruction completes. 
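* When the last esnap channel teardown completes, that path is expected to call spdk_bs_unload() again with the cb_fn/cb_arg saved below, which is why the deferred case records them and the resumed case asserts they match before clearing them.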
5859 */ 5860 if (bs->esnap_channels_unloading != 0) { 5861 if (bs->esnap_unload_cb_fn != NULL) { 5862 SPDK_ERRLOG("Blobstore unload in progress\n"); 5863 cb_fn(cb_arg, -EBUSY); 5864 return; 5865 } 5866 SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32 5867 " esnap clones are unloading\n", bs->esnap_channels_unloading); 5868 bs->esnap_unload_cb_fn = cb_fn; 5869 bs->esnap_unload_cb_arg = cb_arg; 5870 return; 5871 } 5872 if (bs->esnap_unload_cb_fn != NULL) { 5873 SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n"); 5874 assert(bs->esnap_unload_cb_fn == cb_fn); 5875 assert(bs->esnap_unload_cb_arg == cb_arg); 5876 bs->esnap_unload_cb_fn = NULL; 5877 bs->esnap_unload_cb_arg = NULL; 5878 } 5879 5880 if (!RB_EMPTY(&bs->open_blobs)) { 5881 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5882 cb_fn(cb_arg, -EBUSY); 5883 return; 5884 } 5885 5886 ctx = calloc(1, sizeof(*ctx)); 5887 if (!ctx) { 5888 cb_fn(cb_arg, -ENOMEM); 5889 return; 5890 } 5891 5892 ctx->bs = bs; 5893 5894 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5895 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5896 if (!ctx->super) { 5897 free(ctx); 5898 cb_fn(cb_arg, -ENOMEM); 5899 return; 5900 } 5901 5902 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5903 cpl.u.bs_basic.cb_fn = cb_fn; 5904 cpl.u.bs_basic.cb_arg = cb_arg; 5905 5906 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5907 if (!ctx->seq) { 5908 spdk_free(ctx->super); 5909 free(ctx); 5910 cb_fn(cb_arg, -ENOMEM); 5911 return; 5912 } 5913 5914 /* Read super block */ 5915 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5916 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5917 bs_unload_read_super_cpl, ctx); 5918 } 5919 5920 /* END spdk_bs_unload */ 5921 5922 /* START spdk_bs_set_super */ 5923 5924 struct spdk_bs_set_super_ctx { 5925 struct spdk_blob_store *bs; 5926 struct spdk_bs_super_block *super; 5927 }; 5928 5929 static void 5930 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5931 { 5932 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5933 5934 if (bserrno != 0) { 5935 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5936 } 5937 5938 spdk_free(ctx->super); 5939 5940 bs_sequence_finish(seq, bserrno); 5941 5942 free(ctx); 5943 } 5944 5945 static void 5946 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5947 { 5948 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5949 int rc; 5950 5951 if (bserrno != 0) { 5952 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5953 spdk_free(ctx->super); 5954 bs_sequence_finish(seq, bserrno); 5955 free(ctx); 5956 return; 5957 } 5958 5959 rc = bs_super_validate(ctx->super, ctx->bs); 5960 if (rc != 0) { 5961 SPDK_ERRLOG("Not a valid super block\n"); 5962 spdk_free(ctx->super); 5963 bs_sequence_finish(seq, rc); 5964 free(ctx); 5965 return; 5966 } 5967 5968 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 5969 } 5970 5971 void 5972 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 5973 spdk_bs_op_complete cb_fn, void *cb_arg) 5974 { 5975 struct spdk_bs_cpl cpl; 5976 spdk_bs_sequence_t *seq; 5977 struct spdk_bs_set_super_ctx *ctx; 5978 5979 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 5980 5981 ctx = calloc(1, sizeof(*ctx)); 5982 if (!ctx) { 5983 cb_fn(cb_arg, -ENOMEM); 5984 return; 5985 } 5986 5987 ctx->bs = bs; 5988 5989 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5990 SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 5991 if (!ctx->super) { 5992 free(ctx); 5993 cb_fn(cb_arg, 
-ENOMEM); 5994 return; 5995 } 5996 5997 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5998 cpl.u.bs_basic.cb_fn = cb_fn; 5999 cpl.u.bs_basic.cb_arg = cb_arg; 6000 6001 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 6002 if (!seq) { 6003 spdk_free(ctx->super); 6004 free(ctx); 6005 cb_fn(cb_arg, -ENOMEM); 6006 return; 6007 } 6008 6009 bs->super_blob = blobid; 6010 6011 /* Read super block */ 6012 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 6013 bs_byte_to_lba(bs, sizeof(*ctx->super)), 6014 bs_set_super_read_cpl, ctx); 6015 } 6016 6017 /* END spdk_bs_set_super */ 6018 6019 void 6020 spdk_bs_get_super(struct spdk_blob_store *bs, 6021 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6022 { 6023 if (bs->super_blob == SPDK_BLOBID_INVALID) { 6024 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 6025 } else { 6026 cb_fn(cb_arg, bs->super_blob, 0); 6027 } 6028 } 6029 6030 uint64_t 6031 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 6032 { 6033 return bs->cluster_sz; 6034 } 6035 6036 uint64_t 6037 spdk_bs_get_page_size(struct spdk_blob_store *bs) 6038 { 6039 return SPDK_BS_PAGE_SIZE; 6040 } 6041 6042 uint64_t 6043 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 6044 { 6045 return bs->io_unit_size; 6046 } 6047 6048 uint64_t 6049 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 6050 { 6051 return bs->num_free_clusters; 6052 } 6053 6054 uint64_t 6055 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 6056 { 6057 return bs->total_data_clusters; 6058 } 6059 6060 static int 6061 bs_register_md_thread(struct spdk_blob_store *bs) 6062 { 6063 bs->md_channel = spdk_get_io_channel(bs); 6064 if (!bs->md_channel) { 6065 SPDK_ERRLOG("Failed to get IO channel.\n"); 6066 return -1; 6067 } 6068 6069 return 0; 6070 } 6071 6072 static int 6073 bs_unregister_md_thread(struct spdk_blob_store *bs) 6074 { 6075 spdk_put_io_channel(bs->md_channel); 6076 6077 return 0; 6078 } 6079 6080 spdk_blob_id 6081 spdk_blob_get_id(struct spdk_blob *blob) 6082 { 6083 assert(blob != NULL); 6084 6085 return blob->id; 6086 } 6087 6088 uint64_t 6089 spdk_blob_get_num_pages(struct spdk_blob *blob) 6090 { 6091 assert(blob != NULL); 6092 6093 return bs_cluster_to_page(blob->bs, blob->active.num_clusters); 6094 } 6095 6096 uint64_t 6097 spdk_blob_get_num_io_units(struct spdk_blob *blob) 6098 { 6099 assert(blob != NULL); 6100 6101 return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs); 6102 } 6103 6104 uint64_t 6105 spdk_blob_get_num_clusters(struct spdk_blob *blob) 6106 { 6107 assert(blob != NULL); 6108 6109 return blob->active.num_clusters; 6110 } 6111 6112 uint64_t 6113 spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob) 6114 { 6115 assert(blob != NULL); 6116 6117 return blob->active.num_allocated_clusters; 6118 } 6119 6120 static uint64_t 6121 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated) 6122 { 6123 uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob); 6124 6125 while (offset < blob_io_unit_num) { 6126 if (bs_io_unit_is_allocated(blob, offset) == is_allocated) { 6127 return offset; 6128 } 6129 6130 offset += bs_num_io_units_to_cluster_boundary(blob, offset); 6131 } 6132 6133 return UINT64_MAX; 6134 } 6135 6136 uint64_t 6137 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6138 { 6139 return blob_find_io_unit(blob, offset, true); 6140 } 6141 6142 uint64_t 6143 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6144 { 6145 return blob_find_io_unit(blob, offset, false); 6146 } 6147 6148 /* 
START spdk_bs_create_blob */ 6149 6150 static void 6151 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6152 { 6153 struct spdk_blob *blob = cb_arg; 6154 uint32_t page_idx = bs_blobid_to_page(blob->id); 6155 6156 if (bserrno != 0) { 6157 spdk_spin_lock(&blob->bs->used_lock); 6158 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 6159 bs_release_md_page(blob->bs, page_idx); 6160 spdk_spin_unlock(&blob->bs->used_lock); 6161 } 6162 6163 blob_free(blob); 6164 6165 bs_sequence_finish(seq, bserrno); 6166 } 6167 6168 static int 6169 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 6170 bool internal) 6171 { 6172 uint64_t i; 6173 size_t value_len = 0; 6174 int rc; 6175 const void *value = NULL; 6176 if (xattrs->count > 0 && xattrs->get_value == NULL) { 6177 return -EINVAL; 6178 } 6179 for (i = 0; i < xattrs->count; i++) { 6180 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 6181 if (value == NULL || value_len == 0) { 6182 return -EINVAL; 6183 } 6184 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 6185 if (rc < 0) { 6186 return rc; 6187 } 6188 } 6189 return 0; 6190 } 6191 6192 static void 6193 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 6194 { 6195 #define FIELD_OK(field) \ 6196 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 6197 6198 #define SET_FIELD(field) \ 6199 if (FIELD_OK(field)) { \ 6200 dst->field = src->field; \ 6201 } \ 6202 6203 SET_FIELD(num_clusters); 6204 SET_FIELD(thin_provision); 6205 SET_FIELD(clear_method); 6206 6207 if (FIELD_OK(xattrs)) { 6208 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 6209 } 6210 6211 SET_FIELD(use_extent_table); 6212 SET_FIELD(esnap_id); 6213 SET_FIELD(esnap_id_len); 6214 6215 dst->opts_size = src->opts_size; 6216 6217 /* You should not remove this statement, but need to update the assert statement 6218 * if you add a new field, and also add a corresponding SET_FIELD statement */ 6219 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size"); 6220 6221 #undef FIELD_OK 6222 #undef SET_FIELD 6223 } 6224 6225 static void 6226 bs_create_blob(struct spdk_blob_store *bs, 6227 const struct spdk_blob_opts *opts, 6228 const struct spdk_blob_xattr_opts *internal_xattrs, 6229 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6230 { 6231 struct spdk_blob *blob; 6232 uint32_t page_idx; 6233 struct spdk_bs_cpl cpl; 6234 struct spdk_blob_opts opts_local; 6235 struct spdk_blob_xattr_opts internal_xattrs_default; 6236 spdk_bs_sequence_t *seq; 6237 spdk_blob_id id; 6238 int rc; 6239 6240 assert(spdk_get_thread() == bs->md_thread); 6241 6242 spdk_spin_lock(&bs->used_lock); 6243 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 6244 if (page_idx == UINT32_MAX) { 6245 spdk_spin_unlock(&bs->used_lock); 6246 cb_fn(cb_arg, 0, -ENOMEM); 6247 return; 6248 } 6249 spdk_bit_array_set(bs->used_blobids, page_idx); 6250 bs_claim_md_page(bs, page_idx); 6251 spdk_spin_unlock(&bs->used_lock); 6252 6253 id = bs_page_to_blobid(page_idx); 6254 6255 SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx); 6256 6257 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 6258 if (opts) { 6259 blob_opts_copy(opts, &opts_local); 6260 } 6261 6262 blob = blob_alloc(bs, id); 6263 if (!blob) { 6264 rc = -ENOMEM; 6265 goto error; 6266 } 6267 6268 blob->use_extent_table = opts_local.use_extent_table; 6269 if (blob->use_extent_table) { 6270 blob->invalid_flags |= 
SPDK_BLOB_EXTENT_TABLE; 6271 } 6272 6273 if (!internal_xattrs) { 6274 blob_xattrs_init(&internal_xattrs_default); 6275 internal_xattrs = &internal_xattrs_default; 6276 } 6277 6278 rc = blob_set_xattrs(blob, &opts_local.xattrs, false); 6279 if (rc < 0) { 6280 goto error; 6281 } 6282 6283 rc = blob_set_xattrs(blob, internal_xattrs, true); 6284 if (rc < 0) { 6285 goto error; 6286 } 6287 6288 if (opts_local.thin_provision) { 6289 blob_set_thin_provision(blob); 6290 } 6291 6292 blob_set_clear_method(blob, opts_local.clear_method); 6293 6294 if (opts_local.esnap_id != NULL) { 6295 if (opts_local.esnap_id_len > UINT16_MAX) { 6296 SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n", 6297 opts_local.esnap_id_len); 6298 rc = -EINVAL; 6299 goto error; 6300 6301 } 6302 blob_set_thin_provision(blob); 6303 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 6304 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, 6305 opts_local.esnap_id, opts_local.esnap_id_len, true); 6306 if (rc != 0) { 6307 goto error; 6308 } 6309 } 6310 6311 rc = blob_resize(blob, opts_local.num_clusters); 6312 if (rc < 0) { 6313 goto error; 6314 } 6315 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6316 cpl.u.blobid.cb_fn = cb_fn; 6317 cpl.u.blobid.cb_arg = cb_arg; 6318 cpl.u.blobid.blobid = blob->id; 6319 6320 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 6321 if (!seq) { 6322 rc = -ENOMEM; 6323 goto error; 6324 } 6325 6326 blob_persist(seq, blob, bs_create_blob_cpl, blob); 6327 return; 6328 6329 error: 6330 SPDK_ERRLOG("Failed to create blob: %s, size in clusters: %lu\n", 6331 spdk_strerror(rc), opts_local.num_clusters); 6332 if (blob != NULL) { 6333 blob_free(blob); 6334 } 6335 spdk_spin_lock(&bs->used_lock); 6336 spdk_bit_array_clear(bs->used_blobids, page_idx); 6337 bs_release_md_page(bs, page_idx); 6338 spdk_spin_unlock(&bs->used_lock); 6339 cb_fn(cb_arg, 0, rc); 6340 } 6341 6342 void 6343 spdk_bs_create_blob(struct spdk_blob_store *bs, 6344 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6345 { 6346 bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 6347 } 6348 6349 void 6350 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts, 6351 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6352 { 6353 bs_create_blob(bs, opts, NULL, cb_fn, cb_arg); 6354 } 6355 6356 /* END spdk_bs_create_blob */ 6357 6358 /* START blob_cleanup */ 6359 6360 struct spdk_clone_snapshot_ctx { 6361 struct spdk_bs_cpl cpl; 6362 int bserrno; 6363 bool frozen; 6364 6365 struct spdk_io_channel *channel; 6366 6367 /* Current cluster for inflate operation */ 6368 uint64_t cluster; 6369 6370 /* For inflation, force allocation of all unallocated clusters and remove 6371 * thin-provisioning. Otherwise only decouple the parent and keep the clone thin. */ 6372 bool allocate_all; 6373 6374 struct { 6375 spdk_blob_id id; 6376 struct spdk_blob *blob; 6377 bool md_ro; 6378 } original; 6379 struct { 6380 spdk_blob_id id; 6381 struct spdk_blob *blob; 6382 } new; 6383 6384 /* xattrs specified for snapshot/clones only. They have no impact on 6385 * the original blob's xattrs.
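* They are applied only to the newly created snapshot or clone (see the memcpy into opts.xattrs in the open callbacks below).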
*/ 6386 const struct spdk_blob_xattr_opts *xattrs; 6387 }; 6388 6389 static void 6390 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 6391 { 6392 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 6393 struct spdk_bs_cpl *cpl = &ctx->cpl; 6394 6395 if (bserrno != 0) { 6396 if (ctx->bserrno != 0) { 6397 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6398 } else { 6399 ctx->bserrno = bserrno; 6400 } 6401 } 6402 6403 switch (cpl->type) { 6404 case SPDK_BS_CPL_TYPE_BLOBID: 6405 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 6406 break; 6407 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 6408 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 6409 break; 6410 default: 6411 SPDK_UNREACHABLE(); 6412 break; 6413 } 6414 6415 free(ctx); 6416 } 6417 6418 static void 6419 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6420 { 6421 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6422 struct spdk_blob *origblob = ctx->original.blob; 6423 6424 if (bserrno != 0) { 6425 if (ctx->bserrno != 0) { 6426 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 6427 } else { 6428 ctx->bserrno = bserrno; 6429 } 6430 } 6431 6432 ctx->original.id = origblob->id; 6433 origblob->locked_operation_in_progress = false; 6434 6435 /* Revert md_ro to original state */ 6436 origblob->md_ro = ctx->original.md_ro; 6437 6438 spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx); 6439 } 6440 6441 static void 6442 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 6443 { 6444 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6445 struct spdk_blob *origblob = ctx->original.blob; 6446 6447 if (bserrno != 0) { 6448 if (ctx->bserrno != 0) { 6449 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6450 } else { 6451 ctx->bserrno = bserrno; 6452 } 6453 } 6454 6455 if (ctx->frozen) { 6456 /* Unfreeze any outstanding I/O */ 6457 blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx); 6458 } else { 6459 bs_snapshot_unfreeze_cpl(ctx, 0); 6460 } 6461 6462 } 6463 6464 static void 6465 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno) 6466 { 6467 struct spdk_blob *newblob = ctx->new.blob; 6468 6469 if (bserrno != 0) { 6470 if (ctx->bserrno != 0) { 6471 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6472 } else { 6473 ctx->bserrno = bserrno; 6474 } 6475 } 6476 6477 ctx->new.id = newblob->id; 6478 spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6479 } 6480 6481 /* END blob_cleanup */ 6482 6483 /* START spdk_bs_create_snapshot */ 6484 6485 static void 6486 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 6487 { 6488 uint64_t *cluster_temp; 6489 uint64_t num_allocated_clusters_temp; 6490 uint32_t *extent_page_temp; 6491 6492 cluster_temp = blob1->active.clusters; 6493 blob1->active.clusters = blob2->active.clusters; 6494 blob2->active.clusters = cluster_temp; 6495 6496 num_allocated_clusters_temp = blob1->active.num_allocated_clusters; 6497 blob1->active.num_allocated_clusters = blob2->active.num_allocated_clusters; 6498 blob2->active.num_allocated_clusters = num_allocated_clusters_temp; 6499 6500 extent_page_temp = blob1->active.extent_pages; 6501 blob1->active.extent_pages = blob2->active.extent_pages; 6502 blob2->active.extent_pages = extent_page_temp; 6503 } 6504 6505 /* Copies an internal xattr */ 6506 static int 6507 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name) 6508 { 6509 const void *val = NULL; 6510 
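/* blob_get_xattr_value() returns a pointer into fromblob's in-memory xattr list; blob_set_xattr() makes its own copy of the value, so val needs no separate lifetime management here. */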
size_t len; 6511 int bserrno; 6512 6513 bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true); 6514 if (bserrno != 0) { 6515 SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name); 6516 return bserrno; 6517 } 6518 6519 bserrno = blob_set_xattr(toblob, name, val, len, true); 6520 if (bserrno != 0) { 6521 SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n", 6522 name, toblob->id); 6523 return bserrno; 6524 } 6525 return 0; 6526 } 6527 6528 static void 6529 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno) 6530 { 6531 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6532 struct spdk_blob *origblob = ctx->original.blob; 6533 struct spdk_blob *newblob = ctx->new.blob; 6534 6535 if (bserrno != 0) { 6536 bs_snapshot_swap_cluster_maps(newblob, origblob); 6537 if (blob_is_esnap_clone(newblob)) { 6538 bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6539 origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 6540 } 6541 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6542 return; 6543 } 6544 6545 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */ 6546 bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true); 6547 if (bserrno != 0) { 6548 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6549 return; 6550 } 6551 6552 bs_blob_list_add(ctx->original.blob); 6553 6554 spdk_blob_set_read_only(newblob); 6555 6556 /* sync snapshot metadata */ 6557 spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6558 } 6559 6560 static void 6561 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno) 6562 { 6563 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6564 struct spdk_blob *origblob = ctx->original.blob; 6565 struct spdk_blob *newblob = ctx->new.blob; 6566 6567 if (bserrno != 0) { 6568 /* return cluster map back to original */ 6569 bs_snapshot_swap_cluster_maps(newblob, origblob); 6570 6571 /* Newblob md sync failed. Valid clusters are only present in origblob. 6572 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred. 6573 * Newblob needs to be reverted to the thin_provisioned state it had at creation to properly close.
*/ 6574 blob_set_thin_provision(newblob); 6575 assert(spdk_mem_all_zero(newblob->active.clusters, 6576 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6577 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6578 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6579 6580 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6581 return; 6582 } 6583 6584 /* Set internal xattr for snapshot id */ 6585 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 6586 if (bserrno != 0) { 6587 /* return cluster map back to original */ 6588 bs_snapshot_swap_cluster_maps(newblob, origblob); 6589 blob_set_thin_provision(newblob); 6590 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6591 return; 6592 } 6593 6594 /* Create new back_bs_dev for snapshot */ 6595 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 6596 if (origblob->back_bs_dev == NULL) { 6597 /* return cluster map back to original */ 6598 bs_snapshot_swap_cluster_maps(newblob, origblob); 6599 blob_set_thin_provision(newblob); 6600 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 6601 return; 6602 } 6603 6604 /* Remove the xattr that references an external snapshot */ 6605 if (blob_is_esnap_clone(origblob)) { 6606 origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6607 bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6608 if (bserrno != 0) { 6609 if (bserrno == -ENOENT) { 6610 SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID 6611 " xattr to remove\n", origblob->id); 6612 assert(false); 6613 } else { 6614 /* return cluster map back to original */ 6615 bs_snapshot_swap_cluster_maps(newblob, origblob); 6616 blob_set_thin_provision(newblob); 6617 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6618 return; 6619 } 6620 } 6621 } 6622 6623 bs_blob_list_remove(origblob); 6624 origblob->parent_id = newblob->id; 6625 /* set clone blob as thin provisioned */ 6626 blob_set_thin_provision(origblob); 6627 6628 bs_blob_list_add(newblob); 6629 6630 /* sync clone metadata */ 6631 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 6632 } 6633 6634 static void 6635 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 6636 { 6637 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6638 struct spdk_blob *origblob = ctx->original.blob; 6639 struct spdk_blob *newblob = ctx->new.blob; 6640 int bserrno; 6641 6642 if (rc != 0) { 6643 bs_clone_snapshot_newblob_cleanup(ctx, rc); 6644 return; 6645 } 6646 6647 ctx->frozen = true; 6648 6649 if (blob_is_esnap_clone(origblob)) { 6650 /* Clean up any channels associated with the original blob id because future IO will 6651 * perform IO using the snapshot blob_id. 
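* The snapshot takes over origblob's back_bs_dev just below, so reads that miss in the clone will subsequently be served through channels keyed by the snapshot's blob id.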
6652 */ 6653 blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL); 6654 } 6655 if (newblob->back_bs_dev) { 6656 blob_back_bs_destroy(newblob); 6657 } 6658 /* set new back_bs_dev for snapshot */ 6659 newblob->back_bs_dev = origblob->back_bs_dev; 6660 /* Set invalid flags from origblob */ 6661 newblob->invalid_flags = origblob->invalid_flags; 6662 6663 /* inherit parent from original blob if set */ 6664 newblob->parent_id = origblob->parent_id; 6665 switch (origblob->parent_id) { 6666 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 6667 bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6668 if (bserrno != 0) { 6669 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6670 return; 6671 } 6672 break; 6673 case SPDK_BLOBID_INVALID: 6674 break; 6675 default: 6676 /* Set internal xattr for snapshot id */ 6677 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6678 &origblob->parent_id, sizeof(spdk_blob_id), true); 6679 if (bserrno != 0) { 6680 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6681 return; 6682 } 6683 } 6684 6685 /* swap cluster maps */ 6686 bs_snapshot_swap_cluster_maps(newblob, origblob); 6687 6688 /* Set the clear method on the new blob to match the original. */ 6689 blob_set_clear_method(newblob, origblob->clear_method); 6690 6691 /* sync snapshot metadata */ 6692 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6693 } 6694 6695 static void 6696 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6697 { 6698 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6699 struct spdk_blob *origblob = ctx->original.blob; 6700 struct spdk_blob *newblob = _blob; 6701 6702 if (bserrno != 0) { 6703 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6704 return; 6705 } 6706 6707 ctx->new.blob = newblob; 6708 assert(spdk_blob_is_thin_provisioned(newblob)); 6709 assert(spdk_mem_all_zero(newblob->active.clusters, 6710 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6711 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6712 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6713 6714 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6715 } 6716 6717 static void 6718 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6719 { 6720 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6721 struct spdk_blob *origblob = ctx->original.blob; 6722 6723 if (bserrno != 0) { 6724 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6725 return; 6726 } 6727 6728 ctx->new.id = blobid; 6729 ctx->cpl.u.blobid.blobid = blobid; 6730 6731 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6732 } 6733 6734 6735 static void 6736 bs_xattr_snapshot(void *arg, const char *name, 6737 const void **value, size_t *value_len) 6738 { 6739 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6740 6741 struct spdk_blob *blob = (struct spdk_blob *)arg; 6742 *value = &blob->id; 6743 *value_len = sizeof(blob->id); 6744 } 6745 6746 static void 6747 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6748 { 6749 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6750 struct spdk_blob_opts opts; 6751 struct spdk_blob_xattr_opts internal_xattrs; 6752 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6753 6754 if (bserrno != 0) { 6755 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6756 return; 6757 } 6758 6759 
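/* Snapshot creation runs as a chain of async steps: create the new blob (thin-provisioned, same cluster count as the original), open it, freeze I/O on the original, swap the cluster maps so the snapshot owns the allocated data, sync the snapshot's metadata, then sync the original, which is now a thin clone of the snapshot. Failures unwind through the cleanup helpers above. */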
ctx->original.blob = _blob; 6760 6761 if (_blob->data_ro || _blob->md_ro) { 6762 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id 0x%" 6763 PRIx64 "\n", _blob->id); 6764 ctx->bserrno = -EINVAL; 6765 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6766 return; 6767 } 6768 6769 if (_blob->locked_operation_in_progress) { 6770 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6771 ctx->bserrno = -EBUSY; 6772 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6773 return; 6774 } 6775 6776 _blob->locked_operation_in_progress = true; 6777 6778 spdk_blob_opts_init(&opts, sizeof(opts)); 6779 blob_xattrs_init(&internal_xattrs); 6780 6781 /* Change the size of new blob to the same as in original blob, 6782 * but do not allocate clusters */ 6783 opts.thin_provision = true; 6784 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6785 opts.use_extent_table = _blob->use_extent_table; 6786 6787 /* If there are any xattrs specified for snapshot, set them now */ 6788 if (ctx->xattrs) { 6789 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6790 } 6791 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6792 internal_xattrs.count = 1; 6793 internal_xattrs.ctx = _blob; 6794 internal_xattrs.names = xattrs_names; 6795 internal_xattrs.get_value = bs_xattr_snapshot; 6796 6797 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6798 bs_snapshot_newblob_create_cpl, ctx); 6799 } 6800 6801 void 6802 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 6803 const struct spdk_blob_xattr_opts *snapshot_xattrs, 6804 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6805 { 6806 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6807 6808 if (!ctx) { 6809 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6810 return; 6811 } 6812 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6813 ctx->cpl.u.blobid.cb_fn = cb_fn; 6814 ctx->cpl.u.blobid.cb_arg = cb_arg; 6815 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6816 ctx->bserrno = 0; 6817 ctx->frozen = false; 6818 ctx->original.id = blobid; 6819 ctx->xattrs = snapshot_xattrs; 6820 6821 spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx); 6822 } 6823 /* END spdk_bs_create_snapshot */ 6824 6825 /* START spdk_bs_create_clone */ 6826 6827 static void 6828 bs_xattr_clone(void *arg, const char *name, 6829 const void **value, size_t *value_len) 6830 { 6831 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 6832 6833 struct spdk_blob *blob = (struct spdk_blob *)arg; 6834 *value = &blob->id; 6835 *value_len = sizeof(blob->id); 6836 } 6837 6838 static void 6839 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6840 { 6841 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6842 struct spdk_blob *clone = _blob; 6843 6844 ctx->new.blob = clone; 6845 bs_blob_list_add(clone); 6846 6847 spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx); 6848 } 6849 6850 static void 6851 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6852 { 6853 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6854 6855 ctx->cpl.u.blobid.blobid = blobid; 6856 spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx); 6857 } 6858 6859 static void 6860 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6861 { 6862 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6863 struct 
spdk_blob_opts opts; 6864 struct spdk_blob_xattr_opts internal_xattrs; 6865 char *xattr_names[] = { BLOB_SNAPSHOT }; 6866 6867 if (bserrno != 0) { 6868 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6869 return; 6870 } 6871 6872 ctx->original.blob = _blob; 6873 ctx->original.md_ro = _blob->md_ro; 6874 6875 if (!_blob->data_ro || !_blob->md_ro) { 6876 SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n"); 6877 ctx->bserrno = -EINVAL; 6878 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6879 return; 6880 } 6881 6882 if (_blob->locked_operation_in_progress) { 6883 SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n"); 6884 ctx->bserrno = -EBUSY; 6885 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6886 return; 6887 } 6888 6889 _blob->locked_operation_in_progress = true; 6890 6891 spdk_blob_opts_init(&opts, sizeof(opts)); 6892 blob_xattrs_init(&internal_xattrs); 6893 6894 opts.thin_provision = true; 6895 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6896 opts.use_extent_table = _blob->use_extent_table; 6897 if (ctx->xattrs) { 6898 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6899 } 6900 6901 /* Set internal xattr BLOB_SNAPSHOT */ 6902 internal_xattrs.count = 1; 6903 internal_xattrs.ctx = _blob; 6904 internal_xattrs.names = xattr_names; 6905 internal_xattrs.get_value = bs_xattr_clone; 6906 6907 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6908 bs_clone_newblob_create_cpl, ctx); 6909 } 6910 6911 void 6912 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6913 const struct spdk_blob_xattr_opts *clone_xattrs, 6914 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6915 { 6916 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6917 6918 if (!ctx) { 6919 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6920 return; 6921 } 6922 6923 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6924 ctx->cpl.u.blobid.cb_fn = cb_fn; 6925 ctx->cpl.u.blobid.cb_arg = cb_arg; 6926 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6927 ctx->bserrno = 0; 6928 ctx->xattrs = clone_xattrs; 6929 ctx->original.id = blobid; 6930 6931 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6932 } 6933 6934 /* END spdk_bs_create_clone */ 6935 6936 /* START spdk_bs_inflate_blob */ 6937 6938 static void 6939 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6940 { 6941 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6942 struct spdk_blob *_blob = ctx->original.blob; 6943 6944 if (bserrno != 0) { 6945 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6946 return; 6947 } 6948 6949 /* Temporarily override md_ro flag for MD modification */ 6950 _blob->md_ro = false; 6951 6952 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6953 if (bserrno != 0) { 6954 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6955 return; 6956 } 6957 6958 assert(_parent != NULL); 6959 6960 bs_blob_list_remove(_blob); 6961 _blob->parent_id = _parent->id; 6962 6963 blob_back_bs_destroy(_blob); 6964 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6965 bs_blob_list_add(_blob); 6966 6967 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6968 } 6969 6970 static void 6971 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 6972 { 6973 struct spdk_blob *_blob = ctx->original.blob; 6974 struct spdk_blob *_parent; 6975 6976 if (ctx->allocate_all) { 6977 /* remove thin provisioning */ 6978 bs_blob_list_remove(_blob); 
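/* Full inflate: the copy pass allocated every cluster, so the blob no longer needs a parent of any kind. Drop the snapshot or external-snapshot reference and clear the thin-provision flag below. */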
6979 if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 6980 blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6981 _blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6982 } else { 6983 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 6984 } 6985 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV; 6986 blob_back_bs_destroy(_blob); 6987 _blob->parent_id = SPDK_BLOBID_INVALID; 6988 } else { 6989 /* For now, esnap clones always have allocate_all set. */ 6990 assert(!blob_is_esnap_clone(_blob)); 6991 6992 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob; 6993 if (_parent->parent_id != SPDK_BLOBID_INVALID) { 6994 /* We must change the parent of the inflated blob */ 6995 spdk_bs_open_blob(_blob->bs, _parent->parent_id, 6996 bs_inflate_blob_set_parent_cpl, ctx); 6997 return; 6998 } 6999 7000 bs_blob_list_remove(_blob); 7001 _blob->parent_id = SPDK_BLOBID_INVALID; 7002 blob_back_bs_destroy(_blob); 7003 _blob->back_bs_dev = bs_create_zeroes_dev(); 7004 } 7005 7006 /* Temporarily override md_ro flag for MD modification */ 7007 _blob->md_ro = false; 7008 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 7009 _blob->state = SPDK_BLOB_STATE_DIRTY; 7010 7011 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 7012 } 7013 7014 /* Check if cluster needs allocation */ 7015 static inline bool 7016 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all) 7017 { 7018 struct spdk_blob_bs_dev *b; 7019 7020 assert(blob != NULL); 7021 7022 if (blob->active.clusters[cluster] != 0) { 7023 /* Cluster is already allocated */ 7024 return false; 7025 } 7026 7027 if (blob->parent_id == SPDK_BLOBID_INVALID) { 7028 /* Blob has no parent blob */ 7029 return allocate_all; 7030 } 7031 7032 if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 7033 return true; 7034 } 7035 7036 b = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 7037 return (allocate_all || b->blob->active.clusters[cluster] != 0); 7038 } 7039 7040 static void 7041 bs_inflate_blob_touch_next(void *cb_arg, int bserrno) 7042 { 7043 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 7044 struct spdk_blob *_blob = ctx->original.blob; 7045 struct spdk_bs_cpl cpl; 7046 spdk_bs_user_op_t *op; 7047 uint64_t offset; 7048 7049 if (bserrno != 0) { 7050 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 7051 return; 7052 } 7053 7054 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) { 7055 if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) { 7056 break; 7057 } 7058 } 7059 7060 if (ctx->cluster < _blob->active.num_clusters) { 7061 offset = bs_cluster_to_lba(_blob->bs, ctx->cluster); 7062 7063 /* The cluster index may safely be advanced before the copy; offset already records the cluster being copied */ 7064 ctx->cluster++; 7065 7066 /* Use a dummy 0B read as a context for the cluster copy */ 7067 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7068 cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next; 7069 cpl.u.blob_basic.cb_arg = ctx; 7070 7071 op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob, 7072 NULL, 0, offset, 0); 7073 if (!op) { 7074 bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM); 7075 return; 7076 } 7077 7078 bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op); 7079 } else { 7080 bs_inflate_blob_done(ctx); 7081 } 7082 } 7083 7084 static void 7085 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 7086 { 7087 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 7088 uint64_t clusters_needed;
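/* allocate_all selects between spdk_bs_inflate_blob() (allocate every unallocated cluster and drop the parent entirely) and spdk_bs_blob_decouple_parent() (allocate only clusters backed by the immediate parent, keeping the blob thin). */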
uint64_t i; 7090 7091 if (bserrno != 0) { 7092 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 7093 return; 7094 } 7095 7096 ctx->original.blob = _blob; 7097 ctx->original.md_ro = _blob->md_ro; 7098 7099 if (_blob->locked_operation_in_progress) { 7100 SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n"); 7101 ctx->bserrno = -EBUSY; 7102 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 7103 return; 7104 } 7105 7106 _blob->locked_operation_in_progress = true; 7107 7108 switch (_blob->parent_id) { 7109 case SPDK_BLOBID_INVALID: 7110 if (!ctx->allocate_all) { 7111 /* This blob has no parent, so we cannot decouple it. */ 7112 SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n"); 7113 bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 7114 return; 7115 } 7116 break; 7117 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 7118 /* 7119 * It would be better to rely on back_bs_dev->is_zeroes() to determine which 7120 * clusters require allocation. Until there is a blobstore consumer that 7121 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes() it is not 7122 * worth the effort. 7123 */ 7124 ctx->allocate_all = true; 7125 break; 7126 default: 7127 break; 7128 } 7129 7130 if (spdk_blob_is_thin_provisioned(_blob) == false) { 7131 /* This is not a thin-provisioned blob, so there is no need to inflate. */ 7132 bs_clone_snapshot_origblob_cleanup(ctx, 0); 7133 return; 7134 } 7135 7136 /* Do two passes - one to verify that we can obtain enough clusters 7137 * and another to actually claim them. 7138 */ 7139 clusters_needed = 0; 7140 for (i = 0; i < _blob->active.num_clusters; i++) { 7141 if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) { 7142 clusters_needed++; 7143 } 7144 } 7145 7146 if (clusters_needed > _blob->bs->num_free_clusters) { 7147 /* Not enough free clusters. Cannot satisfy the request.
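* Failing here, before any cluster has been copied, leaves the blob unchanged. The check is best-effort: clusters are claimed one at a time during the copy pass, so a later allocation can still fail with -ENOSPC.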
*/ 7148 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 7149 return; 7150 } 7151 7152 ctx->cluster = 0; 7153 bs_inflate_blob_touch_next(ctx, 0); 7154 } 7155 7156 static void 7157 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7158 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 7159 { 7160 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 7161 7162 if (!ctx) { 7163 cb_fn(cb_arg, -ENOMEM); 7164 return; 7165 } 7166 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7167 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7168 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7169 ctx->bserrno = 0; 7170 ctx->original.id = blobid; 7171 ctx->channel = channel; 7172 ctx->allocate_all = allocate_all; 7173 7174 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 7175 } 7176 7177 void 7178 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7179 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7180 { 7181 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 7182 } 7183 7184 void 7185 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7186 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7187 { 7188 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 7189 } 7190 /* END spdk_bs_inflate_blob */ 7191 7192 /* START spdk_bs_blob_shallow_copy */ 7193 7194 struct shallow_copy_ctx { 7195 struct spdk_bs_cpl cpl; 7196 int bserrno; 7197 7198 /* Blob source for copy */ 7199 struct spdk_blob_store *bs; 7200 spdk_blob_id blobid; 7201 struct spdk_blob *blob; 7202 struct spdk_io_channel *blob_channel; 7203 7204 /* Destination device for copy */ 7205 struct spdk_bs_dev *ext_dev; 7206 struct spdk_io_channel *ext_channel; 7207 7208 /* Current cluster for copy operation */ 7209 uint64_t cluster; 7210 7211 /* Buffer for blob reading */ 7212 uint8_t *read_buff; 7213 7214 /* Struct for external device writing */ 7215 struct spdk_bs_dev_cb_args ext_args; 7216 7217 /* Actual number of copied clusters */ 7218 uint64_t copied_clusters_count; 7219 7220 /* Status callback for updates about the ongoing operation */ 7221 spdk_blob_shallow_copy_status status_cb; 7222 7223 /* Argument passed to function status_cb */ 7224 void *status_cb_arg; 7225 }; 7226 7227 static void 7228 bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno) 7229 { 7230 struct shallow_copy_ctx *ctx = cb_arg; 7231 struct spdk_bs_cpl *cpl = &ctx->cpl; 7232 7233 if (bserrno != 0) { 7234 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, cleanup error %d\n", ctx->blob->id, bserrno); 7235 ctx->bserrno = bserrno; 7236 } 7237 7238 ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel); 7239 spdk_free(ctx->read_buff); 7240 7241 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 7242 7243 free(ctx); 7244 } 7245 7246 static void 7247 bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, int bserrno) 7248 { 7249 struct shallow_copy_ctx *ctx = cb_arg; 7250 struct spdk_blob *_blob = ctx->blob; 7251 7252 if (bserrno != 0) { 7253 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, ext dev write error %d\n", ctx->blob->id, bserrno); 7254 ctx->bserrno = bserrno; 7255 _blob->locked_operation_in_progress = false; 7256 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7257 return; 7258 } 7259 7260 ctx->cluster++; 7261 if (ctx->status_cb) { 7262 ctx->copied_clusters_count++; 7263 ctx->status_cb(ctx->copied_clusters_count, ctx->status_cb_arg); 7264 } 7265 7266 
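/* Advance to the next allocated cluster; once none remain, the operation completes and the blob is closed. */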
bs_shallow_copy_cluster_find_next(ctx); 7267 } 7268 7269 static void 7270 bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno) 7271 { 7272 struct shallow_copy_ctx *ctx = cb_arg; 7273 struct spdk_bs_dev *ext_dev = ctx->ext_dev; 7274 struct spdk_blob *_blob = ctx->blob; 7275 7276 if (bserrno != 0) { 7277 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob read error %d\n", ctx->blob->id, bserrno); 7278 ctx->bserrno = bserrno; 7279 _blob->locked_operation_in_progress = false; 7280 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7281 return; 7282 } 7283 7284 ctx->ext_args.channel = ctx->ext_channel; 7285 ctx->ext_args.cb_fn = bs_shallow_copy_bdev_write_cpl; 7286 ctx->ext_args.cb_arg = ctx; 7287 7288 ext_dev->write(ext_dev, ctx->ext_channel, ctx->read_buff, 7289 bs_cluster_to_lba(_blob->bs, ctx->cluster), 7290 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz), 7291 &ctx->ext_args); 7292 } 7293 7294 static void 7295 bs_shallow_copy_cluster_find_next(void *cb_arg) 7296 { 7297 struct shallow_copy_ctx *ctx = cb_arg; 7298 struct spdk_blob *_blob = ctx->blob; 7299 7300 while (ctx->cluster < _blob->active.num_clusters) { 7301 if (_blob->active.clusters[ctx->cluster] != 0) { 7302 break; 7303 } 7304 7305 ctx->cluster++; 7306 } 7307 7308 if (ctx->cluster < _blob->active.num_clusters) { 7309 blob_request_submit_op_single(ctx->blob_channel, _blob, ctx->read_buff, 7310 bs_cluster_to_lba(_blob->bs, ctx->cluster), 7311 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz), 7312 bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ); 7313 } else { 7314 _blob->locked_operation_in_progress = false; 7315 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7316 } 7317 } 7318 7319 static void 7320 bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 7321 { 7322 struct shallow_copy_ctx *ctx = cb_arg; 7323 struct spdk_bs_dev *ext_dev = ctx->ext_dev; 7324 uint32_t blob_block_size; 7325 uint64_t blob_total_size; 7326 7327 if (bserrno != 0) { 7328 SPDK_ERRLOG("Shallow copy blob open error %d\n", bserrno); 7329 ctx->bserrno = bserrno; 7330 bs_shallow_copy_cleanup_finish(ctx, 0); 7331 return; 7332 } 7333 7334 if (!spdk_blob_is_read_only(_blob)) { 7335 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob must be read only\n", _blob->id); 7336 ctx->bserrno = -EPERM; 7337 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7338 return; 7339 } 7340 7341 blob_block_size = _blob->bs->dev->blocklen; 7342 blob_total_size = spdk_blob_get_num_clusters(_blob) * spdk_bs_get_cluster_size(_blob->bs); 7343 7344 if (blob_total_size > ext_dev->blockcnt * ext_dev->blocklen) { 7345 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device must be at least as large as the blob\n", 7346 _blob->id); 7347 ctx->bserrno = -EINVAL; 7348 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7349 return; 7350 } 7351 7352 if (blob_block_size % ext_dev->blocklen != 0) { 7353 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device block size is not compatible with " 7354 "blobstore block size\n", _blob->id); 7355 ctx->bserrno = -EINVAL; 7356 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7357 return; 7358 } 7359 7360 ctx->blob = _blob; 7361 7362 if (_blob->locked_operation_in_progress) { 7363 SPDK_DEBUGLOG(blob, "blob 0x%" PRIx64 " shallow copy - another operation in progress\n", _blob->id); 7364 ctx->bserrno = -EBUSY; 7365 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7366 return; 7367 } 7368 7369 _blob->locked_operation_in_progress = true; 7370
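/* Scan the cluster map from the start. Only clusters allocated in this blob are copied out; clusters still backed by a parent or external snapshot are skipped, which is what makes the copy shallow. */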
7371 ctx->cluster = 0; 7372 bs_shallow_copy_cluster_find_next(ctx); 7373 } 7374 7375 int 7376 spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7377 spdk_blob_id blobid, struct spdk_bs_dev *ext_dev, 7378 spdk_blob_shallow_copy_status status_cb_fn, void *status_cb_arg, 7379 spdk_blob_op_complete cb_fn, void *cb_arg) 7380 { 7381 struct shallow_copy_ctx *ctx; 7382 struct spdk_io_channel *ext_channel; 7383 7384 ctx = calloc(1, sizeof(*ctx)); 7385 if (!ctx) { 7386 return -ENOMEM; 7387 } 7388 7389 ctx->bs = bs; 7390 ctx->blobid = blobid; 7391 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7392 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7393 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7394 ctx->bserrno = 0; 7395 ctx->blob_channel = channel; 7396 ctx->status_cb = status_cb_fn; 7397 ctx->status_cb_arg = status_cb_arg; 7398 ctx->read_buff = spdk_malloc(bs->cluster_sz, bs->dev->blocklen, NULL, 7399 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 7400 if (!ctx->read_buff) { 7401 free(ctx); 7402 return -ENOMEM; 7403 } 7404 7405 ext_channel = ext_dev->create_channel(ext_dev); 7406 if (!ext_channel) { 7407 spdk_free(ctx->read_buff); 7408 free(ctx); 7409 return -ENOMEM; 7410 } 7411 ctx->ext_dev = ext_dev; 7412 ctx->ext_channel = ext_channel; 7413 7414 spdk_bs_open_blob(ctx->bs, ctx->blobid, bs_shallow_copy_blob_open_cpl, ctx); 7415 7416 return 0; 7417 } 7418 /* END spdk_bs_blob_shallow_copy */ 7419 7420 /* START spdk_bs_blob_set_parent */ 7421 7422 struct set_parent_ctx { 7423 struct spdk_blob_store *bs; 7424 int bserrno; 7425 spdk_bs_op_complete cb_fn; 7426 void *cb_arg; 7427 7428 struct spdk_blob *blob; 7429 bool blob_md_ro; 7430 7431 struct blob_parent parent; 7432 }; 7433 7434 static void 7435 bs_set_parent_cleanup_finish(void *cb_arg, int bserrno) 7436 { 7437 struct set_parent_ctx *ctx = cb_arg; 7438 7439 assert(ctx != NULL); 7440 7441 if (bserrno != 0) { 7442 SPDK_ERRLOG("blob set parent finish error %d\n", bserrno); 7443 if (ctx->bserrno == 0) { 7444 ctx->bserrno = bserrno; 7445 } 7446 } 7447 7448 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7449 7450 free(ctx); 7451 } 7452 7453 static void 7454 bs_set_parent_close_snapshot(void *cb_arg, int bserrno) 7455 { 7456 struct set_parent_ctx *ctx = cb_arg; 7457 7458 if (ctx->bserrno != 0) { 7459 spdk_blob_close(ctx->parent.u.snapshot.blob, bs_set_parent_cleanup_finish, ctx); 7460 return; 7461 } 7462 7463 if (bserrno != 0) { 7464 SPDK_ERRLOG("blob close error %d\n", bserrno); 7465 ctx->bserrno = bserrno; 7466 } 7467 7468 bs_set_parent_cleanup_finish(ctx, ctx->bserrno); 7469 } 7470 7471 static void 7472 bs_set_parent_close_blob(void *cb_arg, int bserrno) 7473 { 7474 struct set_parent_ctx *ctx = cb_arg; 7475 struct spdk_blob *blob = ctx->blob; 7476 struct spdk_blob *snapshot = ctx->parent.u.snapshot.blob; 7477 7478 if (bserrno != 0 && ctx->bserrno == 0) { 7479 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7480 ctx->bserrno = bserrno; 7481 } 7482 7483 /* Revert md_ro to original state */ 7484 blob->md_ro = ctx->blob_md_ro; 7485 7486 blob->locked_operation_in_progress = false; 7487 snapshot->locked_operation_in_progress = false; 7488 7489 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7490 } 7491 7492 static void 7493 bs_set_parent_set_back_bs_dev_done(void *cb_arg, int bserrno) 7494 { 7495 struct set_parent_ctx *ctx = cb_arg; 7496 struct spdk_blob *blob = ctx->blob; 7497 7498 if (bserrno != 0) { 7499 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7500 ctx->bserrno = bserrno; 7501 bs_set_parent_close_blob(ctx, bserrno); 7502 
return; 7503 } 7504 7505 spdk_blob_sync_md(blob, bs_set_parent_close_blob, ctx); 7506 } 7507 7508 static int 7509 bs_set_parent_refs(struct spdk_blob *blob, struct blob_parent *parent) 7510 { 7511 int rc; 7512 7513 bs_blob_list_remove(blob); 7514 7515 rc = blob_set_xattr(blob, BLOB_SNAPSHOT, &parent->u.snapshot.id, sizeof(spdk_blob_id), true); 7516 if (rc != 0) { 7517 SPDK_ERRLOG("error %d setting snapshot xattr\n", rc); 7518 return rc; 7519 } 7520 blob->parent_id = parent->u.snapshot.id; 7521 7522 if (blob_is_esnap_clone(blob)) { 7523 /* Remove the xattr that references the external snapshot */ 7524 blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 7525 blob_remove_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 7526 } 7527 7528 bs_blob_list_add(blob); 7529 7530 return 0; 7531 } 7532 7533 static void 7534 bs_set_parent_snapshot_open_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno) 7535 { 7536 struct set_parent_ctx *ctx = cb_arg; 7537 struct spdk_blob *blob = ctx->blob; 7538 struct spdk_bs_dev *back_bs_dev; 7539 7540 if (bserrno != 0) { 7541 SPDK_ERRLOG("snapshot open error %d\n", bserrno); 7542 ctx->bserrno = bserrno; 7543 spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx); 7544 return; 7545 } 7546 7547 ctx->parent.u.snapshot.blob = snapshot; 7548 ctx->parent.u.snapshot.id = snapshot->id; 7549 7550 if (!spdk_blob_is_snapshot(snapshot)) { 7551 SPDK_ERRLOG("parent blob is not a snapshot\n"); 7552 ctx->bserrno = -EINVAL; 7553 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7554 return; 7555 } 7556 7557 if (blob->active.num_clusters != snapshot->active.num_clusters) { 7558 SPDK_ERRLOG("parent blob's cluster count differs from the child's\n"); 7559 ctx->bserrno = -EINVAL; 7560 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7561 return; 7562 } 7563 7564 if (blob->locked_operation_in_progress || snapshot->locked_operation_in_progress) { 7565 SPDK_ERRLOG("cannot set parent of blob, another operation in progress\n"); 7566 ctx->bserrno = -EBUSY; 7567 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7568 return; 7569 } 7570 7571 blob->locked_operation_in_progress = true; 7572 snapshot->locked_operation_in_progress = true; 7573 7574 /* Temporarily override md_ro flag for MD modification */ 7575 blob->md_ro = false; 7576 7577 back_bs_dev = bs_create_blob_bs_dev(snapshot); 7578 7579 blob_set_back_bs_dev(blob, back_bs_dev, bs_set_parent_refs, &ctx->parent, 7580 bs_set_parent_set_back_bs_dev_done, 7581 ctx); 7582 } 7583 7584 static void 7585 bs_set_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7586 { 7587 struct set_parent_ctx *ctx = cb_arg; 7588 7589 if (bserrno != 0) { 7590 SPDK_ERRLOG("blob open error %d\n", bserrno); 7591 ctx->bserrno = bserrno; 7592 bs_set_parent_cleanup_finish(ctx, 0); 7593 return; 7594 } 7595 7596 if (!spdk_blob_is_thin_provisioned(blob)) { 7597 SPDK_ERRLOG("blob is not thin-provisioned\n"); 7598 ctx->bserrno = -EINVAL; 7599 spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx); 7600 return; 7601 } 7602 7603 ctx->blob = blob; 7604 ctx->blob_md_ro = blob->md_ro; 7605 7606 spdk_bs_open_blob(ctx->bs, ctx->parent.u.snapshot.id, bs_set_parent_snapshot_open_cpl, ctx); 7607 } 7608 7609 void 7610 spdk_bs_blob_set_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id, 7611 spdk_blob_id snapshot_id, spdk_blob_op_complete cb_fn, void *cb_arg) 7612 { 7613 struct set_parent_ctx *ctx; 7614 7615 if (snapshot_id == SPDK_BLOBID_INVALID) { 7616 SPDK_ERRLOG("snapshot id not valid\n"); 7617 cb_fn(cb_arg,
-EINVAL); 7618 return; 7619 } 7620 7621 if (blob_id == snapshot_id) { 7622 SPDK_ERRLOG("blob id and snapshot id cannot be the same\n"); 7623 cb_fn(cb_arg, -EINVAL); 7624 return; 7625 } 7626 7627 if (spdk_blob_get_parent_snapshot(bs, blob_id) == snapshot_id) { 7628 SPDK_NOTICELOG("snapshot is already the parent of blob\n"); 7629 cb_fn(cb_arg, -EEXIST); 7630 return; 7631 } 7632 7633 ctx = calloc(1, sizeof(*ctx)); 7634 if (!ctx) { 7635 cb_fn(cb_arg, -ENOMEM); 7636 return; 7637 } 7638 7639 ctx->bs = bs; 7640 ctx->parent.u.snapshot.id = snapshot_id; 7641 ctx->cb_fn = cb_fn; 7642 ctx->cb_arg = cb_arg; 7643 ctx->bserrno = 0; 7644 7645 spdk_bs_open_blob(bs, blob_id, bs_set_parent_blob_open_cpl, ctx); 7646 } 7647 /* END spdk_bs_blob_set_parent */ 7648 7649 /* START spdk_bs_blob_set_external_parent */ 7650 7651 static void 7652 bs_set_external_parent_cleanup_finish(void *cb_arg, int bserrno) 7653 { 7654 struct set_parent_ctx *ctx = cb_arg; 7655 7656 if (bserrno != 0) { 7657 SPDK_ERRLOG("blob set external parent finish error %d\n", bserrno); 7658 if (ctx->bserrno == 0) { 7659 ctx->bserrno = bserrno; 7660 } 7661 } 7662 7663 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7664 7665 free(ctx->parent.u.esnap.id); 7666 free(ctx); 7667 } 7668 7669 static void 7670 bs_set_external_parent_close_blob(void *cb_arg, int bserrno) 7671 { 7672 struct set_parent_ctx *ctx = cb_arg; 7673 struct spdk_blob *blob = ctx->blob; 7674 7675 if (bserrno != 0 && ctx->bserrno == 0) { 7676 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7677 ctx->bserrno = bserrno; 7678 } 7679 7680 /* Revert md_ro to original state */ 7681 blob->md_ro = ctx->blob_md_ro; 7682 7683 blob->locked_operation_in_progress = false; 7684 7685 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7686 } 7687 7688 static void 7689 bs_set_external_parent_unfrozen(void *cb_arg, int bserrno) 7690 { 7691 struct set_parent_ctx *ctx = cb_arg; 7692 struct spdk_blob *blob = ctx->blob; 7693 7694 if (bserrno != 0) { 7695 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7696 ctx->bserrno = bserrno; 7697 bs_set_external_parent_close_blob(ctx, bserrno); 7698 return; 7699 } 7700 7701 spdk_blob_sync_md(blob, bs_set_external_parent_close_blob, ctx); 7702 } 7703 7704 static int 7705 bs_set_external_parent_refs(struct spdk_blob *blob, struct blob_parent *parent) 7706 { 7707 int rc; 7708 7709 bs_blob_list_remove(blob); 7710 7711 if (spdk_blob_is_clone(blob)) { 7712 /* Remove the xattr that references the snapshot */ 7713 blob->parent_id = SPDK_BLOBID_INVALID; 7714 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 7715 } 7716 7717 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, parent->u.esnap.id, 7718 parent->u.esnap.id_len, true); 7719 if (rc != 0) { 7720 SPDK_ERRLOG("error %d setting external snapshot xattr\n", rc); 7721 return rc; 7722 } 7723 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 7724 blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 7725 7726 bs_blob_list_add(blob); 7727 7728 return 0; 7729 } 7730 7731 static void 7732 bs_set_external_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7733 { 7734 struct set_parent_ctx *ctx = cb_arg; 7735 const void *esnap_id; 7736 size_t esnap_id_len; 7737 int rc; 7738 7739 if (bserrno != 0) { 7740 SPDK_ERRLOG("blob open error %d\n", bserrno); 7741 ctx->bserrno = bserrno; 7742 bs_set_external_parent_cleanup_finish(ctx, 0); 7743 return; 7744 } 7745 7746 ctx->blob = blob; 7747 ctx->blob_md_ro = blob->md_ro; 7748 7749 rc = spdk_blob_get_esnap_id(blob, &esnap_id, &esnap_id_len); 7750 if (rc == 0 &&
esnap_id != NULL && esnap_id_len == ctx->parent.u.esnap.id_len && 7751 memcmp(esnap_id, ctx->parent.u.esnap.id, esnap_id_len) == 0) { 7752 SPDK_ERRLOG("external snapshot is already the parent of blob\n"); 7753 ctx->bserrno = -EEXIST; 7754 goto error; 7755 } 7756 7757 if (!spdk_blob_is_thin_provisioned(blob)) { 7758 SPDK_ERRLOG("blob is not thin-provisioned\n"); 7759 ctx->bserrno = -EINVAL; 7760 goto error; 7761 } 7762 7763 if (blob->locked_operation_in_progress) { 7764 SPDK_ERRLOG("cannot set external parent of blob, another operation in progress\n"); 7765 ctx->bserrno = -EBUSY; 7766 goto error; 7767 } 7768 7769 blob->locked_operation_in_progress = true; 7770 7771 /* Temporarily override md_ro flag for MD modification */ 7772 blob->md_ro = false; 7773 7774 blob_set_back_bs_dev(blob, ctx->parent.u.esnap.back_bs_dev, bs_set_external_parent_refs, 7775 &ctx->parent, bs_set_external_parent_unfrozen, ctx); 7776 return; 7777 7778 error: 7779 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7780 } 7781 7782 void 7783 spdk_bs_blob_set_external_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id, 7784 struct spdk_bs_dev *esnap_bs_dev, const void *esnap_id, 7785 uint32_t esnap_id_len, spdk_blob_op_complete cb_fn, void *cb_arg) 7786 { 7787 struct set_parent_ctx *ctx; 7788 uint64_t esnap_dev_size, cluster_sz; 7789 7790 if (sizeof(blob_id) == esnap_id_len && memcmp(&blob_id, esnap_id, sizeof(blob_id)) == 0) { 7791 SPDK_ERRLOG("blob id and external snapshot id cannot be the same\n"); 7792 cb_fn(cb_arg, -EINVAL); 7793 return; 7794 } 7795 7796 esnap_dev_size = esnap_bs_dev->blockcnt * esnap_bs_dev->blocklen; 7797 cluster_sz = spdk_bs_get_cluster_size(bs); 7798 if ((esnap_dev_size % cluster_sz) != 0) { 7799 SPDK_ERRLOG("Esnap device size %" PRIu64 " is not an integer multiple of " 7800 "cluster size %" PRIu64 "\n", esnap_dev_size, cluster_sz); 7801 cb_fn(cb_arg, -EINVAL); 7802 return; 7803 } 7804 7805 ctx = calloc(1, sizeof(*ctx)); 7806 if (!ctx) { 7807 cb_fn(cb_arg, -ENOMEM); 7808 return; 7809 } 7810 7811 ctx->parent.u.esnap.id = calloc(1, esnap_id_len); 7812 if (!ctx->parent.u.esnap.id) { 7813 free(ctx); 7814 cb_fn(cb_arg, -ENOMEM); 7815 return; 7816 } 7817 7818 ctx->bs = bs; 7819 ctx->parent.u.esnap.back_bs_dev = esnap_bs_dev; 7820 memcpy(ctx->parent.u.esnap.id, esnap_id, esnap_id_len); 7821 ctx->parent.u.esnap.id_len = esnap_id_len; 7822 ctx->cb_fn = cb_fn; 7823 ctx->cb_arg = cb_arg; 7824 ctx->bserrno = 0; 7825 7826 spdk_bs_open_blob(bs, blob_id, bs_set_external_parent_blob_open_cpl, ctx); 7827 } 7828 /* END spdk_bs_blob_set_external_parent */ 7829 7830 /* START spdk_blob_resize */ 7831 struct spdk_bs_resize_ctx { 7832 spdk_blob_op_complete cb_fn; 7833 void *cb_arg; 7834 struct spdk_blob *blob; 7835 uint64_t sz; 7836 int rc; 7837 }; 7838 7839 static void 7840 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 7841 { 7842 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7843 7844 if (rc != 0) { 7845 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 7846 } 7847 7848 if (ctx->rc != 0) { 7849 SPDK_ERRLOG("Resize failed, ctx->rc=%d\n", ctx->rc); 7850 rc = ctx->rc; 7851 } 7852 7853 ctx->blob->locked_operation_in_progress = false; 7854 7855 ctx->cb_fn(ctx->cb_arg, rc); 7856 free(ctx); 7857 } 7858 7859 static void 7860 bs_resize_freeze_cpl(void *cb_arg, int rc) 7861 { 7862 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7863 7864 if (rc != 0) { 7865 ctx->blob->locked_operation_in_progress = false; 7866 ctx->cb_fn(ctx->cb_arg, rc); 7867 free(ctx); 7868
return; 7869 } 7870 7871 ctx->rc = blob_resize(ctx->blob, ctx->sz); 7872 7873 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 7874 } 7875 7876 void 7877 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 7878 { 7879 struct spdk_bs_resize_ctx *ctx; 7880 7881 blob_verify_md_op(blob); 7882 7883 SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz); 7884 7885 if (blob->md_ro) { 7886 cb_fn(cb_arg, -EPERM); 7887 return; 7888 } 7889 7890 if (sz == blob->active.num_clusters) { 7891 cb_fn(cb_arg, 0); 7892 return; 7893 } 7894 7895 if (blob->locked_operation_in_progress) { 7896 cb_fn(cb_arg, -EBUSY); 7897 return; 7898 } 7899 7900 ctx = calloc(1, sizeof(*ctx)); 7901 if (!ctx) { 7902 cb_fn(cb_arg, -ENOMEM); 7903 return; 7904 } 7905 7906 blob->locked_operation_in_progress = true; 7907 ctx->cb_fn = cb_fn; 7908 ctx->cb_arg = cb_arg; 7909 ctx->blob = blob; 7910 ctx->sz = sz; 7911 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 7912 } 7913 7914 /* END spdk_blob_resize */ 7915 7916 7917 /* START spdk_bs_delete_blob */ 7918 7919 static void 7920 bs_delete_close_cpl(void *cb_arg, int bserrno) 7921 { 7922 spdk_bs_sequence_t *seq = cb_arg; 7923 7924 bs_sequence_finish(seq, bserrno); 7925 } 7926 7927 static void 7928 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7929 { 7930 struct spdk_blob *blob = cb_arg; 7931 7932 if (bserrno != 0) { 7933 /* 7934 * We already removed this blob from the blobstore tailq, so 7935 * we need to free it here since this is the last reference 7936 * to it. 7937 */ 7938 blob_free(blob); 7939 bs_delete_close_cpl(seq, bserrno); 7940 return; 7941 } 7942 7943 /* 7944 * This will immediately decrement the ref_count and call 7945 * the completion routine since the metadata state is clean. 7946 * By calling spdk_blob_close, we reduce the number of call 7947 * points into code that touches the blob->open_ref count 7948 * and the blobstore's blob list. 
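*
* From here the chain is spdk_blob_close() -> bs_delete_close_cpl() ->
* bs_sequence_finish(), which invokes the spdk_blob_op_complete that the
* caller passed to spdk_bs_delete_blob().
*
* Caller-side sketch (illustrative only; delete_done is a hypothetical name):
*
*	static void
*	delete_done(void *cb_arg, int bserrno)
*	{
*		if (bserrno != 0) {
*			SPDK_ERRLOG("blob delete failed: %d\n", bserrno);
*		}
*	}
*
*	spdk_bs_delete_blob(bs, blobid, delete_done, NULL);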
7949 */ 7950 spdk_blob_close(blob, bs_delete_close_cpl, seq); 7951 } 7952 7953 struct delete_snapshot_ctx { 7954 struct spdk_blob_list *parent_snapshot_entry; 7955 struct spdk_blob *snapshot; 7956 struct spdk_blob_md_page *page; 7957 bool snapshot_md_ro; 7958 struct spdk_blob *clone; 7959 bool clone_md_ro; 7960 spdk_blob_op_with_handle_complete cb_fn; 7961 void *cb_arg; 7962 int bserrno; 7963 uint32_t next_extent_page; 7964 }; 7965 7966 static void 7967 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 7968 { 7969 struct delete_snapshot_ctx *ctx = cb_arg; 7970 7971 if (bserrno != 0) { 7972 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 7973 } 7974 7975 assert(ctx != NULL); 7976 7977 if (bserrno != 0 && ctx->bserrno == 0) { 7978 ctx->bserrno = bserrno; 7979 } 7980 7981 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 7982 spdk_free(ctx->page); 7983 free(ctx); 7984 } 7985 7986 static void 7987 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 7988 { 7989 struct delete_snapshot_ctx *ctx = cb_arg; 7990 7991 if (bserrno != 0) { 7992 ctx->bserrno = bserrno; 7993 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 7994 } 7995 7996 if (ctx->bserrno != 0) { 7997 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 7998 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 7999 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 8000 } 8001 8002 ctx->snapshot->locked_operation_in_progress = false; 8003 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8004 8005 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 8006 } 8007 8008 static void 8009 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 8010 { 8011 struct delete_snapshot_ctx *ctx = cb_arg; 8012 8013 ctx->clone->locked_operation_in_progress = false; 8014 ctx->clone->md_ro = ctx->clone_md_ro; 8015 8016 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8017 } 8018 8019 static void 8020 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 8021 { 8022 struct delete_snapshot_ctx *ctx = cb_arg; 8023 8024 if (bserrno) { 8025 ctx->bserrno = bserrno; 8026 delete_snapshot_cleanup_clone(ctx, 0); 8027 return; 8028 } 8029 8030 ctx->clone->locked_operation_in_progress = false; 8031 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 8032 } 8033 8034 static void 8035 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 8036 { 8037 struct delete_snapshot_ctx *ctx = cb_arg; 8038 struct spdk_blob_list *parent_snapshot_entry = NULL; 8039 struct spdk_blob_list *snapshot_entry = NULL; 8040 struct spdk_blob_list *clone_entry = NULL; 8041 struct spdk_blob_list *snapshot_clone_entry = NULL; 8042 8043 if (bserrno) { 8044 SPDK_ERRLOG("Failed to sync MD on blob\n"); 8045 ctx->bserrno = bserrno; 8046 delete_snapshot_cleanup_clone(ctx, 0); 8047 return; 8048 } 8049 8050 /* Get snapshot entry for the snapshot we want to remove */ 8051 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 8052 8053 assert(snapshot_entry != NULL); 8054 8055 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 8056 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8057 assert(clone_entry != NULL); 8058 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 8059 snapshot_entry->clone_count--; 8060 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 8061 8062 switch (ctx->snapshot->parent_id) { 8063 case SPDK_BLOBID_INVALID: 8064 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 8065 /* No parent snapshot - just remove clone 
entry */ 8066 free(clone_entry); 8067 break; 8068 default: 8069 /* This snapshot is at the same time a clone of another snapshot - we need to 8070 * update parent snapshot (remove current clone, add new one inherited from 8071 * the snapshot that is being removed) */ 8072 8073 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8074 * snapshot that we are removing */ 8075 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 8076 &snapshot_clone_entry); 8077 8078 /* Switch clone entry in parent snapshot */ 8079 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 8080 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 8081 free(snapshot_clone_entry); 8082 } 8083 8084 /* Restore md_ro flags */ 8085 ctx->clone->md_ro = ctx->clone_md_ro; 8086 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8087 8088 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 8089 } 8090 8091 static void 8092 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 8093 { 8094 struct delete_snapshot_ctx *ctx = cb_arg; 8095 uint64_t i; 8096 8097 ctx->snapshot->md_ro = false; 8098 8099 if (bserrno) { 8100 SPDK_ERRLOG("Failed to sync MD on clone\n"); 8101 ctx->bserrno = bserrno; 8102 8103 /* Restore snapshot to previous state */ 8104 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8105 if (bserrno != 0) { 8106 delete_snapshot_cleanup_clone(ctx, bserrno); 8107 return; 8108 } 8109 8110 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8111 return; 8112 } 8113 8114 /* Clear cluster map entries for snapshot */ 8115 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8116 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 8117 if (ctx->snapshot->active.clusters[i] != 0) { 8118 ctx->snapshot->active.num_allocated_clusters--; 8119 } 8120 ctx->snapshot->active.clusters[i] = 0; 8121 } 8122 } 8123 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 8124 i < ctx->clone->active.num_extent_pages; i++) { 8125 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 8126 ctx->snapshot->active.extent_pages[i] = 0; 8127 } 8128 } 8129 8130 blob_set_thin_provision(ctx->snapshot); 8131 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 8132 8133 if (ctx->parent_snapshot_entry != NULL) { 8134 ctx->snapshot->back_bs_dev = NULL; 8135 } 8136 8137 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 8138 } 8139 8140 static void 8141 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 8142 { 8143 int bserrno; 8144 8145 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 8146 blob_back_bs_destroy(ctx->clone); 8147 8148 /* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... 
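* Three cases follow, mirroring how the snapshot being deleted was itself
* parented: (1) it was an external-snapshot clone, so the clone inherits the
* esnap id xattr and the external back_bs_dev; (2) it had a parent snapshot,
* so the clone is re-pointed at that grandparent via the BLOB_SNAPSHOT xattr;
* (3) it had no parent, so the clone becomes a plain thin-provisioned blob
* backed by the zeroes device. For case (2):
*   before: grandparent <- snapshot <- clone
*   after:  grandparent <- clone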
*/ 8149 if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 8150 bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot, 8151 BLOB_EXTERNAL_SNAPSHOT_ID); 8152 if (bserrno != 0) { 8153 ctx->bserrno = bserrno; 8154 8155 /* Restore snapshot to previous state */ 8156 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8157 if (bserrno != 0) { 8158 delete_snapshot_cleanup_clone(ctx, bserrno); 8159 return; 8160 } 8161 8162 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8163 return; 8164 } 8165 ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 8166 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8167 /* Do not delete the external snapshot along with this snapshot */ 8168 ctx->snapshot->back_bs_dev = NULL; 8169 ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 8170 } else if (ctx->parent_snapshot_entry != NULL) { 8171 /* ...to parent snapshot */ 8172 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 8173 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8174 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 8175 sizeof(spdk_blob_id), 8176 true); 8177 } else { 8178 /* ...to blobid invalid and zeroes dev */ 8179 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 8180 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 8181 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 8182 } 8183 8184 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 8185 } 8186 8187 static void 8188 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 8189 { 8190 struct delete_snapshot_ctx *ctx = cb_arg; 8191 uint32_t *extent_page; 8192 uint64_t i; 8193 8194 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 8195 i < ctx->clone->active.num_extent_pages; i++) { 8196 if (ctx->snapshot->active.extent_pages[i] == 0) { 8197 /* No extent page to use from snapshot */ 8198 continue; 8199 } 8200 8201 extent_page = &ctx->clone->active.extent_pages[i]; 8202 if (*extent_page == 0) { 8203 /* Copy extent page from snapshot when clone did not have a matching one */ 8204 *extent_page = ctx->snapshot->active.extent_pages[i]; 8205 continue; 8206 } 8207 8208 /* Clone and snapshot both contain partially filled matching extent pages. 8209 * Update the clone extent page in place with cluster map containing the mix of both. 
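* The write is asynchronous: blob_write_extent_page() serializes the clone's
* already-merged cluster map into ctx->page and completes back into
* delete_snapshot_update_extent_pages(), which resumes scanning from
* ctx->next_extent_page until every shared extent page has been rewritten.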
*/ 8210 ctx->next_extent_page = i + 1; 8211 memset(ctx->page, 0, SPDK_BS_PAGE_SIZE); 8212 8213 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page, 8214 delete_snapshot_update_extent_pages, ctx); 8215 return; 8216 } 8217 delete_snapshot_update_extent_pages_cpl(ctx); 8218 } 8219 8220 static void 8221 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 8222 { 8223 struct delete_snapshot_ctx *ctx = cb_arg; 8224 uint64_t i; 8225 8226 /* Temporarily override md_ro flag for clone for MD modification */ 8227 ctx->clone_md_ro = ctx->clone->md_ro; 8228 ctx->clone->md_ro = false; 8229 8230 if (bserrno) { 8231 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 8232 ctx->bserrno = bserrno; 8233 delete_snapshot_cleanup_clone(ctx, 0); 8234 return; 8235 } 8236 8237 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 8238 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8239 if (ctx->clone->active.clusters[i] == 0) { 8240 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 8241 if (ctx->clone->active.clusters[i] != 0) { 8242 ctx->clone->active.num_allocated_clusters++; 8243 } 8244 } 8245 } 8246 ctx->next_extent_page = 0; 8247 delete_snapshot_update_extent_pages(ctx, 0); 8248 } 8249 8250 static void 8251 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno) 8252 { 8253 struct delete_snapshot_ctx *ctx = cb_arg; 8254 8255 if (bserrno != 0) { 8256 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n", 8257 blob->id, bserrno); 8258 /* That error should not stop us from syncing metadata. */ 8259 } 8260 8261 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8262 } 8263 8264 static void 8265 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 8266 { 8267 struct delete_snapshot_ctx *ctx = cb_arg; 8268 8269 if (bserrno) { 8270 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 8271 ctx->bserrno = bserrno; 8272 delete_snapshot_cleanup_clone(ctx, 0); 8273 return; 8274 } 8275 8276 /* Temporarily override md_ro flag for snapshot for MD modification */ 8277 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 8278 ctx->snapshot->md_ro = false; 8279 8280 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 8281 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 8282 sizeof(spdk_blob_id), true); 8283 if (ctx->bserrno != 0) { 8284 delete_snapshot_cleanup_clone(ctx, 0); 8285 return; 8286 } 8287 8288 if (blob_is_esnap_clone(ctx->snapshot)) { 8289 blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false, 8290 delete_snapshot_esnap_channels_destroyed_cb, 8291 ctx); 8292 return; 8293 } 8294 8295 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8296 } 8297 8298 static void 8299 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 8300 { 8301 struct delete_snapshot_ctx *ctx = cb_arg; 8302 8303 if (bserrno) { 8304 SPDK_ERRLOG("Failed to open clone\n"); 8305 ctx->bserrno = bserrno; 8306 delete_snapshot_cleanup_snapshot(ctx, 0); 8307 return; 8308 } 8309 8310 ctx->clone = clone; 8311 8312 if (clone->locked_operation_in_progress) { 8313 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 8314 ctx->bserrno = -EBUSY; 8315 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8316 return; 8317 } 8318 8319 
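/* Lock the clone against concurrent blob-level operations, then freeze its I/O so the snapshot's cluster map can be merged into it safely. */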
clone->locked_operation_in_progress = true; 8320 8321 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 8322 } 8323 8324 static void 8325 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 8326 { 8327 struct spdk_blob_list *snapshot_entry = NULL; 8328 struct spdk_blob_list *clone_entry = NULL; 8329 struct spdk_blob_list *snapshot_clone_entry = NULL; 8330 8331 /* Get snapshot entry for the snapshot we want to remove */ 8332 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 8333 8334 assert(snapshot_entry != NULL); 8335 8336 /* Get clone of the snapshot (at this point there can be only one clone) */ 8337 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8338 assert(snapshot_entry->clone_count == 1); 8339 assert(clone_entry != NULL); 8340 8341 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8342 * snapshot that we are removing */ 8343 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 8344 &snapshot_clone_entry); 8345 8346 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 8347 } 8348 8349 static void 8350 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 8351 { 8352 spdk_bs_sequence_t *seq = cb_arg; 8353 struct spdk_blob_list *snapshot_entry = NULL; 8354 uint32_t page_num; 8355 8356 if (bserrno) { 8357 SPDK_ERRLOG("Failed to remove blob\n"); 8358 bs_sequence_finish(seq, bserrno); 8359 return; 8360 } 8361 8362 /* Remove snapshot from the list */ 8363 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8364 if (snapshot_entry != NULL) { 8365 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 8366 free(snapshot_entry); 8367 } 8368 8369 page_num = bs_blobid_to_page(blob->id); 8370 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 8371 blob->state = SPDK_BLOB_STATE_DIRTY; 8372 blob->active.num_pages = 0; 8373 blob_resize(blob, 0); 8374 8375 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 8376 } 8377 8378 static int 8379 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 8380 { 8381 struct spdk_blob_list *snapshot_entry = NULL; 8382 struct spdk_blob_list *clone_entry = NULL; 8383 struct spdk_blob *clone = NULL; 8384 bool has_one_clone = false; 8385 8386 /* Check if this is a snapshot with clones */ 8387 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8388 if (snapshot_entry != NULL) { 8389 if (snapshot_entry->clone_count > 1) { 8390 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 8391 return -EBUSY; 8392 } else if (snapshot_entry->clone_count == 1) { 8393 has_one_clone = true; 8394 } 8395 } 8396 8397 /* Check if someone has this blob open (besides this delete context): 8398 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 8399 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 8400 * and that is ok, because we will update it accordingly */ 8401 if (blob->open_ref <= 2 && has_one_clone) { 8402 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8403 assert(clone_entry != NULL); 8404 clone = blob_lookup(blob->bs, clone_entry->id); 8405 8406 if (blob->open_ref == 2 && clone == NULL) { 8407 /* Clone is closed and someone else opened this blob */ 8408 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8409 return -EBUSY; 8410 } 8411 8412 *update_clone = true; 8413 return 0; 8414 } 8415 8416 if (blob->open_ref > 1) { 8417 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8418 return 
-EBUSY; 8419 } 8420 8421 assert(has_one_clone == false); 8422 *update_clone = false; 8423 return 0; 8424 } 8425 8426 static void 8427 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 8428 { 8429 spdk_bs_sequence_t *seq = cb_arg; 8430 8431 bs_sequence_finish(seq, -ENOMEM); 8432 } 8433 8434 static void 8435 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 8436 { 8437 spdk_bs_sequence_t *seq = cb_arg; 8438 struct delete_snapshot_ctx *ctx; 8439 bool update_clone = false; 8440 8441 if (bserrno != 0) { 8442 bs_sequence_finish(seq, bserrno); 8443 return; 8444 } 8445 8446 blob_verify_md_op(blob); 8447 8448 ctx = calloc(1, sizeof(*ctx)); 8449 if (ctx == NULL) { 8450 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 8451 return; 8452 } 8453 8454 ctx->snapshot = blob; 8455 ctx->cb_fn = bs_delete_blob_finish; 8456 ctx->cb_arg = seq; 8457 8458 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 8459 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 8460 if (ctx->bserrno) { 8461 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8462 return; 8463 } 8464 8465 if (blob->locked_operation_in_progress) { 8466 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 8467 ctx->bserrno = -EBUSY; 8468 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8469 return; 8470 } 8471 8472 blob->locked_operation_in_progress = true; 8473 8474 /* 8475 * Remove the blob from the blob_store list now, to ensure it does not 8476 * get returned after this point by blob_lookup(). 8477 */ 8478 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 8479 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 8480 8481 if (update_clone) { 8482 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA); 8483 if (!ctx->page) { 8484 ctx->bserrno = -ENOMEM; 8485 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8486 return; 8487 } 8488 /* This blob is a snapshot with active clone - update clone first */ 8489 update_clone_on_snapshot_deletion(blob, ctx); 8490 } else { 8491 /* This blob does not have any clones - just remove it */ 8492 bs_blob_list_remove(blob); 8493 bs_delete_blob_finish(seq, blob, 0); 8494 free(ctx); 8495 } 8496 } 8497 8498 void 8499 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8500 spdk_blob_op_complete cb_fn, void *cb_arg) 8501 { 8502 struct spdk_bs_cpl cpl; 8503 spdk_bs_sequence_t *seq; 8504 8505 SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid); 8506 8507 assert(spdk_get_thread() == bs->md_thread); 8508 8509 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8510 cpl.u.blob_basic.cb_fn = cb_fn; 8511 cpl.u.blob_basic.cb_arg = cb_arg; 8512 8513 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8514 if (!seq) { 8515 cb_fn(cb_arg, -ENOMEM); 8516 return; 8517 } 8518 8519 spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq); 8520 } 8521 8522 /* END spdk_bs_delete_blob */ 8523 8524 /* START spdk_bs_open_blob */ 8525 8526 static void 8527 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8528 { 8529 struct spdk_blob *blob = cb_arg; 8530 struct spdk_blob *existing; 8531 8532 if (bserrno != 0) { 8533 blob_free(blob); 8534 seq->cpl.u.blob_handle.blob = NULL; 8535 bs_sequence_finish(seq, bserrno); 8536 return; 8537 } 8538 8539 existing = blob_lookup(blob->bs, blob->id); 8540 if (existing) { 8541 blob_free(blob); 8542 existing->open_ref++; 8543 seq->cpl.u.blob_handle.blob = existing; 8544 bs_sequence_finish(seq, 0); 8545 return; 8546 } 8547 8548 
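/* First open of this blob: take the initial reference and index it in open_blobids and the open_blobs tree so later blob_lookup() calls find it. */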
blob->open_ref++; 8549 8550 spdk_bit_array_set(blob->bs->open_blobids, blob->id); 8551 RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob); 8552 8553 bs_sequence_finish(seq, bserrno); 8554 } 8555 8556 static inline void 8557 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst) 8558 { 8559 #define FIELD_OK(field) \ 8560 offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size 8561 8562 #define SET_FIELD(field) \ 8563 if (FIELD_OK(field)) { \ 8564 dst->field = src->field; \ 8565 } \ 8566 8567 SET_FIELD(clear_method); 8568 SET_FIELD(esnap_ctx); 8569 8570 dst->opts_size = src->opts_size; 8571 8572 /* You should not remove this statement, but need to update the assert statement 8573 * if you add a new field, and also add a corresponding SET_FIELD statement */ 8574 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size"); 8575 8576 #undef FIELD_OK 8577 #undef SET_FIELD 8578 } 8579 8580 static void 8581 bs_open_blob(struct spdk_blob_store *bs, 8582 spdk_blob_id blobid, 8583 struct spdk_blob_open_opts *opts, 8584 spdk_blob_op_with_handle_complete cb_fn, 8585 void *cb_arg) 8586 { 8587 struct spdk_blob *blob; 8588 struct spdk_bs_cpl cpl; 8589 struct spdk_blob_open_opts opts_local; 8590 spdk_bs_sequence_t *seq; 8591 uint32_t page_num; 8592 8593 SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid); 8594 assert(spdk_get_thread() == bs->md_thread); 8595 8596 page_num = bs_blobid_to_page(blobid); 8597 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 8598 /* Invalid blobid */ 8599 cb_fn(cb_arg, NULL, -ENOENT); 8600 return; 8601 } 8602 8603 blob = blob_lookup(bs, blobid); 8604 if (blob) { 8605 blob->open_ref++; 8606 cb_fn(cb_arg, blob, 0); 8607 return; 8608 } 8609 8610 blob = blob_alloc(bs, blobid); 8611 if (!blob) { 8612 cb_fn(cb_arg, NULL, -ENOMEM); 8613 return; 8614 } 8615 8616 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 8617 if (opts) { 8618 blob_open_opts_copy(opts, &opts_local); 8619 } 8620 8621 blob->clear_method = opts_local.clear_method; 8622 8623 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 8624 cpl.u.blob_handle.cb_fn = cb_fn; 8625 cpl.u.blob_handle.cb_arg = cb_arg; 8626 cpl.u.blob_handle.blob = blob; 8627 cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx; 8628 8629 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8630 if (!seq) { 8631 blob_free(blob); 8632 cb_fn(cb_arg, NULL, -ENOMEM); 8633 return; 8634 } 8635 8636 blob_load(seq, blob, bs_open_blob_cpl, blob); 8637 } 8638 8639 void 8640 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8641 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8642 { 8643 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 8644 } 8645 8646 void 8647 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 8648 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8649 { 8650 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 8651 } 8652 8653 /* END spdk_bs_open_blob */ 8654 8655 /* START spdk_blob_set_read_only */ 8656 int 8657 spdk_blob_set_read_only(struct spdk_blob *blob) 8658 { 8659 blob_verify_md_op(blob); 8660 8661 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 8662 8663 blob->state = SPDK_BLOB_STATE_DIRTY; 8664 return 0; 8665 } 8666 /* END spdk_blob_set_read_only */ 8667 8668 /* START spdk_blob_sync_md */ 8669 8670 static void 8671 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8672 { 8673 struct spdk_blob *blob = cb_arg; 8674 8675 if (bserrno == 0 && 
(blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 8676 blob->data_ro = true; 8677 blob->md_ro = true; 8678 } 8679 8680 bs_sequence_finish(seq, bserrno); 8681 } 8682 8683 static void 8684 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8685 { 8686 struct spdk_bs_cpl cpl; 8687 spdk_bs_sequence_t *seq; 8688 8689 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8690 cpl.u.blob_basic.cb_fn = cb_fn; 8691 cpl.u.blob_basic.cb_arg = cb_arg; 8692 8693 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8694 if (!seq) { 8695 cb_fn(cb_arg, -ENOMEM); 8696 return; 8697 } 8698 8699 blob_persist(seq, blob, blob_sync_md_cpl, blob); 8700 } 8701 8702 void 8703 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8704 { 8705 blob_verify_md_op(blob); 8706 8707 SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id); 8708 8709 if (blob->md_ro) { 8710 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 8711 cb_fn(cb_arg, 0); 8712 return; 8713 } 8714 8715 blob_sync_md(blob, cb_fn, cb_arg); 8716 } 8717 8718 /* END spdk_blob_sync_md */ 8719 8720 struct spdk_blob_cluster_op_ctx { 8721 struct spdk_thread *thread; 8722 struct spdk_blob *blob; 8723 uint32_t cluster_num; /* cluster index in blob */ 8724 uint32_t cluster; /* cluster on disk */ 8725 uint32_t extent_page; /* extent page on disk */ 8726 struct spdk_blob_md_page *page; /* preallocated extent page */ 8727 int rc; 8728 spdk_blob_op_complete cb_fn; 8729 void *cb_arg; 8730 }; 8731 8732 static void 8733 blob_op_cluster_msg_cpl(void *arg) 8734 { 8735 struct spdk_blob_cluster_op_ctx *ctx = arg; 8736 8737 ctx->cb_fn(ctx->cb_arg, ctx->rc); 8738 free(ctx); 8739 } 8740 8741 static void 8742 blob_op_cluster_msg_cb(void *arg, int bserrno) 8743 { 8744 struct spdk_blob_cluster_op_ctx *ctx = arg; 8745 8746 ctx->rc = bserrno; 8747 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8748 } 8749 8750 static void 8751 blob_insert_new_ep_cb(void *arg, int bserrno) 8752 { 8753 struct spdk_blob_cluster_op_ctx *ctx = arg; 8754 uint32_t *extent_page; 8755 8756 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8757 *extent_page = ctx->extent_page; 8758 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8759 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8760 } 8761 8762 struct spdk_blob_write_extent_page_ctx { 8763 struct spdk_blob_store *bs; 8764 8765 uint32_t extent; 8766 struct spdk_blob_md_page *page; 8767 }; 8768 8769 static void 8770 blob_free_cluster_msg_cb(void *arg, int bserrno) 8771 { 8772 struct spdk_blob_cluster_op_ctx *ctx = arg; 8773 8774 spdk_spin_lock(&ctx->blob->bs->used_lock); 8775 bs_release_cluster(ctx->blob->bs, ctx->cluster); 8776 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8777 8778 ctx->rc = bserrno; 8779 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8780 } 8781 8782 static void 8783 blob_free_cluster_update_ep_cb(void *arg, int bserrno) 8784 { 8785 struct spdk_blob_cluster_op_ctx *ctx = arg; 8786 8787 if (bserrno != 0 || ctx->blob->bs->clean == 0) { 8788 blob_free_cluster_msg_cb(ctx, bserrno); 8789 return; 8790 } 8791 8792 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8793 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8794 } 8795 8796 static void 8797 blob_free_cluster_free_ep_cb(void *arg, int bserrno) 8798 { 8799 struct spdk_blob_cluster_op_ctx *ctx = arg; 8800 8801 spdk_spin_lock(&ctx->blob->bs->used_lock); 8802 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8803 bs_release_md_page(ctx->blob->bs, 
ctx->extent_page); 8804 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8805 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8806 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8807 } 8808 8809 static void 8810 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8811 { 8812 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8813 8814 free(ctx); 8815 bs_sequence_finish(seq, bserrno); 8816 } 8817 8818 static void 8819 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8820 { 8821 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8822 8823 if (bserrno != 0) { 8824 blob_persist_extent_page_cpl(seq, ctx, bserrno); 8825 return; 8826 } 8827 bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent), 8828 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 8829 blob_persist_extent_page_cpl, ctx); 8830 } 8831 8832 static void 8833 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 8834 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 8835 { 8836 struct spdk_blob_write_extent_page_ctx *ctx; 8837 spdk_bs_sequence_t *seq; 8838 struct spdk_bs_cpl cpl; 8839 8840 ctx = calloc(1, sizeof(*ctx)); 8841 if (!ctx) { 8842 cb_fn(cb_arg, -ENOMEM); 8843 return; 8844 } 8845 ctx->bs = blob->bs; 8846 ctx->extent = extent; 8847 ctx->page = page; 8848 8849 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8850 cpl.u.blob_basic.cb_fn = cb_fn; 8851 cpl.u.blob_basic.cb_arg = cb_arg; 8852 8853 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8854 if (!seq) { 8855 free(ctx); 8856 cb_fn(cb_arg, -ENOMEM); 8857 return; 8858 } 8859 8860 assert(page); 8861 page->next = SPDK_INVALID_MD_PAGE; 8862 page->id = blob->id; 8863 page->sequence_num = 0; 8864 8865 blob_serialize_extent_page(blob, cluster_num, page); 8866 8867 page->crc = blob_md_page_calc_crc(page); 8868 8869 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true); 8870 8871 bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx); 8872 } 8873 8874 static void 8875 blob_insert_cluster_msg(void *arg) 8876 { 8877 struct spdk_blob_cluster_op_ctx *ctx = arg; 8878 uint32_t *extent_page; 8879 8880 ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 8881 if (ctx->rc != 0) { 8882 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8883 return; 8884 } 8885 8886 if (ctx->blob->use_extent_table == false) { 8887 /* Extent table is not used, proceed with sync of md that will only use extents_rle. */ 8888 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8889 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8890 return; 8891 } 8892 8893 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8894 if (*extent_page == 0) { 8895 /* Extent page requires allocation. 8896 * It was already claimed in the used_md_pages map and placed in ctx. */ 8897 assert(ctx->extent_page != 0); 8898 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8899 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 8900 blob_insert_new_ep_cb, ctx); 8901 } else { 8902 /* It is possible for the original thread to have allocated an extent page for 8903 * a different cluster in the same extent page. In that case, proceed with 8904 * updating the existing extent page, but release the additional one.
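* The spare page in ctx->extent_page was claimed in used_md_pages on the
* submitting thread before this message was sent, so it is handed back under
* used_lock here to keep the bitmap consistent.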
*/ 8905 if (ctx->extent_page != 0) { 8906 spdk_spin_lock(&ctx->blob->bs->used_lock); 8907 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8908 bs_release_md_page(ctx->blob->bs, ctx->extent_page); 8909 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8910 ctx->extent_page = 0; 8911 } 8912 /* Extent page already allocated. 8913 * Every cluster allocation requires just an update of a single extent page. */ 8914 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 8915 blob_op_cluster_msg_cb, ctx); 8916 } 8917 } 8918 8919 static void 8920 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 8921 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page, 8922 spdk_blob_op_complete cb_fn, void *cb_arg) 8923 { 8924 struct spdk_blob_cluster_op_ctx *ctx; 8925 8926 ctx = calloc(1, sizeof(*ctx)); 8927 if (ctx == NULL) { 8928 cb_fn(cb_arg, -ENOMEM); 8929 return; 8930 } 8931 8932 ctx->thread = spdk_get_thread(); 8933 ctx->blob = blob; 8934 ctx->cluster_num = cluster_num; 8935 ctx->cluster = cluster; 8936 ctx->extent_page = extent_page; 8937 ctx->page = page; 8938 ctx->cb_fn = cb_fn; 8939 ctx->cb_arg = cb_arg; 8940 8941 spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx); 8942 } 8943 8944 static void 8945 blob_free_cluster_msg(void *arg) 8946 { 8947 struct spdk_blob_cluster_op_ctx *ctx = arg; 8948 uint32_t *extent_page; 8949 uint32_t start_cluster_idx; 8950 bool free_extent_page = true; 8951 size_t i; 8952 8953 ctx->cluster = bs_lba_to_cluster(ctx->blob->bs, ctx->blob->active.clusters[ctx->cluster_num]); 8954 8955 /* If there were concurrent unmaps to the same cluster, only release the cluster on the first one */ 8956 if (ctx->cluster == 0) { 8957 blob_op_cluster_msg_cb(ctx, 0); 8958 return; 8959 } 8960 8961 ctx->blob->active.clusters[ctx->cluster_num] = 0; 8962 if (ctx->cluster != 0) { 8963 ctx->blob->active.num_allocated_clusters--; 8964 } 8965 8966 if (ctx->blob->use_extent_table == false) { 8967 /* Extent table is not used, proceed with sync of md that will only use extents_rle.
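* The cluster is returned to the used_clusters pool under used_lock and the
* blob is marked dirty, so the next metadata sync rewrites the run-length
* encoded extent descriptors without it.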
*/ 8968 spdk_spin_lock(&ctx->blob->bs->used_lock); 8969 bs_release_cluster(ctx->blob->bs, ctx->cluster); 8970 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8971 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8972 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8973 return; 8974 } 8975 8976 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8977 8978 /* There shouldn't be parallel release operations on same cluster */ 8979 assert(*extent_page == ctx->extent_page); 8980 8981 start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP; 8982 for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) { 8983 if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) { 8984 free_extent_page = false; 8985 break; 8986 } 8987 } 8988 8989 if (free_extent_page) { 8990 assert(ctx->extent_page != 0); 8991 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8992 ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0; 8993 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 8994 blob_free_cluster_free_ep_cb, ctx); 8995 } else { 8996 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 8997 blob_free_cluster_update_ep_cb, ctx); 8998 } 8999 } 9000 9001 9002 static void 9003 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page, 9004 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 9005 { 9006 struct spdk_blob_cluster_op_ctx *ctx; 9007 9008 ctx = calloc(1, sizeof(*ctx)); 9009 if (ctx == NULL) { 9010 cb_fn(cb_arg, -ENOMEM); 9011 return; 9012 } 9013 9014 ctx->thread = spdk_get_thread(); 9015 ctx->blob = blob; 9016 ctx->cluster_num = cluster_num; 9017 ctx->extent_page = extent_page; 9018 ctx->page = page; 9019 ctx->cb_fn = cb_fn; 9020 ctx->cb_arg = cb_arg; 9021 9022 spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx); 9023 } 9024 9025 /* START spdk_blob_close */ 9026 9027 static void 9028 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9029 { 9030 struct spdk_blob *blob = cb_arg; 9031 9032 if (bserrno == 0) { 9033 blob->open_ref--; 9034 if (blob->open_ref == 0) { 9035 /* 9036 * Blobs with active.num_pages == 0 are deleted blobs. 9037 * these blobs are removed from the blob_store list 9038 * when the deletion process starts - so don't try to 9039 * remove them again. 
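* (bs_delete_open_cpl() cleared open_blobids and removed the blob from the
* open_blobs tree as soon as deletion began, and bs_delete_blob_finish() set
* active.num_pages to 0 before persisting.)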
9040 */ 9041 if (blob->active.num_pages > 0) { 9042 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 9043 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 9044 } 9045 blob_free(blob); 9046 } 9047 } 9048 9049 bs_sequence_finish(seq, bserrno); 9050 } 9051 9052 static void 9053 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno) 9054 { 9055 spdk_bs_sequence_t *seq = cb_arg; 9056 9057 if (bserrno != 0) { 9058 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n", 9059 blob->id, bserrno); 9060 bs_sequence_finish(seq, bserrno); 9061 return; 9062 } 9063 9064 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n", 9065 blob->id, spdk_thread_get_name(spdk_get_thread())); 9066 9067 /* Sync metadata */ 9068 blob_persist(seq, blob, blob_close_cpl, blob); 9069 } 9070 9071 void 9072 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 9073 { 9074 struct spdk_bs_cpl cpl; 9075 spdk_bs_sequence_t *seq; 9076 9077 blob_verify_md_op(blob); 9078 9079 SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id); 9080 9081 if (blob->open_ref == 0) { 9082 cb_fn(cb_arg, -EBADF); 9083 return; 9084 } 9085 9086 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 9087 cpl.u.blob_basic.cb_fn = cb_fn; 9088 cpl.u.blob_basic.cb_arg = cb_arg; 9089 9090 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 9091 if (!seq) { 9092 cb_fn(cb_arg, -ENOMEM); 9093 return; 9094 } 9095 9096 if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) { 9097 blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq); 9098 return; 9099 } 9100 9101 /* Sync metadata */ 9102 blob_persist(seq, blob, blob_close_cpl, blob); 9103 } 9104 9105 /* END spdk_blob_close */ 9106 9107 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 9108 { 9109 return spdk_get_io_channel(bs); 9110 } 9111 9112 void 9113 spdk_bs_free_io_channel(struct spdk_io_channel *channel) 9114 { 9115 blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel)); 9116 spdk_put_io_channel(channel); 9117 } 9118 9119 void 9120 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 9121 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9122 { 9123 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9124 SPDK_BLOB_UNMAP); 9125 } 9126 9127 void 9128 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 9129 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9130 { 9131 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9132 SPDK_BLOB_WRITE_ZEROES); 9133 } 9134 9135 void 9136 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 9137 void *payload, uint64_t offset, uint64_t length, 9138 spdk_blob_op_complete cb_fn, void *cb_arg) 9139 { 9140 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9141 SPDK_BLOB_WRITE); 9142 } 9143 9144 void 9145 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 9146 void *payload, uint64_t offset, uint64_t length, 9147 spdk_blob_op_complete cb_fn, void *cb_arg) 9148 { 9149 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9150 SPDK_BLOB_READ); 9151 } 9152 9153 void 9154 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 9155 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9156 spdk_blob_op_complete cb_fn, void *cb_arg) 
9157 { 9158 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 9159 } 9160 9161 void 9162 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 9163 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9164 spdk_blob_op_complete cb_fn, void *cb_arg) 9165 { 9166 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 9167 } 9168 9169 void 9170 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9171 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9172 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9173 { 9174 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 9175 io_opts); 9176 } 9177 9178 void 9179 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9180 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9181 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9182 { 9183 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 9184 io_opts); 9185 } 9186 9187 struct spdk_bs_iter_ctx { 9188 int64_t page_num; 9189 struct spdk_blob_store *bs; 9190 9191 spdk_blob_op_with_handle_complete cb_fn; 9192 void *cb_arg; 9193 }; 9194 9195 static void 9196 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 9197 { 9198 struct spdk_bs_iter_ctx *ctx = cb_arg; 9199 struct spdk_blob_store *bs = ctx->bs; 9200 spdk_blob_id id; 9201 9202 if (bserrno == 0) { 9203 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 9204 free(ctx); 9205 return; 9206 } 9207 9208 ctx->page_num++; 9209 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 9210 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 9211 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 9212 free(ctx); 9213 return; 9214 } 9215 9216 id = bs_page_to_blobid(ctx->page_num); 9217 9218 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 9219 } 9220 9221 void 9222 spdk_bs_iter_first(struct spdk_blob_store *bs, 9223 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9224 { 9225 struct spdk_bs_iter_ctx *ctx; 9226 9227 ctx = calloc(1, sizeof(*ctx)); 9228 if (!ctx) { 9229 cb_fn(cb_arg, NULL, -ENOMEM); 9230 return; 9231 } 9232 9233 ctx->page_num = -1; 9234 ctx->bs = bs; 9235 ctx->cb_fn = cb_fn; 9236 ctx->cb_arg = cb_arg; 9237 9238 bs_iter_cpl(ctx, NULL, -1); 9239 } 9240 9241 static void 9242 bs_iter_close_cpl(void *cb_arg, int bserrno) 9243 { 9244 struct spdk_bs_iter_ctx *ctx = cb_arg; 9245 9246 bs_iter_cpl(ctx, NULL, -1); 9247 } 9248 9249 void 9250 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 9251 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9252 { 9253 struct spdk_bs_iter_ctx *ctx; 9254 9255 assert(blob != NULL); 9256 9257 ctx = calloc(1, sizeof(*ctx)); 9258 if (!ctx) { 9259 cb_fn(cb_arg, NULL, -ENOMEM); 9260 return; 9261 } 9262 9263 ctx->page_num = bs_blobid_to_page(blob->id); 9264 ctx->bs = bs; 9265 ctx->cb_fn = cb_fn; 9266 ctx->cb_arg = cb_arg; 9267 9268 /* Close the existing blob */ 9269 spdk_blob_close(blob, bs_iter_close_cpl, ctx); 9270 } 9271 9272 static int 9273 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 9274 uint16_t value_len, bool internal) 9275 { 9276 struct spdk_xattr_tailq *xattrs; 9277 struct spdk_xattr *xattr; 9278 size_t desc_size; 9279 void *tmp; 9280 9281 blob_verify_md_op(blob); 9282 9283 if 
(blob->md_ro) { 9284 return -EPERM; 9285 } 9286 9287 desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len; 9288 if (desc_size > SPDK_BS_MAX_DESC_SIZE) { 9289 SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page (%zu)\n", name, 9290 desc_size, SPDK_BS_MAX_DESC_SIZE); 9291 return -ENOMEM; 9292 } 9293 9294 if (internal) { 9295 xattrs = &blob->xattrs_internal; 9296 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR; 9297 } else { 9298 xattrs = &blob->xattrs; 9299 } 9300 9301 TAILQ_FOREACH(xattr, xattrs, link) { 9302 if (!strcmp(name, xattr->name)) { 9303 tmp = malloc(value_len); 9304 if (!tmp) { 9305 return -ENOMEM; 9306 } 9307 9308 free(xattr->value); 9309 xattr->value_len = value_len; 9310 xattr->value = tmp; 9311 memcpy(xattr->value, value, value_len); 9312 9313 blob->state = SPDK_BLOB_STATE_DIRTY; 9314 9315 return 0; 9316 } 9317 } 9318 9319 xattr = calloc(1, sizeof(*xattr)); 9320 if (!xattr) { 9321 return -ENOMEM; 9322 } 9323 9324 xattr->name = strdup(name); 9325 if (!xattr->name) { 9326 free(xattr); 9327 return -ENOMEM; 9328 } 9329 9330 xattr->value_len = value_len; 9331 xattr->value = malloc(value_len); 9332 if (!xattr->value) { 9333 free(xattr->name); 9334 free(xattr); 9335 return -ENOMEM; 9336 } 9337 memcpy(xattr->value, value, value_len); 9338 TAILQ_INSERT_TAIL(xattrs, xattr, link); 9339 9340 blob->state = SPDK_BLOB_STATE_DIRTY; 9341 9342 return 0; 9343 } 9344 9345 int 9346 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 9347 uint16_t value_len) 9348 { 9349 return blob_set_xattr(blob, name, value, value_len, false); 9350 } 9351 9352 static int 9353 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal) 9354 { 9355 struct spdk_xattr_tailq *xattrs; 9356 struct spdk_xattr *xattr; 9357 9358 blob_verify_md_op(blob); 9359 9360 if (blob->md_ro) { 9361 return -EPERM; 9362 } 9363 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 9364 9365 TAILQ_FOREACH(xattr, xattrs, link) { 9366 if (!strcmp(name, xattr->name)) { 9367 TAILQ_REMOVE(xattrs, xattr, link); 9368 free(xattr->value); 9369 free(xattr->name); 9370 free(xattr); 9371 9372 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) { 9373 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR; 9374 } 9375 blob->state = SPDK_BLOB_STATE_DIRTY; 9376 9377 return 0; 9378 } 9379 } 9380 9381 return -ENOENT; 9382 } 9383 9384 int 9385 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name) 9386 { 9387 return blob_remove_xattr(blob, name, false); 9388 } 9389 9390 static int 9391 blob_get_xattr_value(struct spdk_blob *blob, const char *name, 9392 const void **value, size_t *value_len, bool internal) 9393 { 9394 struct spdk_xattr *xattr; 9395 struct spdk_xattr_tailq *xattrs; 9396 9397 xattrs = internal ?
&blob->xattrs_internal : &blob->xattrs; 9398 9399 TAILQ_FOREACH(xattr, xattrs, link) { 9400 if (!strcmp(name, xattr->name)) { 9401 *value = xattr->value; 9402 *value_len = xattr->value_len; 9403 return 0; 9404 } 9405 } 9406 return -ENOENT; 9407 } 9408 9409 int 9410 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 9411 const void **value, size_t *value_len) 9412 { 9413 blob_verify_md_op(blob); 9414 9415 return blob_get_xattr_value(blob, name, value, value_len, false); 9416 } 9417 9418 struct spdk_xattr_names { 9419 uint32_t count; 9420 const char *names[0]; 9421 }; 9422 9423 static int 9424 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 9425 { 9426 struct spdk_xattr *xattr; 9427 int count = 0; 9428 9429 TAILQ_FOREACH(xattr, xattrs, link) { 9430 count++; 9431 } 9432 9433 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 9434 if (*names == NULL) { 9435 return -ENOMEM; 9436 } 9437 9438 TAILQ_FOREACH(xattr, xattrs, link) { 9439 (*names)->names[(*names)->count++] = xattr->name; 9440 } 9441 9442 return 0; 9443 } 9444 9445 int 9446 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 9447 { 9448 blob_verify_md_op(blob); 9449 9450 return blob_get_xattr_names(&blob->xattrs, names); 9451 } 9452 9453 uint32_t 9454 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 9455 { 9456 assert(names != NULL); 9457 9458 return names->count; 9459 } 9460 9461 const char * 9462 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 9463 { 9464 if (index >= names->count) { 9465 return NULL; 9466 } 9467 9468 return names->names[index]; 9469 } 9470 9471 void 9472 spdk_xattr_names_free(struct spdk_xattr_names *names) 9473 { 9474 free(names); 9475 } 9476 9477 struct spdk_bs_type 9478 spdk_bs_get_bstype(struct spdk_blob_store *bs) 9479 { 9480 return bs->bstype; 9481 } 9482 9483 void 9484 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 9485 { 9486 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 9487 } 9488 9489 bool 9490 spdk_blob_is_read_only(struct spdk_blob *blob) 9491 { 9492 assert(blob != NULL); 9493 return (blob->data_ro || blob->md_ro); 9494 } 9495 9496 bool 9497 spdk_blob_is_snapshot(struct spdk_blob *blob) 9498 { 9499 struct spdk_blob_list *snapshot_entry; 9500 9501 assert(blob != NULL); 9502 9503 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 9504 if (snapshot_entry == NULL) { 9505 return false; 9506 } 9507 9508 return true; 9509 } 9510 9511 bool 9512 spdk_blob_is_clone(struct spdk_blob *blob) 9513 { 9514 assert(blob != NULL); 9515 9516 if (blob->parent_id != SPDK_BLOBID_INVALID && 9517 blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 9518 assert(spdk_blob_is_thin_provisioned(blob)); 9519 return true; 9520 } 9521 9522 return false; 9523 } 9524 9525 bool 9526 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 9527 { 9528 assert(blob != NULL); 9529 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 9530 } 9531 9532 bool 9533 spdk_blob_is_esnap_clone(const struct spdk_blob *blob) 9534 { 9535 return blob_is_esnap_clone(blob); 9536 } 9537 9538 static void 9539 blob_update_clear_method(struct spdk_blob *blob) 9540 { 9541 enum blob_clear_method stored_cm; 9542 9543 assert(blob != NULL); 9544 9545 /* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored 9546 * in metadata previously. If something other than the default was 9547 * specified, ignore stored value and use what was passed in.
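* For example, opening with BLOB_CLEAR_WITH_WRITE_ZEROES when the metadata
* stored BLOB_CLEAR_WITH_UNMAP keeps write-zeroes and logs the warning below,
* while opening with BLOB_CLEAR_WITH_DEFAULT silently adopts the stored unmap
* setting.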
static void
blob_update_clear_method(struct spdk_blob *blob)
{
	enum blob_clear_method stored_cm;

	assert(blob != NULL);

	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
	 * in metadata previously. If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
	 */
	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);

	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
		blob->clear_method = stored_cm;
	} else if (blob->clear_method != stored_cm) {
		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
			     blob->clear_method, stored_cm);
	}
}

spdk_blob_id
spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
			if (clone_entry->id == blob_id) {
				return snapshot_entry->id;
			}
		}
	}

	return SPDK_BLOBID_INVALID;
}

int
spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
		     size_t *count)
{
	struct spdk_blob_list *snapshot_entry, *clone_entry;
	size_t n;

	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
	if (snapshot_entry == NULL) {
		*count = 0;
		return 0;
	}

	if (ids == NULL || *count < snapshot_entry->clone_count) {
		*count = snapshot_entry->clone_count;
		return -ENOMEM;
	}
	*count = snapshot_entry->clone_count;

	n = 0;
	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
		ids[n++] = clone_entry->id;
	}

	return 0;
}
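/*
 * Illustrative sketch (not part of blobstore): the intended two-call pattern
 * for spdk_blob_get_clones() above. The first call with ids == NULL reports
 * the required count via -ENOMEM; the second fills the caller's array. The
 * helper name is hypothetical.
 */
#if 0
static int
example_list_clones(struct spdk_blob_store *bs, spdk_blob_id snapshot_id)
{
	spdk_blob_id *ids;
	size_t count = 0;
	size_t i;
	int rc;

	rc = spdk_blob_get_clones(bs, snapshot_id, NULL, &count);
	if (rc != 0 && rc != -ENOMEM) {
		return rc;
	}
	if (count == 0) {
		return 0;	/* Not a snapshot, or a snapshot with no clones. */
	}

	ids = calloc(count, sizeof(*ids));
	if (ids == NULL) {
		return -ENOMEM;
	}
	rc = spdk_blob_get_clones(bs, snapshot_id, ids, &count);
	if (rc == 0) {
		for (i = 0; i < count; i++) {
			SPDK_NOTICELOG("clone 0x%" PRIx64 "\n", ids[i]);
		}
	}
	free(ids);
	return rc;
}
#endif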
9660 } 9661 bs_load_grow_continue(ctx); 9662 } 9663 9664 static void 9665 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9666 { 9667 struct spdk_bs_load_ctx *ctx = cb_arg; 9668 9669 if (bserrno != 0) { 9670 bs_load_ctx_fail(ctx, bserrno); 9671 return; 9672 } 9673 9674 spdk_free(ctx->mask); 9675 9676 bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 9677 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 9678 bs_load_grow_super_write_cpl, ctx); 9679 } 9680 9681 static void 9682 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9683 { 9684 struct spdk_bs_load_ctx *ctx = cb_arg; 9685 uint64_t lba, lba_count; 9686 uint64_t dev_size; 9687 uint64_t total_clusters; 9688 9689 if (bserrno != 0) { 9690 bs_load_ctx_fail(ctx, bserrno); 9691 return; 9692 } 9693 9694 /* The type must be correct */ 9695 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 9696 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 9697 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 9698 struct spdk_blob_md_page) * 8)); 9699 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9700 total_clusters = dev_size / ctx->super->cluster_size; 9701 ctx->mask->length = total_clusters; 9702 9703 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 9704 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 9705 bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count, 9706 bs_load_grow_used_clusters_write_cpl, ctx); 9707 } 9708 9709 static void 9710 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx) 9711 { 9712 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 9713 uint64_t lba, lba_count, mask_size; 9714 9715 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9716 total_clusters = dev_size / ctx->super->cluster_size; 9717 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 9718 spdk_divide_round_up(total_clusters, 8), 9719 SPDK_BS_PAGE_SIZE); 9720 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 9721 /* No necessary to grow or no space to grow */ 9722 if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) { 9723 SPDK_DEBUGLOG(blob, "No grow\n"); 9724 bs_load_grow_continue(ctx); 9725 return; 9726 } 9727 9728 SPDK_DEBUGLOG(blob, "Resize blobstore\n"); 9729 9730 ctx->super->size = dev_size; 9731 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 9732 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 9733 9734 mask_size = used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 9735 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_NUMA_ID_ANY, 9736 SPDK_MALLOC_DMA); 9737 if (!ctx->mask) { 9738 bs_load_ctx_fail(ctx, -ENOMEM); 9739 return; 9740 } 9741 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 9742 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 9743 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 9744 bs_load_grow_used_clusters_read_cpl, ctx); 9745 } 9746 9747 static void 9748 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9749 { 9750 struct spdk_bs_load_ctx *ctx = cb_arg; 9751 int rc; 9752 9753 rc = bs_super_validate(ctx->super, ctx->bs); 9754 if (rc != 0) { 9755 bs_load_ctx_fail(ctx, rc); 9756 return; 9757 } 9758 9759 bs_load_try_to_grow(ctx); 9760 } 9761 9762 struct spdk_bs_grow_ctx 
struct spdk_bs_grow_ctx {
	struct spdk_blob_store		*bs;
	struct spdk_bs_super_block	*super;

	struct spdk_bit_pool		*new_used_clusters;
	struct spdk_bs_md_mask		*new_used_clusters_mask;

	spdk_bs_sequence_t		*seq;
};

static void
bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno)
{
	if (bserrno != 0) {
		spdk_bit_pool_free(&ctx->new_used_clusters);
	}

	bs_sequence_finish(ctx->seq, bserrno);
	free(ctx->new_used_clusters_mask);
	spdk_free(ctx->super);
	free(ctx);
}

static void
bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_grow_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;
	uint64_t total_clusters;

	if (bserrno != 0) {
		bs_grow_live_done(ctx, bserrno);
		return;
	}

	/*
	 * The blobstore is not clean until unload; for now only the super block is up to
	 * date. This is similar to the state right after blobstore init, before
	 * bs_write_used_md() has executed.
	 * When cleanly unloaded, the used md pages will be written out.
	 * In case of an unclean shutdown, loading the blobstore will go through the recovery
	 * path, correctly filling out used_clusters with the new size and writing it out.
	 */
	bs->clean = 0;

	/* Reverting super->size past this point is complex; avoid any error paths
	 * that would require doing so. */
	spdk_spin_lock(&bs->used_lock);

	total_clusters = ctx->super->size / ctx->super->cluster_size;

	assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters));
	spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask);

	assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters));
	spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask);

	spdk_bit_pool_free(&bs->used_clusters);
	bs->used_clusters = ctx->new_used_clusters;

	bs->total_clusters = total_clusters;
	bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up(
					  bs->md_start + bs->md_len, bs->pages_per_cluster);

	bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters);
	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
	spdk_spin_unlock(&bs->used_lock);

	bs_grow_live_done(ctx, 0);
}
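/*
 * Illustrative sketch (not part of blobstore): growing a bit pool the same
 * way bs_grow_live_super_write_cpl() above does - serialize the old pool's
 * allocation state into a zeroed mask buffer, load it into a larger pool,
 * then swap. The helper name is hypothetical; the caller must stop using the
 * old pool, which is freed here.
 */
#if 0
static struct spdk_bit_pool *
example_bit_pool_grow(struct spdk_bit_pool *old_pool, uint32_t new_capacity)
{
	struct spdk_bit_pool *new_pool;
	void *mask;

	assert(new_capacity >= spdk_bit_pool_capacity(old_pool));

	/* Zeroed, so bits beyond the old capacity load as free. */
	mask = calloc(1, spdk_divide_round_up(new_capacity, 8));
	if (mask == NULL) {
		return NULL;
	}
	new_pool = spdk_bit_pool_create(new_capacity);
	if (new_pool == NULL) {
		free(mask);
		return NULL;
	}

	spdk_bit_pool_store_mask(old_pool, mask);
	spdk_bit_pool_load_mask(new_pool, mask);
	free(mask);
	spdk_bit_pool_free(&old_pool);

	return new_pool;
}
#endif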
static void
bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_grow_ctx *ctx = cb_arg;
	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
	int rc;

	if (bserrno != 0) {
		bs_grow_live_done(ctx, bserrno);
		return;
	}

	rc = bs_super_validate(ctx->super, ctx->bs);
	if (rc != 0) {
		bs_grow_live_done(ctx, rc);
		return;
	}

	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	total_clusters = dev_size / ctx->super->cluster_size;
	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
				spdk_divide_round_up(total_clusters, 8),
				SPDK_BS_PAGE_SIZE);
	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
	/* Only dev_size needs comparing here, since it can change while
	 * total_clusters stays the same. */
	if (dev_size == ctx->super->size) {
		SPDK_DEBUGLOG(blob, "No need to grow blobstore\n");
		bs_grow_live_done(ctx, 0);
		return;
	}
	/*
	 * A blobstore cannot be shrunk, so refuse to grow if:
	 * - the new size of the device is smaller than the size in the super block
	 * - the new total number of clusters is smaller than the used_clusters bit_pool
	 * - there is not enough space in metadata for the used_cluster_mask to be written out
	 */
	if (dev_size < ctx->super->size ||
	    total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) ||
	    used_cluster_mask_len > max_used_cluster_mask) {
		SPDK_DEBUGLOG(blob, "No space to grow blobstore\n");
		bs_grow_live_done(ctx, -ENOSPC);
		return;
	}

	SPDK_DEBUGLOG(blob, "Resizing blobstore\n");

	ctx->new_used_clusters_mask = calloc(1, total_clusters);
	if (!ctx->new_used_clusters_mask) {
		bs_grow_live_done(ctx, -ENOMEM);
		return;
	}
	ctx->new_used_clusters = spdk_bit_pool_create(total_clusters);
	if (!ctx->new_used_clusters) {
		bs_grow_live_done(ctx, -ENOMEM);
		return;
	}

	ctx->super->clean = 0;
	ctx->super->size = dev_size;
	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
	bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx);
}

void
spdk_bs_grow_live(struct spdk_blob_store *bs,
		  spdk_bs_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	struct spdk_bs_grow_ctx *ctx;

	assert(spdk_get_thread() == bs->md_thread);

	SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev);

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = cb_fn;
	cpl.u.bs_basic.cb_arg = cb_arg;

	ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->bs = bs;

	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
				  SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->super) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!ctx->seq) {
		spdk_free(ctx->super);
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Read the super block */
	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_grow_live_load_super_cpl, ctx);
}
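/*
 * Illustrative sketch (not part of blobstore): growing a live blobstore from
 * its md thread after the underlying bs_dev has gotten bigger, e.g. from an
 * event or RPC handler. The callback and helper names are hypothetical.
 */
#if 0
static void
example_grow_done(void *cb_arg, int bserrno)
{
	struct spdk_blob_store *bs = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("grow failed: %s\n", spdk_strerror(-bserrno));
		return;
	}
	SPDK_NOTICELOG("blobstore now has %" PRIu64 " free clusters\n",
		       spdk_bs_free_cluster_count(bs));
}

static void
example_grow(struct spdk_blob_store *bs)
{
	/* Re-reads the super block, refuses to shrink, and swaps the
	 * used_clusters pool under bs->used_lock. */
	spdk_bs_grow_live(bs, example_grow_done, bs);
}
#endif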
void
spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_cpl cpl;
	struct spdk_bs_load_ctx *ctx;
	struct spdk_bs_opts opts = {};
	int err;

	SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);

	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
		SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen);
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, -EINVAL);
		return;
	}

	spdk_bs_opts_init(&opts, sizeof(opts));
	if (o) {
		if (bs_opts_copy(o, &opts)) {
			return;
		}
	}

	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, -EINVAL);
		return;
	}

	err = bs_alloc(dev, &opts, &bs, &ctx);
	if (err) {
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, err);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
	cpl.u.bs_handle.cb_fn = cb_fn;
	cpl.u.bs_handle.cb_arg = cb_arg;
	cpl.u.bs_handle.bs = bs;

	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!ctx->seq) {
		spdk_free(ctx->super);
		free(ctx);
		bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	/* Read the super block */
	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_grow_load_super_cpl, ctx);
}

int
spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len)
{
	if (!blob_is_esnap_clone(blob)) {
		return -EINVAL;
	}

	return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true);
}

struct spdk_io_channel *
blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob)
{
	struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(ch);
	struct spdk_bs_dev *bs_dev = blob->back_bs_dev;
	struct blob_esnap_channel find = {};
	struct blob_esnap_channel *esnap_channel, *existing;

	find.blob_id = blob->id;
	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
	if (spdk_likely(esnap_channel != NULL)) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n",
			      blob->id, spdk_thread_get_name(spdk_get_thread()));
		return esnap_channel->channel;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n",
		      blob->id, spdk_thread_get_name(spdk_get_thread()));

	esnap_channel = calloc(1, sizeof(*esnap_channel));
	if (esnap_channel == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n",
			       find.blob_id);
		return NULL;
	}
	esnap_channel->channel = bs_dev->create_channel(bs_dev);
	if (esnap_channel->channel == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id);
		free(esnap_channel);
		return NULL;
	}
	esnap_channel->blob_id = find.blob_id;
	existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
	if (spdk_unlikely(existing != NULL)) {
		/*
		 * This should be unreachable: all modifications to this tree happen on this thread.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": lost race to allocate a channel\n", find.blob_id);
		assert(false);

		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
		free(esnap_channel);

		return existing->channel;
	}

	return esnap_channel->channel;
}
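/*
 * Illustrative sketch (not part of blobstore): reading the external snapshot
 * identifier of an esnap clone via spdk_blob_get_esnap_id() above. The id is
 * an opaque byte string stored as an internal xattr; this sketch assumes
 * (hypothetically) that the creator stored a printable, NUL-terminated name.
 */
#if 0
static void
example_log_esnap_id(struct spdk_blob *blob)
{
	const void *id;
	size_t len;

	if (spdk_blob_get_esnap_id(blob, &id, &len) != 0) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " is not an esnap clone\n",
			       spdk_blob_get_id(blob));
		return;
	}
	/* The pointer aliases blob metadata and stays valid while it is open. */
	SPDK_NOTICELOG("blob 0x%" PRIx64 " esnap id: %.*s\n",
		       spdk_blob_get_id(blob), (int)len, (const char *)id);
}
#endif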
static int
blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2)
{
	return (c1->blob_id < c2->blob_id ? -1 : c1->blob_id > c2->blob_id);
}

struct blob_esnap_destroy_ctx {
	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
	struct spdk_blob *blob;
	struct spdk_bs_dev *back_bs_dev;
	bool abort_io;
};

static void
blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status)
{
	struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n",
		      blob->id);

	if (ctx->cb_fn != NULL) {
		ctx->cb_fn(ctx->cb_arg, blob, status);
	}
	free(ctx);

	bs->esnap_channels_unloading--;
	if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) {
		spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg);
	}
}

static void
blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i)
{
	struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_blob *blob = ctx->blob;
	struct spdk_bs_dev *bs_dev = ctx->back_bs_dev;
	struct spdk_io_channel *channel = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(channel);
	struct blob_esnap_channel *esnap_channel;
	struct blob_esnap_channel find = {};

	assert(spdk_get_thread() == spdk_io_channel_get_thread(channel));

	find.blob_id = blob->id;
	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
	if (esnap_channel != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n",
			      blob->id, spdk_thread_get_name(spdk_get_thread()));
		RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);

		if (ctx->abort_io) {
			spdk_bs_user_op_t *op, *tmp;

			TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) {
				if (op->back_channel == esnap_channel->channel) {
					TAILQ_REMOVE(&bs_channel->queued_io, op, link);
					bs_user_op_abort(op, -EIO);
				}
			}
		}

		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
		free(esnap_channel);
	}

	spdk_for_each_channel_continue(i, 0);
}
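/*
 * Condensed sketch (not part of blobstore) of the spdk_for_each_channel()
 * pattern used by blob_esnap_destroy_bs_dev_channels() below: a per-channel
 * callback runs on each thread that owns a channel, and a completion callback
 * runs once after all of them. Names here are hypothetical.
 */
#if 0
static void
example_per_channel(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *channel = spdk_io_channel_iter_get_channel(i);

	/* Runs on the thread that created this channel. */
	(void)channel;
	spdk_for_each_channel_continue(i, 0);
}

static void
example_done(struct spdk_io_channel_iter *i, int status)
{
	void *ctx = spdk_io_channel_iter_get_ctx(i);

	/* Runs once, after every channel has been visited. */
	(void)ctx;
	(void)status;
}

static void
example_fan_out(struct spdk_blob_store *bs, void *ctx)
{
	/* The blobstore itself is the io_device its channels hang off of. */
	spdk_for_each_channel(bs, example_per_channel, ctx, example_done);
}
#endif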
/*
 * Destroy the channels for a specific blob on each thread with a blobstore channel.
 * This should be used when closing an esnap clone blob and after decoupling from the parent.
 */
static void
blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
				   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct blob_esnap_destroy_ctx *ctx;

	if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) {
		if (cb_fn != NULL) {
			cb_fn(cb_arg, blob, 0);
		}
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		if (cb_fn != NULL) {
			cb_fn(cb_arg, blob, -ENOMEM);
		}
		return;
	}
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->blob = blob;
	ctx->back_bs_dev = blob->back_bs_dev;
	ctx->abort_io = abort_io;

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n",
		      blob->id);

	blob->bs->esnap_channels_unloading++;
	spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx,
			      blob_esnap_destroy_channels_done);
}

/*
 * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a
 * bs_channel is destroyed.
 */
static void
blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch)
{
	struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp;

	assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch)));

	SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n",
		      spdk_thread_get_name(spdk_get_thread()));
	RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels,
			esnap_channel_tmp) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64
			      ": destroying one channel in thread %s\n",
			      esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread()));
		RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel);
		spdk_put_io_channel(esnap_channel->channel);
		free(esnap_channel);
	}
	SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n",
		      spdk_thread_get_name(spdk_get_thread()));
}
static void
blob_set_back_bs_dev_done(void *_ctx, int bserrno)
{
	struct set_bs_dev_ctx *ctx = _ctx;

	if (bserrno != 0) {
		/* Even though the unfreeze failed, the update may have succeeded. */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id,
			    bserrno);
	}
	ctx->cb_fn(ctx->cb_arg, ctx->bserrno);
	free(ctx);
}

static void
blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno)
{
	struct set_bs_dev_ctx *ctx = _ctx;
	int rc;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n",
			    blob->id, bserrno);
		ctx->bserrno = bserrno;
		blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
		return;
	}

	if (blob->back_bs_dev != NULL) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
		blob->back_bs_dev = NULL;
	}

	if (ctx->parent_refs_cb_fn) {
		rc = ctx->parent_refs_cb_fn(blob, ctx->parent_refs_cb_arg);
		if (rc != 0) {
			ctx->bserrno = rc;
			blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
			return;
		}
	}

	SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id);
	blob->back_bs_dev = ctx->back_bs_dev;
	ctx->bserrno = 0;

	blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
}

static void
blob_set_back_bs_dev_frozen(void *_ctx, int bserrno)
{
	struct set_bs_dev_ctx *ctx = _ctx;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id,
			    bserrno);
		ctx->cb_fn(ctx->cb_arg, bserrno);
		free(ctx);
		return;
	}

	/*
	 * This does not prevent future reads from the esnap device because any future IO will
	 * lazily create a new esnap IO channel.
	 */
	blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx);
}

void
spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
			   spdk_blob_op_complete cb_fn, void *cb_arg)
{
	if (!blob_is_esnap_clone(blob)) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	blob_set_back_bs_dev(blob, back_bs_dev, NULL, NULL, cb_fn, cb_arg);
}

struct spdk_bs_dev *
spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob)
{
	if (!blob_is_esnap_clone(blob)) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
		return NULL;
	}

	return blob->back_bs_dev;
}

bool
spdk_blob_is_degraded(const struct spdk_blob *blob)
{
	if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) {
		return true;
	}
	if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) {
		return false;
	}

	return blob->back_bs_dev->is_degraded(blob->back_bs_dev);
}
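/*
 * Illustrative sketch (not part of blobstore): hot-plugging a replacement
 * external snapshot device into an esnap clone via spdk_blob_set_esnap_bs_dev()
 * above, e.g. when a missing base bdev reappears. IO to the blob is frozen
 * while the swap happens, and the old back_bs_dev (if any) is destroyed.
 * Names here are hypothetical.
 */
#if 0
static void
example_hotplug_done(void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": hotplug failed: %s\n",
			    spdk_blob_get_id(blob), spdk_strerror(-bserrno));
		return;
	}
	/* spdk_blob_is_degraded() now reflects the new device's state. */
}

static void
example_hotplug(struct spdk_blob *blob, struct spdk_bs_dev *new_back_bs_dev)
{
	spdk_blob_set_esnap_bs_dev(blob, new_back_bs_dev, example_hotplug_done, blob);
}
#endif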
SPDK_LOG_REGISTER_COMPONENT(blob)
SPDK_LOG_REGISTER_COMPONENT(blob_esnap)

SPDK_TRACE_REGISTER_FN(blob_trace, "blob", TRACE_GROUP_BLOB)
{
	struct spdk_trace_tpoint_opts opts[] = {
		{
			"BLOB_REQ_SET_START", TRACE_BLOB_REQ_SET_START,
			OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 1,
			{
				{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 }
			}
		},
		{
			"BLOB_REQ_SET_COMPLETE", TRACE_BLOB_REQ_SET_COMPLETE,
			OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 0,
			{
				{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 }
			}
		},
	};

	spdk_trace_register_object(OBJECT_BLOB_CB_ARG, 'a');
	spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
	spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_START, OBJECT_BLOB_CB_ARG, 1);
	spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_DONE, OBJECT_BLOB_CB_ARG, 0);
}