/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/trace.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/trace_defs.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg);

static void bs_shallow_copy_cluster_find_next(void *cb_arg);

/*
 * External snapshots require a channel per thread per esnap bdev. The tree
 * is populated lazily as blob IOs are handled by the back_bs_dev. When this
 * channel is destroyed, all the channels in the tree are destroyed.
 */

struct blob_esnap_channel {
	RB_ENTRY(blob_esnap_channel)	node;
	spdk_blob_id			blob_id;
	struct spdk_io_channel		*channel;
};

static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
static void blob_set_back_bs_dev_frozen(void *_ctx, int bserrno);
RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)
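/*
 * Illustrative sketch, not called anywhere in this file: the lazy per-thread
 * lookup against blob_esnap_channel_tree follows roughly this shape. The
 * helper name esnap_channel_get() and the esnap_channels field used here are
 * placeholders; the real lookup/insert logic lives in the esnap IO path.
 *
 *	static struct spdk_io_channel *
 *	esnap_channel_get(struct spdk_bs_channel *ch, struct spdk_blob *blob)
 *	{
 *		struct blob_esnap_channel find = { .blob_id = blob->id };
 *		struct blob_esnap_channel *ec;
 *
 *		ec = RB_FIND(blob_esnap_channel_tree, &ch->esnap_channels, &find);
 *		if (ec == NULL) {
 *			// allocate ec, create a channel from blob->back_bs_dev,
 *			// then RB_INSERT it so later IOs on this thread reuse it
 *		}
 *		return ec == NULL ? NULL : ec->channel;
 *	}
 */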
static inline bool
blob_is_esnap_clone(const struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
}

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	assert(blob1 != NULL && blob2 != NULL);
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	assert(spdk_spin_held(&bs->used_lock));

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	blob->active.num_allocated_clusters++;

	return 0;
}
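/*
 * Worked example for the mapping above: bs_cluster_to_lba() scales a cluster
 * index by the number of device blocks per cluster. Assuming a 1 MiB cluster
 * size on a device with 4 KiB blocks, one cluster spans 256 LBAs, so
 * inserting cluster 3 stores LBA 3 * 256 = 768 in active.clusters[]. A zero
 * entry always means "unallocated", which is why a valid data cluster can
 * never map to LBA 0.
 */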
static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = 0;

	assert(spdk_spin_held(&blob->bs->used_lock));

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request. */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* An extent page can never occupy md page 0, so start the search from 1. */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent page is allocated for the cluster yet. */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request. */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
		      blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}
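/*
 * Typical caller usage of the opts_size pattern above (sketch): passing
 * sizeof(opts) lets a binary built against an older, smaller struct still get
 * every field it knows about initialized, while FIELD_OK() keeps the library
 * from writing past the end of the caller's struct. The callback name below
 * is a placeholder:
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_create_done, cb_arg);
 */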
static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

static void
blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_dev	*bs_dev = ctx;

	if (bserrno != 0) {
		/*
		 * This is probably due to a memory allocation failure when creating the
		 * blob_esnap_destroy_ctx before iterating threads.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
			    blob->id, bserrno);
		assert(false);
	}

	if (bs_dev == NULL) {
		/*
		 * This check exists to make scanbuild happy.
		 *
		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
		 * the blobstore is being loaded. It could also be NULL if there was an error
		 * opening the esnap device. In each of these cases, no channels could have been
		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
		 * deref.
		 */
		assert(false);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
	bs_dev->destroy(bs_dev);
}

static void
blob_back_bs_destroy(struct spdk_blob *blob)
{
	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
		      blob->id);

	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
					   blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

struct blob_parent {
	union {
		struct {
			spdk_blob_id id;
			struct spdk_blob *blob;
		} snapshot;

		struct {
			void *id;
			uint32_t id_len;
			struct spdk_bs_dev *back_bs_dev;
		} esnap;
	} u;
};

typedef int (*set_parent_refs_cb)(struct spdk_blob *blob, struct blob_parent *parent);
struct set_bs_dev_ctx {
	struct spdk_blob	*blob;
	struct spdk_bs_dev	*back_bs_dev;

	/*
	 * This callback is used during a set parent operation to change the references
	 * to the parent of the blob.
	 */
	set_parent_refs_cb	parent_refs_cb_fn;
	struct blob_parent	*parent_refs_cb_arg;

	spdk_blob_op_complete	cb_fn;
	void			*cb_arg;
	int			bserrno;
};

static void
blob_set_back_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
		     set_parent_refs_cb parent_refs_cb_fn, struct blob_parent *parent_refs_cb_arg,
		     spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct set_bs_dev_ctx	*ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n",
			    blob->id);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->parent_refs_cb_fn = parent_refs_cb_fn;
	ctx->parent_refs_cb_arg = parent_refs_cb_arg;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->back_bs_dev = back_bs_dev;
	ctx->blob = blob;

	blob_freeze_io(blob, blob_set_back_bs_dev_frozen, ctx);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set	*set;
	struct spdk_bs_user_op_args	*args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
}
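/*
 * Freeze/unfreeze are reference counted and used in pairs around metadata
 * changes that must not race with user IO, as blob_set_back_bs_dev() above
 * does. A typical sequence, with hypothetical callback names:
 *
 *	blob_freeze_io(blob, do_md_update_frozen, ctx);
 *	// ...the frozen callback mutates blob state safely...
 *	blob_unfreeze_io(blob, md_update_done, ctx);
 *
 * While frozen_refcnt > 0, newly submitted IO is parked on the per-channel
 * queued_io list and replayed by blob_execute_queued_io() on unfreeze.
 */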
static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_allocated_clusters = blob->active.num_allocated_clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr	*xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}
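/*
 * On-disk layout parsed above, immediately after the descriptor header:
 *
 *	| name_length | value_length | name bytes ... | value bytes ... |
 *
 * For example, assuming the two length fields are uint16_t, the name "foo"
 * with an 8-byte value gives desc->length = 2 + 2 + 3 + 8 = 15. The name is
 * not NUL-terminated on disk, which is why the parser appends '\0' itself.
 */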
static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t	cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags	*desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
			unsigned int	i, j;
			unsigned int	cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* An Extent Table is already present in the md; both
				 * descriptors should never be present at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
						blob->active.num_allocated_clusters++;
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in the MD; both
				 * descriptors should never be present at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* The number of clusters in this ET does not match the number
				 * from a previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages; those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
			unsigned int	i;
			unsigned int	cluster_count = 0;
			size_t		cluster_idx_length;

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in the MD; both
				 * descriptors should never be present at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx
			 * should match the current size of the blob.
			 * If changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
					blob->active.num_allocated_clusters++;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify whether
			 * this blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}
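/*
 * Worked EXTENT_RLE example for the parser above: assuming extents[] =
 * { { .cluster_idx = 10, .length = 2 }, { .cluster_idx = 0, .length = 3 } },
 * a thin-provisioned blob gains five clusters: two backed by clusters 10 and
 * 11 (stored as LBAs) followed by three unallocated entries stored as 0. On a
 * thick-provisioned blob the zero run would instead fail with -EINVAL.
 */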
static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD; this can happen,
	 * for example, if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}
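/*
 * Serialized metadata forms a singly linked chain of md pages. Page 0 lives
 * at the md page derived from the blob id; every page carries the blob id,
 * its sequence_num, and the index of the next page, with
 * SPDK_INVALID_MD_PAGE terminating the chain:
 *
 *	[id, seq 0, next=P1] -> [id, seq 1, next=P2] -> [id, seq 2, next=INVALID]
 *
 * blob_load_cpl() below walks this chain one page read at a time.
 */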
/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr	*desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}
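/*
 * Worked example of the encoding above: assuming active.extent_pages =
 * { 5, 0, 0, 7 }, the serialized entries become { page_idx 5, num_pages 1 },
 * { page_idx 0, num_pages 2 } and { page_idx 7, num_pages 1 }. Only runs of
 * unallocated (zero) extent pages are collapsed; allocated pages are always
 * emitted one entry at a time.
 */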
static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t	last_extent_page;
	int		rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted.
	 * Such a case occurs with num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
	/* Assert for scan-build false positive */
	assert(lba_per_cluster > 0);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}
static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr	*xattr;
	int	rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}
static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page	*cur_page;
	int				rc;
	uint8_t				*buf;
	size_t				remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	uint32_t			next_extent_page;
	spdk_bs_sequence_t		*seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}
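/*
 * The CRC above covers the first SPDK_BS_PAGE_SIZE - 4 bytes of a page; the
 * final 4 bytes hold the stored crc field itself. Assuming the usual 4 KiB
 * metadata page, that is a crc32c over bytes 0..4091 compared against the
 * value at bytes 4092..4095, seeded and finalized with BLOB_CRC32C_INITIAL.
 */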
SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev " 1505 "with error %d\n", blob->id, rc); 1506 return rc; 1507 } 1508 1509 /* 1510 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot. 1511 * This especially might happen during spdk_bs_load() iteration. 1512 */ 1513 if (bs_dev != NULL) { 1514 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id); 1515 if ((bs->io_unit_size % bs_dev->blocklen) != 0) { 1516 SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u " 1517 "is not compatible with blobstore block size %u\n", 1518 blob->id, bs_dev->blocklen, bs->io_unit_size); 1519 bs_dev->destroy(bs_dev); 1520 return -EINVAL; 1521 } 1522 } 1523 1524 blob->back_bs_dev = bs_dev; 1525 blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 1526 1527 return 0; 1528 } 1529 1530 static void 1531 blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg) 1532 { 1533 struct spdk_blob_load_ctx *ctx = cb_arg; 1534 struct spdk_blob *blob = ctx->blob; 1535 const void *value; 1536 size_t len; 1537 int rc; 1538 1539 if (blob_is_esnap_clone(blob)) { 1540 rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx); 1541 blob_load_final(ctx, rc); 1542 return; 1543 } 1544 1545 if (spdk_blob_is_thin_provisioned(blob)) { 1546 rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true); 1547 if (rc == 0) { 1548 if (len != sizeof(spdk_blob_id)) { 1549 blob_load_final(ctx, -EINVAL); 1550 return; 1551 } 1552 /* open snapshot blob and continue in the callback function */ 1553 blob->parent_id = *(spdk_blob_id *)value; 1554 spdk_bs_open_blob(blob->bs, blob->parent_id, 1555 blob_load_snapshot_cpl, ctx); 1556 return; 1557 } else { 1558 /* add zeroes_dev for thin provisioned blob */ 1559 blob->back_bs_dev = bs_create_zeroes_dev(); 1560 } 1561 } else { 1562 /* standard blob */ 1563 blob->back_bs_dev = NULL; 1564 } 1565 blob_load_final(ctx, 0); 1566 } 1567 1568 static void 1569 blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1570 { 1571 struct spdk_blob_load_ctx *ctx = cb_arg; 1572 struct spdk_blob *blob = ctx->blob; 1573 struct spdk_blob_md_page *page; 1574 uint64_t i; 1575 uint32_t crc; 1576 uint64_t lba; 1577 void *tmp; 1578 uint64_t sz; 1579 1580 if (bserrno) { 1581 SPDK_ERRLOG("Extent page read failed: %d\n", bserrno); 1582 blob_load_final(ctx, bserrno); 1583 return; 1584 } 1585 1586 if (ctx->pages == NULL) { 1587 /* First iteration of this function, allocate buffer for single EXTENT_PAGE */ 1588 ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 1589 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 1590 if (!ctx->pages) { 1591 blob_load_final(ctx, -ENOMEM); 1592 return; 1593 } 1594 ctx->num_pages = 1; 1595 ctx->next_extent_page = 0; 1596 } else { 1597 page = &ctx->pages[0]; 1598 crc = blob_md_page_calc_crc(page); 1599 if (crc != page->crc) { 1600 blob_load_final(ctx, -EINVAL); 1601 return; 1602 } 1603 1604 if (page->next != SPDK_INVALID_MD_PAGE) { 1605 blob_load_final(ctx, -EINVAL); 1606 return; 1607 } 1608 1609 bserrno = blob_parse_extent_page(page, blob); 1610 if (bserrno) { 1611 blob_load_final(ctx, bserrno); 1612 return; 1613 } 1614 } 1615 1616 for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) { 1617 if (blob->active.extent_pages[i] != 0) { 1618 /* Extent page was allocated, read and parse it. 
static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	uint64_t			i;
	uint32_t			crc;
	uint64_t			lba;
	void				*tmp;
	uint64_t			sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case the blob size should be increased by up to the
			 * amount left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(seq, ctx);
}
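/*
 * blob_load_cpl_extents_cpl() is re-entered once per allocated extent page:
 * ctx->next_extent_page records where the scan stopped before each
 * single-page read, the read completion re-enters the function to parse the
 * page, and runs of unallocated extent pages are consumed inline by growing
 * the cluster array from remaining_clusters_in_et instead of issuing reads.
 */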
static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	int				rc;
	uint32_t			crc;
	uint32_t			current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE was found, or no extent_* descriptor at all, disable
		 * support for the extent table. No extent_* descriptors means that the
		 * blob has a length of 0 and no extent_rle descriptors were persisted
		 * for it. EXTENT_TABLE, if used, is always present in the metadata
		 * regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(seq, ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			next_extent_page;
	struct spdk_blob_md_page	*extent_page;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};
static void
bs_batch_clear_dev(struct spdk_blob *blob, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

static int
bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
{
	uint32_t	crc;
	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];

	if (super->version > SPDK_BS_VERSION ||
	    super->version < SPDK_BS_INITIAL_VERSION) {
		return -EILSEQ;
	}

	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(super->signature)) != 0) {
		return -EILSEQ;
	}

	crc = blob_md_page_calc_crc(super);
	if (crc != super->crc) {
		return -EILSEQ;
	}

	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless of bstype\n");
	} else {
		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		return -ENXIO;
	}

	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
		return -EILSEQ;
	}

	return 0;
}

static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx	*next_persist, *tmp;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
}
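/*
 * Persists are batched with two lists. Requests arriving while a persist is
 * in flight sit on pending_persists. When the in-flight persist finishes,
 * everything on persists_to_complete is completed, the two lists are swapped
 * with TAILQ_SWAP(), and one new persist covers the whole swapped batch:
 *
 *	pending_persists:      requests queued during the current md write
 *	persists_to_complete:  requests satisfied by the current md write
 */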
spdk_blob_store *bs = blob->bs; 1952 size_t i; 1953 uint64_t lba; 1954 uint64_t lba_count; 1955 spdk_bs_batch_t *batch; 1956 1957 batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx); 1958 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 1959 1960 /* Clear all extent_pages that were truncated */ 1961 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1962 /* Nothing to clear if it was not allocated */ 1963 if (blob->active.extent_pages[i] != 0) { 1964 lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]); 1965 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1966 } 1967 } 1968 1969 bs_batch_close(batch); 1970 } 1971 1972 static void 1973 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1974 { 1975 struct spdk_blob_persist_ctx *ctx = cb_arg; 1976 struct spdk_blob *blob = ctx->blob; 1977 struct spdk_blob_store *bs = blob->bs; 1978 size_t i; 1979 1980 if (bserrno != 0) { 1981 blob_persist_complete(seq, ctx, bserrno); 1982 return; 1983 } 1984 1985 spdk_spin_lock(&bs->used_lock); 1986 /* Release all clusters that were truncated */ 1987 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1988 uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]); 1989 1990 /* Nothing to release if it was not allocated */ 1991 if (blob->active.clusters[i] != 0) { 1992 bs_release_cluster(bs, cluster_num); 1993 } 1994 } 1995 spdk_spin_unlock(&bs->used_lock); 1996 1997 if (blob->active.num_clusters == 0) { 1998 free(blob->active.clusters); 1999 blob->active.clusters = NULL; 2000 blob->active.cluster_array_size = 0; 2001 } else if (blob->active.num_clusters != blob->active.cluster_array_size) { 2002 #ifndef __clang_analyzer__ 2003 void *tmp; 2004 2005 /* scan-build really can't figure reallocs, workaround it */ 2006 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters); 2007 assert(tmp != NULL); 2008 blob->active.clusters = tmp; 2009 2010 #endif 2011 blob->active.cluster_array_size = blob->active.num_clusters; 2012 } 2013 2014 /* Move on to clearing extent pages */ 2015 blob_persist_clear_extents(seq, ctx); 2016 } 2017 2018 static void 2019 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2020 { 2021 struct spdk_blob *blob = ctx->blob; 2022 struct spdk_blob_store *bs = blob->bs; 2023 spdk_bs_batch_t *batch; 2024 size_t i; 2025 uint64_t lba; 2026 uint64_t lba_count; 2027 2028 /* Clusters don't move around in blobs. The list shrinks or grows 2029 * at the end, but no changes ever occur in the middle of the list. 2030 */ 2031 2032 batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx); 2033 2034 /* Clear all clusters that were truncated */ 2035 lba = 0; 2036 lba_count = 0; 2037 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 2038 uint64_t next_lba = blob->active.clusters[i]; 2039 uint64_t next_lba_count = bs_cluster_to_lba(bs, 1); 2040 2041 if (next_lba > 0 && (lba + lba_count) == next_lba) { 2042 /* This cluster is contiguous with the previous one. */ 2043 lba_count += next_lba_count; 2044 continue; 2045 } else if (next_lba == 0) { 2046 continue; 2047 } 2048 2049 /* This cluster is not contiguous with the previous one. 
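 * A new run therefore starts here. Illustrative example (hypothetical
 * geometry, 512-byte blocks with a 1 MiB cluster, i.e. 2048 LBAs per
 * cluster): clusters stored at LBA 2048 and LBA 4096 are coalesced above
 * into one 4096-LBA clear, while a cluster stored at LBA 10240 ends that
 * run and starts a new one here.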
*/ 2050 2051 /* If a run of LBAs previously existed, clear them now */ 2052 if (lba_count > 0) { 2053 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2054 } 2055 2056 /* Start building the next batch */ 2057 lba = next_lba; 2058 if (next_lba > 0) { 2059 lba_count = next_lba_count; 2060 } else { 2061 lba_count = 0; 2062 } 2063 } 2064 2065 /* If we ended with a contiguous set of LBAs, clear them now */ 2066 if (lba_count > 0) { 2067 bs_batch_clear_dev(ctx->blob, batch, lba, lba_count); 2068 } 2069 2070 bs_batch_close(batch); 2071 } 2072 2073 static void 2074 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2075 { 2076 struct spdk_blob_persist_ctx *ctx = cb_arg; 2077 struct spdk_blob *blob = ctx->blob; 2078 struct spdk_blob_store *bs = blob->bs; 2079 size_t i; 2080 2081 if (bserrno != 0) { 2082 blob_persist_complete(seq, ctx, bserrno); 2083 return; 2084 } 2085 2086 spdk_spin_lock(&bs->used_lock); 2087 2088 /* This loop starts at 1 because the first page is special and handled 2089 * below. The pages (except the first) are never written in place, 2090 * so any pages in the clean list must be zeroed. 2091 */ 2092 for (i = 1; i < blob->clean.num_pages; i++) { 2093 bs_release_md_page(bs, blob->clean.pages[i]); 2094 } 2095 2096 if (blob->active.num_pages == 0) { 2097 uint32_t page_num; 2098 2099 page_num = bs_blobid_to_page(blob->id); 2100 bs_release_md_page(bs, page_num); 2101 } 2102 2103 spdk_spin_unlock(&bs->used_lock); 2104 2105 /* Move on to clearing clusters */ 2106 blob_persist_clear_clusters(seq, ctx); 2107 } 2108 2109 static void 2110 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2111 { 2112 struct spdk_blob_persist_ctx *ctx = cb_arg; 2113 struct spdk_blob *blob = ctx->blob; 2114 struct spdk_blob_store *bs = blob->bs; 2115 uint64_t lba; 2116 uint64_t lba_count; 2117 spdk_bs_batch_t *batch; 2118 size_t i; 2119 2120 if (bserrno != 0) { 2121 blob_persist_complete(seq, ctx, bserrno); 2122 return; 2123 } 2124 2125 batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx); 2126 2127 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 2128 2129 /* This loop starts at 1 because the first page is special and handled 2130 * below. The pages (except the first) are never written in place, 2131 * so any pages in the clean list must be zeroed. 2132 */ 2133 for (i = 1; i < blob->clean.num_pages; i++) { 2134 lba = bs_md_page_to_lba(bs, blob->clean.pages[i]); 2135 2136 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2137 } 2138 2139 /* The first page will only be zeroed if this is a delete. 
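 * active.num_pages can only be 0 on delete (see blob_persist_start), and
 * zeroing the page slot derived from the blobid is what makes the blob
 * undiscoverable on the next load.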
*/ 2140 if (blob->active.num_pages == 0) { 2141 uint32_t page_num; 2142 2143 /* The first page in the metadata goes where the blobid indicates */ 2144 page_num = bs_blobid_to_page(blob->id); 2145 lba = bs_md_page_to_lba(bs, page_num); 2146 2147 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2148 } 2149 2150 bs_batch_close(batch); 2151 } 2152 2153 static void 2154 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2155 { 2156 struct spdk_blob_persist_ctx *ctx = cb_arg; 2157 struct spdk_blob *blob = ctx->blob; 2158 struct spdk_blob_store *bs = blob->bs; 2159 uint64_t lba; 2160 uint32_t lba_count; 2161 struct spdk_blob_md_page *page; 2162 2163 if (bserrno != 0) { 2164 blob_persist_complete(seq, ctx, bserrno); 2165 return; 2166 } 2167 2168 if (blob->active.num_pages == 0) { 2169 /* Move on to the next step */ 2170 blob_persist_zero_pages(seq, ctx, 0); 2171 return; 2172 } 2173 2174 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2175 2176 page = &ctx->pages[0]; 2177 /* The first page in the metadata goes where the blobid indicates */ 2178 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 2179 2180 bs_sequence_write_dev(seq, page, lba, lba_count, 2181 blob_persist_zero_pages, ctx); 2182 } 2183 2184 static void 2185 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2186 { 2187 struct spdk_blob *blob = ctx->blob; 2188 struct spdk_blob_store *bs = blob->bs; 2189 uint64_t lba; 2190 uint32_t lba_count; 2191 struct spdk_blob_md_page *page; 2192 spdk_bs_batch_t *batch; 2193 size_t i; 2194 2195 /* Clusters don't move around in blobs. The list shrinks or grows 2196 * at the end, but no changes ever occur in the middle of the list. 2197 */ 2198 2199 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2200 2201 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 2202 2203 /* This starts at 1. The root page is not written until 2204 * all of the others are finished 2205 */ 2206 for (i = 1; i < blob->active.num_pages; i++) { 2207 page = &ctx->pages[i]; 2208 assert(page->sequence_num == i); 2209 2210 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 2211 2212 bs_batch_write_dev(batch, page, lba, lba_count); 2213 } 2214 2215 bs_batch_close(batch); 2216 } 2217 2218 static int 2219 blob_resize(struct spdk_blob *blob, uint64_t sz) 2220 { 2221 uint64_t i; 2222 uint64_t *tmp; 2223 uint64_t cluster; 2224 uint32_t lfmd; /* lowest free md page */ 2225 uint64_t num_clusters; 2226 uint32_t *ep_tmp; 2227 uint64_t new_num_ep = 0, current_num_ep = 0; 2228 struct spdk_blob_store *bs; 2229 int rc; 2230 2231 bs = blob->bs; 2232 2233 blob_verify_md_op(blob); 2234 2235 if (blob->active.num_clusters == sz) { 2236 return 0; 2237 } 2238 2239 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2240 /* If this blob was resized to be larger, then smaller, then 2241 * larger without syncing, then the cluster array already 2242 * contains spare assigned clusters we can use. 2243 */ 2244 num_clusters = spdk_min(blob->active.cluster_array_size, 2245 sz); 2246 } else { 2247 num_clusters = blob->active.num_clusters; 2248 } 2249 2250 if (blob->use_extent_table) { 2251 /* Round up since every cluster beyond current Extent Table size, 2252 * requires new extent page. 
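 * Worked example (hypothetical, assuming SPDK_EXTENTS_PER_EP were 512):
 * growing from 1000 to 1500 clusters keeps current_num_ep at
 * ceil(1000/512) == 2 while new_num_ep becomes ceil(1500/512) == 3, so
 * exactly one additional extent page must be allocatable.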
*/ 2253 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2254 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2255 } 2256 2257 assert(!spdk_spin_held(&bs->used_lock)); 2258 2259 /* Check first that we have enough clusters and md pages before we start claiming them. 2260 * bs->used_lock is held to ensure that clusters we think are free are still free when we go 2261 * to claim them later in this function. 2262 */ 2263 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2264 spdk_spin_lock(&bs->used_lock); 2265 if ((sz - num_clusters) > bs->num_free_clusters) { 2266 rc = -ENOSPC; 2267 goto out; 2268 } 2269 lfmd = 0; 2270 for (i = current_num_ep; i < new_num_ep ; i++) { 2271 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2272 if (lfmd == UINT32_MAX) { 2273 /* No more free md pages. Cannot satisfy the request */ 2274 rc = -ENOSPC; 2275 goto out; 2276 } 2277 } 2278 } 2279 2280 if (sz > num_clusters) { 2281 /* Expand the cluster array if necessary. 2282 * We only shrink the array when persisting. 2283 */ 2284 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2285 if (sz > 0 && tmp == NULL) { 2286 rc = -ENOMEM; 2287 goto out; 2288 } 2289 memset(tmp + blob->active.cluster_array_size, 0, 2290 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2291 blob->active.clusters = tmp; 2292 blob->active.cluster_array_size = sz; 2293 2294 /* Expand the extents table, only if enough clusters were added */ 2295 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2296 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2297 if (new_num_ep > 0 && ep_tmp == NULL) { 2298 rc = -ENOMEM; 2299 goto out; 2300 } 2301 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2302 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2303 blob->active.extent_pages = ep_tmp; 2304 blob->active.extent_pages_array_size = new_num_ep; 2305 } 2306 } 2307 2308 blob->state = SPDK_BLOB_STATE_DIRTY; 2309 2310 if (spdk_blob_is_thin_provisioned(blob) == false) { 2311 cluster = 0; 2312 lfmd = 0; 2313 for (i = num_clusters; i < sz; i++) { 2314 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2315 /* Do not increment lfmd here. lfmd will get updated 2316 * to the md_page allocated (if any) when a new extent 2317 * page is needed. Just pass that value again, 2318 * bs_allocate_cluster will just start at that index 2319 * to find the next free md_page when needed. 
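 * For instance, if the first new extent page claims md page 7, later
 * iterations resume the free-page search at index 7 instead of rescanning
 * the bit array from the beginning.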
2320 */ 2321 } 2322 } 2323 2324 /* If we are shrinking the blob, we must adjust num_allocated_clusters */ 2325 for (i = sz; i < num_clusters; i++) { 2326 if (blob->active.clusters[i] != 0) { 2327 blob->active.num_allocated_clusters--; 2328 } 2329 } 2330 2331 blob->active.num_clusters = sz; 2332 blob->active.num_extent_pages = new_num_ep; 2333 2334 rc = 0; 2335 out: 2336 if (spdk_spin_held(&bs->used_lock)) { 2337 spdk_spin_unlock(&bs->used_lock); 2338 } 2339 2340 return rc; 2341 } 2342 2343 static void 2344 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2345 { 2346 spdk_bs_sequence_t *seq = ctx->seq; 2347 struct spdk_blob *blob = ctx->blob; 2348 struct spdk_blob_store *bs = blob->bs; 2349 uint64_t i; 2350 uint32_t page_num; 2351 void *tmp; 2352 int rc; 2353 2354 /* Generate the new metadata */ 2355 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2356 if (rc < 0) { 2357 blob_persist_complete(seq, ctx, rc); 2358 return; 2359 } 2360 2361 assert(blob->active.num_pages >= 1); 2362 2363 /* Resize the cache of page indices */ 2364 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2365 if (!tmp) { 2366 blob_persist_complete(seq, ctx, -ENOMEM); 2367 return; 2368 } 2369 blob->active.pages = tmp; 2370 2371 /* Assign this metadata to pages. This requires two passes - one to verify that there are 2372 * enough pages and a second to actually claim them. The used_lock is held across 2373 * both passes to ensure things don't change in the middle. 2374 */ 2375 spdk_spin_lock(&bs->used_lock); 2376 page_num = 0; 2377 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 2378 for (i = 1; i < blob->active.num_pages; i++) { 2379 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2380 if (page_num == UINT32_MAX) { 2381 spdk_spin_unlock(&bs->used_lock); 2382 blob_persist_complete(seq, ctx, -ENOMEM); 2383 return; 2384 } 2385 page_num++; 2386 } 2387 2388 page_num = 0; 2389 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2390 for (i = 1; i < blob->active.num_pages; i++) { 2391 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2392 ctx->pages[i - 1].next = page_num; 2393 /* Now that previous metadata page is complete, calculate the crc for it. */ 2394 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2395 blob->active.pages[i] = page_num; 2396 bs_claim_md_page(bs, page_num); 2397 SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num, 2398 blob->id); 2399 page_num++; 2400 } 2401 spdk_spin_unlock(&bs->used_lock); 2402 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2403 /* Start writing the metadata from last page to first */ 2404 blob->state = SPDK_BLOB_STATE_CLEAN; 2405 blob_persist_write_page_chain(seq, ctx); 2406 } 2407 2408 static void 2409 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2410 { 2411 struct spdk_blob_persist_ctx *ctx = cb_arg; 2412 struct spdk_blob *blob = ctx->blob; 2413 size_t i; 2414 uint32_t extent_page_id; 2415 uint32_t page_count = 0; 2416 int rc; 2417 2418 if (ctx->extent_page != NULL) { 2419 spdk_free(ctx->extent_page); 2420 ctx->extent_page = NULL; 2421 } 2422 2423 if (bserrno != 0) { 2424 blob_persist_complete(seq, ctx, bserrno); 2425 return; 2426 } 2427 2428 /* Only write out Extent Pages when blob was resized. 
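 * ctx->next_extent_page was primed in blob_persist_start to the first
 * extent page that could have changed during the resize, so earlier,
 * unmodified pages are skipped by this loop.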
*/ 2429 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2430 extent_page_id = blob->active.extent_pages[i]; 2431 if (extent_page_id == 0) { 2432 /* No Extent Page to persist */ 2433 assert(spdk_blob_is_thin_provisioned(blob)); 2434 continue; 2435 } 2436 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2437 ctx->next_extent_page = i + 1; 2438 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2439 if (rc < 0) { 2440 blob_persist_complete(seq, ctx, rc); 2441 return; 2442 } 2443 2444 blob->state = SPDK_BLOB_STATE_DIRTY; 2445 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2446 2447 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2448 2449 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2450 bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 2451 blob_persist_write_extent_pages, ctx); 2452 return; 2453 } 2454 2455 blob_persist_generate_new_md(ctx); 2456 } 2457 2458 static void 2459 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2460 { 2461 struct spdk_blob_persist_ctx *ctx = cb_arg; 2462 struct spdk_blob *blob = ctx->blob; 2463 2464 if (bserrno != 0) { 2465 blob_persist_complete(seq, ctx, bserrno); 2466 return; 2467 } 2468 2469 if (blob->active.num_pages == 0) { 2470 /* This is the signal that the blob should be deleted. 2471 * Immediately jump to the clean up routine. */ 2472 assert(blob->clean.num_pages > 0); 2473 blob->state = SPDK_BLOB_STATE_CLEAN; 2474 blob_persist_zero_pages(seq, ctx, 0); 2475 return; 2476 2477 } 2478 2479 if (blob->clean.num_clusters < blob->active.num_clusters) { 2480 /* Blob was resized up */ 2481 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2482 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2483 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2484 /* Blob was resized down */ 2485 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2486 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2487 } else { 2488 /* No change in size occurred */ 2489 blob_persist_generate_new_md(ctx); 2490 return; 2491 } 2492 2493 blob_persist_write_extent_pages(seq, ctx, 0); 2494 } 2495 2496 struct spdk_bs_mark_dirty { 2497 struct spdk_blob_store *bs; 2498 struct spdk_bs_super_block *super; 2499 spdk_bs_sequence_cpl cb_fn; 2500 void *cb_arg; 2501 }; 2502 2503 static void 2504 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2505 { 2506 struct spdk_bs_mark_dirty *ctx = cb_arg; 2507 2508 if (bserrno == 0) { 2509 ctx->bs->clean = 0; 2510 } 2511 2512 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 2513 2514 spdk_free(ctx->super); 2515 free(ctx); 2516 } 2517 2518 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2519 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2520 2521 2522 static void 2523 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2524 { 2525 struct spdk_bs_mark_dirty *ctx = cb_arg; 2526 int rc; 2527 2528 if (bserrno != 0) { 2529 bs_mark_dirty_write_cpl(seq, ctx, bserrno); 2530 return; 2531 } 2532 2533 rc = bs_super_validate(ctx->super, ctx->bs); 2534 if (rc != 0) { 2535 bs_mark_dirty_write_cpl(seq, ctx, rc); 2536 return; 2537 } 2538 2539 ctx->super->clean = 0; 2540 if (ctx->super->size == 0) { 2541 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 2542 } 
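/* A stored size of 0 presumably means the super block was written before
 * the size field was persisted; the fallback above substitutes the full
 * device size before the super block is rewritten below. */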
2543 2544 bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx); 2545 } 2546 2547 static void 2548 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2549 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2550 { 2551 struct spdk_bs_mark_dirty *ctx; 2552 2553 /* Blobstore is already marked dirty */ 2554 if (bs->clean == 0) { 2555 cb_fn(seq, cb_arg, 0); 2556 return; 2557 } 2558 2559 ctx = calloc(1, sizeof(*ctx)); 2560 if (!ctx) { 2561 cb_fn(seq, cb_arg, -ENOMEM); 2562 return; 2563 } 2564 ctx->bs = bs; 2565 ctx->cb_fn = cb_fn; 2566 ctx->cb_arg = cb_arg; 2567 2568 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2569 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2570 if (!ctx->super) { 2571 free(ctx); 2572 cb_fn(seq, cb_arg, -ENOMEM); 2573 return; 2574 } 2575 2576 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 2577 bs_byte_to_lba(bs, sizeof(*ctx->super)), 2578 bs_mark_dirty_write, ctx); 2579 } 2580 2581 /* Write a blob to disk */ 2582 static void 2583 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2584 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2585 { 2586 struct spdk_blob_persist_ctx *ctx; 2587 2588 blob_verify_md_op(blob); 2589 2590 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2591 cb_fn(seq, cb_arg, 0); 2592 return; 2593 } 2594 2595 ctx = calloc(1, sizeof(*ctx)); 2596 if (!ctx) { 2597 cb_fn(seq, cb_arg, -ENOMEM); 2598 return; 2599 } 2600 ctx->blob = blob; 2601 ctx->seq = seq; 2602 ctx->cb_fn = cb_fn; 2603 ctx->cb_arg = cb_arg; 2604 2605 /* Multiple blob persists can affect one another, via blob->state or 2606 * blob mutable data changes. To prevent it, queue up the persists. */ 2607 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2608 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2609 return; 2610 } 2611 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2612 2613 bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx); 2614 } 2615 2616 struct spdk_blob_copy_cluster_ctx { 2617 struct spdk_blob *blob; 2618 uint8_t *buf; 2619 uint64_t page; 2620 uint64_t new_cluster; 2621 uint32_t new_extent_page; 2622 spdk_bs_sequence_t *seq; 2623 struct spdk_blob_md_page *new_cluster_page; 2624 }; 2625 2626 struct spdk_blob_free_cluster_ctx { 2627 struct spdk_blob *blob; 2628 uint64_t page; 2629 struct spdk_blob_md_page *md_page; 2630 uint64_t cluster_num; 2631 uint32_t extent_page; 2632 spdk_bs_sequence_t *seq; 2633 }; 2634 2635 static void 2636 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2637 { 2638 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2639 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2640 TAILQ_HEAD(, spdk_bs_request_set) requests; 2641 spdk_bs_user_op_t *op; 2642 2643 TAILQ_INIT(&requests); 2644 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2645 2646 while (!TAILQ_EMPTY(&requests)) { 2647 op = TAILQ_FIRST(&requests); 2648 TAILQ_REMOVE(&requests, op, link); 2649 if (bserrno == 0) { 2650 bs_user_op_execute(op); 2651 } else { 2652 bs_user_op_abort(op, bserrno); 2653 } 2654 } 2655 2656 spdk_free(ctx->buf); 2657 free(ctx); 2658 } 2659 2660 static void 2661 blob_free_cluster_cpl(void *cb_arg, int bserrno) 2662 { 2663 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 2664 spdk_bs_sequence_t *seq = ctx->seq; 2665 2666 bs_sequence_finish(seq, bserrno); 2667 2668 free(ctx); 2669 } 2670 2671 static void 2672 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx) 2673 { 2674 
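/* Roll back a speculative allocation: return the claimed cluster, and the
 * extent page if one was claimed with it, while holding used_lock. */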
spdk_spin_lock(&ctx->blob->bs->used_lock); 2675 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2676 if (ctx->new_extent_page != 0) { 2677 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2678 } 2679 spdk_spin_unlock(&ctx->blob->bs->used_lock); 2680 } 2681 2682 static void 2683 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno) 2684 { 2685 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2686 2687 if (bserrno) { 2688 SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno); 2689 } 2690 2691 blob_insert_cluster_revert(ctx); 2692 bs_sequence_finish(ctx->seq, bserrno); 2693 } 2694 2695 static void 2696 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx) 2697 { 2698 struct spdk_bs_cpl cpl; 2699 spdk_bs_batch_t *batch; 2700 struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel); 2701 2702 /* 2703 * We allocated a cluster and we copied data to it. But now, we realized that we don't need 2704 * this cluster and we want to release it. We must ensure that we clear the data on this 2705 * cluster. 2706 * The cluster may later be re-allocated by a thick-provisioned blob for example. When 2707 * reading from this thick-provisioned blob before writing data, we should read zeroes. 2708 */ 2709 2710 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2711 cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl; 2712 cpl.u.blob_basic.cb_arg = ctx; 2713 2714 batch = bs_batch_open(ch, &cpl, ctx->blob); 2715 if (!batch) { 2716 blob_insert_cluster_clear_cpl(ctx, -ENOMEM); 2717 return; 2718 } 2719 2720 bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2721 bs_cluster_to_lba(ctx->blob->bs, 1)); 2722 bs_batch_close(batch); 2723 } 2724 2725 static void 2726 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2727 { 2728 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2729 2730 if (bserrno) { 2731 if (bserrno == -EEXIST) { 2732 /* The metadata insert failed because another thread 2733 * allocated the cluster first. Clear and free our cluster 2734 * but continue without error. 
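 * The -EEXIST is deliberately swallowed: the racing thread's cluster now
 * backs this range, so the cluster allocated here is cleared and released
 * via blob_insert_cluster_clear() instead of being inserted.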
*/ 2735 blob_insert_cluster_clear(ctx); 2736 return; 2737 } 2738 2739 blob_insert_cluster_revert(ctx); 2740 } 2741 2742 bs_sequence_finish(ctx->seq, bserrno); 2743 } 2744 2745 static void 2746 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2747 { 2748 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2749 uint32_t cluster_number; 2750 2751 if (bserrno) { 2752 /* The write failed, so jump to the final completion handler */ 2753 bs_sequence_finish(seq, bserrno); 2754 return; 2755 } 2756 2757 cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page); 2758 2759 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2760 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2761 } 2762 2763 static void 2764 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2765 { 2766 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2767 2768 if (bserrno != 0) { 2769 /* The read failed, so jump to the final completion handler */ 2770 bs_sequence_finish(seq, bserrno); 2771 return; 2772 } 2773 2774 /* Write whole cluster */ 2775 bs_sequence_write_dev(seq, ctx->buf, 2776 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2777 bs_cluster_to_lba(ctx->blob->bs, 1), 2778 blob_write_copy_cpl, ctx); 2779 } 2780 2781 static bool 2782 blob_can_copy(struct spdk_blob *blob, uint32_t cluster_start_page, uint64_t *base_lba) 2783 { 2784 uint64_t lba = bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page); 2785 2786 return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) && 2787 blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba); 2788 } 2789 2790 static void 2791 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba) 2792 { 2793 struct spdk_blob *blob = ctx->blob; 2794 uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz); 2795 2796 bs_sequence_copy_dev(ctx->seq, 2797 bs_cluster_to_lba(blob->bs, ctx->new_cluster), 2798 src_lba, 2799 lba_count, 2800 blob_write_copy_cpl, ctx); 2801 } 2802 2803 static void 2804 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2805 struct spdk_io_channel *_ch, 2806 uint64_t io_unit, spdk_bs_user_op_t *op) 2807 { 2808 struct spdk_bs_cpl cpl; 2809 struct spdk_bs_channel *ch; 2810 struct spdk_blob_copy_cluster_ctx *ctx; 2811 uint32_t cluster_start_page; 2812 uint32_t cluster_number; 2813 bool is_zeroes; 2814 bool can_copy; 2815 bool is_valid_range; 2816 uint64_t copy_src_lba; 2817 int rc; 2818 2819 ch = spdk_io_channel_get_ctx(_ch); 2820 2821 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2822 /* There are already operations pending. Queue this user op 2823 * and return because it will be re-executed when the outstanding 2824 * cluster allocation completes. */ 2825 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2826 return; 2827 } 2828 2829 /* Round the io_unit offset down to the first page in the cluster */ 2830 cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit); 2831 2832 /* Calculate which index in the metadata cluster array the corresponding 2833 * cluster is supposed to be at. 
*/ 2834 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2835 2836 ctx = calloc(1, sizeof(*ctx)); 2837 if (!ctx) { 2838 bs_user_op_abort(op, -ENOMEM); 2839 return; 2840 } 2841 2842 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2843 2844 ctx->blob = blob; 2845 ctx->page = cluster_start_page; 2846 ctx->new_cluster_page = ch->new_cluster_page; 2847 memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE); 2848 2849 /* Check if the cluster that we intend to do CoW for is valid for 2850 * the backing dev. For zeroes backing dev, it'll be always valid. 2851 * For other backing dev e.g. a snapshot, it could be invalid if 2852 * the blob has been resized after snapshot was taken. */ 2853 is_valid_range = blob->back_bs_dev->is_range_valid(blob->back_bs_dev, 2854 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2855 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2856 2857 can_copy = is_valid_range && blob_can_copy(blob, cluster_start_page, &copy_src_lba); 2858 2859 is_zeroes = is_valid_range && blob->back_bs_dev->is_zeroes(blob->back_bs_dev, 2860 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2861 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2862 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) { 2863 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2864 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2865 if (!ctx->buf) { 2866 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2867 blob->bs->cluster_sz); 2868 free(ctx); 2869 bs_user_op_abort(op, -ENOMEM); 2870 return; 2871 } 2872 } 2873 2874 spdk_spin_lock(&blob->bs->used_lock); 2875 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2876 false); 2877 spdk_spin_unlock(&blob->bs->used_lock); 2878 if (rc != 0) { 2879 spdk_free(ctx->buf); 2880 free(ctx); 2881 bs_user_op_abort(op, rc); 2882 return; 2883 } 2884 2885 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2886 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2887 cpl.u.blob_basic.cb_arg = ctx; 2888 2889 ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob); 2890 if (!ctx->seq) { 2891 spdk_spin_lock(&blob->bs->used_lock); 2892 bs_release_cluster(blob->bs, ctx->new_cluster); 2893 spdk_spin_unlock(&blob->bs->used_lock); 2894 spdk_free(ctx->buf); 2895 free(ctx); 2896 bs_user_op_abort(op, -ENOMEM); 2897 return; 2898 } 2899 2900 /* Queue the user op to block other incoming operations */ 2901 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2902 2903 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) { 2904 if (can_copy) { 2905 blob_copy(ctx, op, copy_src_lba); 2906 } else { 2907 /* Read cluster from backing device */ 2908 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2909 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2910 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2911 blob_write_copy, ctx); 2912 } 2913 2914 } else { 2915 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2916 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2917 } 2918 } 2919 2920 static inline bool 2921 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2922 uint64_t *lba, uint64_t *lba_count) 2923 { 2924 *lba_count = length; 2925 2926 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2927 assert(blob->back_bs_dev != NULL); 2928 *lba = bs_io_unit_to_back_dev_lba(blob, io_unit); 2929 *lba_count = 
bs_io_unit_to_back_dev_lba(blob, *lba_count); 2930 return false; 2931 } else { 2932 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2933 return true; 2934 } 2935 } 2936 2937 struct op_split_ctx { 2938 struct spdk_blob *blob; 2939 struct spdk_io_channel *channel; 2940 uint64_t io_unit_offset; 2941 uint64_t io_units_remaining; 2942 void *curr_payload; 2943 enum spdk_blob_op_type op_type; 2944 spdk_bs_sequence_t *seq; 2945 bool in_submit_ctx; 2946 bool completed_in_submit_ctx; 2947 bool done; 2948 }; 2949 2950 static void 2951 blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2952 { 2953 struct op_split_ctx *ctx = cb_arg; 2954 struct spdk_blob *blob = ctx->blob; 2955 struct spdk_io_channel *ch = ctx->channel; 2956 enum spdk_blob_op_type op_type = ctx->op_type; 2957 uint8_t *buf; 2958 uint64_t offset; 2959 uint64_t length; 2960 uint64_t op_length; 2961 2962 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2963 bs_sequence_finish(ctx->seq, bserrno); 2964 if (ctx->in_submit_ctx) { 2965 /* Defer freeing of the ctx object, since it will be 2966 * accessed when this unwinds back to the submission 2967 * context. 2968 */ 2969 ctx->done = true; 2970 } else { 2971 free(ctx); 2972 } 2973 return; 2974 } 2975 2976 if (ctx->in_submit_ctx) { 2977 /* If this split operation completed in the context 2978 * of its submission, mark the flag and return immediately 2979 * to avoid recursion. 2980 */ 2981 ctx->completed_in_submit_ctx = true; 2982 return; 2983 } 2984 2985 while (true) { 2986 ctx->completed_in_submit_ctx = false; 2987 2988 offset = ctx->io_unit_offset; 2989 length = ctx->io_units_remaining; 2990 buf = ctx->curr_payload; 2991 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob, 2992 offset)); 2993 2994 /* Update length and payload for next operation */ 2995 ctx->io_units_remaining -= op_length; 2996 ctx->io_unit_offset += op_length; 2997 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 2998 ctx->curr_payload += op_length * blob->bs->io_unit_size; 2999 } 3000 3001 assert(!ctx->in_submit_ctx); 3002 ctx->in_submit_ctx = true; 3003 3004 switch (op_type) { 3005 case SPDK_BLOB_READ: 3006 spdk_blob_io_read(blob, ch, buf, offset, op_length, 3007 blob_request_submit_op_split_next, ctx); 3008 break; 3009 case SPDK_BLOB_WRITE: 3010 spdk_blob_io_write(blob, ch, buf, offset, op_length, 3011 blob_request_submit_op_split_next, ctx); 3012 break; 3013 case SPDK_BLOB_UNMAP: 3014 spdk_blob_io_unmap(blob, ch, offset, op_length, 3015 blob_request_submit_op_split_next, ctx); 3016 break; 3017 case SPDK_BLOB_WRITE_ZEROES: 3018 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 3019 blob_request_submit_op_split_next, ctx); 3020 break; 3021 case SPDK_BLOB_READV: 3022 case SPDK_BLOB_WRITEV: 3023 SPDK_ERRLOG("readv/write not valid\n"); 3024 bs_sequence_finish(ctx->seq, -EINVAL); 3025 free(ctx); 3026 return; 3027 } 3028 3029 #ifndef __clang_analyzer__ 3030 /* scan-build reports a false positive around accessing the ctx here. It 3031 * forms a path that recursively calls this function, but then says 3032 * "assuming ctx->in_submit_ctx is false", when that isn't possible. 3033 * This path does free(ctx), returns to here, and reports a use-after-free 3034 * bug. Wrapping this bit of code so that scan-build doesn't see it 3035 * works around the scan-build bug. 3036 */ 3037 assert(ctx->in_submit_ctx); 3038 ctx->in_submit_ctx = false; 3039 3040 /* If the operation completed immediately, loop back and submit the 3041 * next operation. 
Otherwise we can return and the next split 3042 * operation will get submitted when this current operation is 3043 * later completed asynchronously. 3044 */ 3045 if (ctx->completed_in_submit_ctx) { 3046 continue; 3047 } else if (ctx->done) { 3048 free(ctx); 3049 } 3050 #endif 3051 break; 3052 } 3053 } 3054 3055 static void 3056 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 3057 void *payload, uint64_t offset, uint64_t length, 3058 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3059 { 3060 struct op_split_ctx *ctx; 3061 spdk_bs_sequence_t *seq; 3062 struct spdk_bs_cpl cpl; 3063 3064 assert(blob != NULL); 3065 3066 ctx = calloc(1, sizeof(struct op_split_ctx)); 3067 if (ctx == NULL) { 3068 cb_fn(cb_arg, -ENOMEM); 3069 return; 3070 } 3071 3072 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3073 cpl.u.blob_basic.cb_fn = cb_fn; 3074 cpl.u.blob_basic.cb_arg = cb_arg; 3075 3076 seq = bs_sequence_start_blob(ch, &cpl, blob); 3077 if (!seq) { 3078 free(ctx); 3079 cb_fn(cb_arg, -ENOMEM); 3080 return; 3081 } 3082 3083 ctx->blob = blob; 3084 ctx->channel = ch; 3085 ctx->curr_payload = payload; 3086 ctx->io_unit_offset = offset; 3087 ctx->io_units_remaining = length; 3088 ctx->op_type = op_type; 3089 ctx->seq = seq; 3090 3091 blob_request_submit_op_split_next(ctx, 0); 3092 } 3093 3094 static void 3095 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno) 3096 { 3097 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 3098 3099 if (bserrno) { 3100 bs_sequence_finish(ctx->seq, bserrno); 3101 free(ctx); 3102 return; 3103 } 3104 3105 blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num, 3106 ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx); 3107 } 3108 3109 static void 3110 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 3111 void *payload, uint64_t offset, uint64_t length, 3112 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3113 { 3114 struct spdk_bs_cpl cpl; 3115 uint64_t lba; 3116 uint64_t lba_count; 3117 bool is_allocated; 3118 3119 assert(blob != NULL); 3120 3121 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3122 cpl.u.blob_basic.cb_fn = cb_fn; 3123 cpl.u.blob_basic.cb_arg = cb_arg; 3124 3125 if (blob->frozen_refcnt) { 3126 /* This blob I/O is frozen */ 3127 spdk_bs_user_op_t *op; 3128 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3129 3130 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3131 if (!op) { 3132 cb_fn(cb_arg, -ENOMEM); 3133 return; 3134 } 3135 3136 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3137 3138 return; 3139 } 3140 3141 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3142 3143 switch (op_type) { 3144 case SPDK_BLOB_READ: { 3145 spdk_bs_batch_t *batch; 3146 3147 batch = bs_batch_open(_ch, &cpl, blob); 3148 if (!batch) { 3149 cb_fn(cb_arg, -ENOMEM); 3150 return; 3151 } 3152 3153 if (is_allocated) { 3154 /* Read from the blob */ 3155 bs_batch_read_dev(batch, payload, lba, lba_count); 3156 } else { 3157 /* Read from the backing block device */ 3158 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 3159 } 3160 3161 bs_batch_close(batch); 3162 break; 3163 } 3164 case SPDK_BLOB_WRITE: 3165 case SPDK_BLOB_WRITE_ZEROES: { 3166 if (is_allocated) { 3167 /* Write to the blob */ 3168 spdk_bs_batch_t *batch; 3169 3170 if (lba_count == 0) { 3171 cb_fn(cb_arg, 0); 3172 return; 3173 } 3174 3175 batch = bs_batch_open(_ch, &cpl, blob); 3176 if (!batch) 
{ 3177 cb_fn(cb_arg, -ENOMEM); 3178 return; 3179 } 3180 3181 if (op_type == SPDK_BLOB_WRITE) { 3182 bs_batch_write_dev(batch, payload, lba, lba_count); 3183 } else { 3184 bs_batch_write_zeroes_dev(batch, lba, lba_count); 3185 } 3186 3187 bs_batch_close(batch); 3188 } else { 3189 /* Queue this operation and allocate the cluster */ 3190 spdk_bs_user_op_t *op; 3191 3192 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3193 if (!op) { 3194 cb_fn(cb_arg, -ENOMEM); 3195 return; 3196 } 3197 3198 bs_allocate_and_copy_cluster(blob, _ch, offset, op); 3199 } 3200 break; 3201 } 3202 case SPDK_BLOB_UNMAP: { 3203 struct spdk_blob_free_cluster_ctx *ctx = NULL; 3204 spdk_bs_batch_t *batch; 3205 3206 /* if aligned with cluster release cluster */ 3207 if (spdk_blob_is_thin_provisioned(blob) && is_allocated && 3208 bs_io_units_per_cluster(blob) == length) { 3209 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 3210 uint32_t cluster_start_page; 3211 uint32_t cluster_number; 3212 3213 assert(offset % bs_io_units_per_cluster(blob) == 0); 3214 3215 /* Round the io_unit offset down to the first page in the cluster */ 3216 cluster_start_page = bs_io_unit_to_cluster_start(blob, offset); 3217 3218 /* Calculate which index in the metadata cluster array the corresponding 3219 * cluster is supposed to be at. */ 3220 cluster_number = bs_io_unit_to_cluster_number(blob, offset); 3221 3222 ctx = calloc(1, sizeof(*ctx)); 3223 if (!ctx) { 3224 cb_fn(cb_arg, -ENOMEM); 3225 return; 3226 } 3227 /* When freeing a cluster the flow should be (in order): 3228 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak 3229 * old data) 3230 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the 3231 * cluster), update and sync metadata freeing the cluster 3232 * 3. 
Once metadata update is done, complete the user unmap request 3233 */ 3234 ctx->blob = blob; 3235 ctx->page = cluster_start_page; 3236 ctx->cluster_num = cluster_number; 3237 ctx->md_page = bs_channel->new_cluster_page; 3238 ctx->seq = bs_sequence_start_bs(_ch, &cpl); 3239 if (!ctx->seq) { 3240 free(ctx); 3241 cb_fn(cb_arg, -ENOMEM); 3242 return; 3243 } 3244 3245 if (blob->use_extent_table) { 3246 ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number); 3247 } 3248 3249 cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete; 3250 cpl.u.blob_basic.cb_arg = ctx; 3251 } 3252 3253 batch = bs_batch_open(_ch, &cpl, blob); 3254 if (!batch) { 3255 free(ctx); 3256 cb_fn(cb_arg, -ENOMEM); 3257 return; 3258 } 3259 3260 if (is_allocated) { 3261 bs_batch_unmap_dev(batch, lba, lba_count); 3262 } 3263 3264 bs_batch_close(batch); 3265 break; 3266 } 3267 case SPDK_BLOB_READV: 3268 case SPDK_BLOB_WRITEV: 3269 SPDK_ERRLOG("readv/write not valid\n"); 3270 cb_fn(cb_arg, -EINVAL); 3271 break; 3272 } 3273 } 3274 3275 static void 3276 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3277 void *payload, uint64_t offset, uint64_t length, 3278 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3279 { 3280 assert(blob != NULL); 3281 3282 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 3283 cb_fn(cb_arg, -EPERM); 3284 return; 3285 } 3286 3287 if (length == 0) { 3288 cb_fn(cb_arg, 0); 3289 return; 3290 } 3291 3292 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3293 cb_fn(cb_arg, -EINVAL); 3294 return; 3295 } 3296 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) { 3297 blob_request_submit_op_single(_channel, blob, payload, offset, length, 3298 cb_fn, cb_arg, op_type); 3299 } else { 3300 blob_request_submit_op_split(_channel, blob, payload, offset, length, 3301 cb_fn, cb_arg, op_type); 3302 } 3303 } 3304 3305 struct rw_iov_ctx { 3306 struct spdk_blob *blob; 3307 struct spdk_io_channel *channel; 3308 spdk_blob_op_complete cb_fn; 3309 void *cb_arg; 3310 bool read; 3311 int iovcnt; 3312 struct iovec *orig_iov; 3313 uint64_t io_unit_offset; 3314 uint64_t io_units_remaining; 3315 uint64_t io_units_done; 3316 struct spdk_blob_ext_io_opts *ext_io_opts; 3317 struct iovec iov[0]; 3318 }; 3319 3320 static void 3321 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3322 { 3323 assert(cb_arg == NULL); 3324 bs_sequence_finish(seq, bserrno); 3325 } 3326 3327 static void 3328 rw_iov_split_next(void *cb_arg, int bserrno) 3329 { 3330 struct rw_iov_ctx *ctx = cb_arg; 3331 struct spdk_blob *blob = ctx->blob; 3332 struct iovec *iov, *orig_iov; 3333 int iovcnt; 3334 size_t orig_iovoff; 3335 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 3336 uint64_t byte_count; 3337 3338 if (bserrno != 0 || ctx->io_units_remaining == 0) { 3339 ctx->cb_fn(ctx->cb_arg, bserrno); 3340 free(ctx); 3341 return; 3342 } 3343 3344 io_unit_offset = ctx->io_unit_offset; 3345 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 3346 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 3347 /* 3348 * Get index and offset into the original iov array for our current position in the I/O sequence. 3349 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 3350 * point to the current position in the I/O sequence. 
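 * Hypothetical example: with two 8 KiB source iovs and 12 KiB of the I/O
 * already done, the loop below leaves orig_iov pointing at the second iov
 * with orig_iovoff == 4096.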
3351 */ 3352 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 3353 orig_iov = &ctx->orig_iov[0]; 3354 orig_iovoff = 0; 3355 while (byte_count > 0) { 3356 if (byte_count >= orig_iov->iov_len) { 3357 byte_count -= orig_iov->iov_len; 3358 orig_iov++; 3359 } else { 3360 orig_iovoff = byte_count; 3361 byte_count = 0; 3362 } 3363 } 3364 3365 /* 3366 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 3367 * bytes of this next I/O remain to be accounted for in the new iov array. 3368 */ 3369 byte_count = io_units_count * blob->bs->io_unit_size; 3370 iov = &ctx->iov[0]; 3371 iovcnt = 0; 3372 while (byte_count > 0) { 3373 assert(iovcnt < ctx->iovcnt); 3374 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 3375 iov->iov_base = orig_iov->iov_base + orig_iovoff; 3376 byte_count -= iov->iov_len; 3377 orig_iovoff = 0; 3378 orig_iov++; 3379 iov++; 3380 iovcnt++; 3381 } 3382 3383 ctx->io_unit_offset += io_units_count; 3384 ctx->io_units_remaining -= io_units_count; 3385 ctx->io_units_done += io_units_count; 3386 iov = &ctx->iov[0]; 3387 3388 if (ctx->read) { 3389 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3390 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3391 } else { 3392 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3393 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3394 } 3395 } 3396 3397 static void 3398 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3399 struct iovec *iov, int iovcnt, 3400 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 3401 struct spdk_blob_ext_io_opts *ext_io_opts) 3402 { 3403 struct spdk_bs_cpl cpl; 3404 3405 assert(blob != NULL); 3406 3407 if (!read && blob->data_ro) { 3408 cb_fn(cb_arg, -EPERM); 3409 return; 3410 } 3411 3412 if (length == 0) { 3413 cb_fn(cb_arg, 0); 3414 return; 3415 } 3416 3417 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3418 cb_fn(cb_arg, -EINVAL); 3419 return; 3420 } 3421 3422 /* 3423 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 3424 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 3425 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 3426 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 3427 * to allocate a separate iov array and split the I/O such that none of the resulting 3428 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 3429 * but since this case happens very infrequently, any performance impact will be negligible. 3430 * 3431 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 3432 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 3433 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 3434 * when the batch was completed, to allow for freeing the memory for the iov arrays. 
3435 */ 3436 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 3437 uint64_t lba_count; 3438 uint64_t lba; 3439 bool is_allocated; 3440 3441 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3442 cpl.u.blob_basic.cb_fn = cb_fn; 3443 cpl.u.blob_basic.cb_arg = cb_arg; 3444 3445 if (blob->frozen_refcnt) { 3446 /* This blob I/O is frozen */ 3447 enum spdk_blob_op_type op_type; 3448 spdk_bs_user_op_t *op; 3449 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 3450 3451 op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 3452 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 3453 if (!op) { 3454 cb_fn(cb_arg, -ENOMEM); 3455 return; 3456 } 3457 3458 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3459 3460 return; 3461 } 3462 3463 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3464 3465 if (read) { 3466 spdk_bs_sequence_t *seq; 3467 3468 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3469 if (!seq) { 3470 cb_fn(cb_arg, -ENOMEM); 3471 return; 3472 } 3473 3474 seq->ext_io_opts = ext_io_opts; 3475 3476 if (is_allocated) { 3477 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3478 } else { 3479 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3480 rw_iov_done, NULL); 3481 } 3482 } else { 3483 if (is_allocated) { 3484 spdk_bs_sequence_t *seq; 3485 3486 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3487 if (!seq) { 3488 cb_fn(cb_arg, -ENOMEM); 3489 return; 3490 } 3491 3492 seq->ext_io_opts = ext_io_opts; 3493 3494 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3495 } else { 3496 /* Queue this operation and allocate the cluster */ 3497 spdk_bs_user_op_t *op; 3498 3499 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3500 length); 3501 if (!op) { 3502 cb_fn(cb_arg, -ENOMEM); 3503 return; 3504 } 3505 3506 op->ext_io_opts = ext_io_opts; 3507 3508 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3509 } 3510 } 3511 } else { 3512 struct rw_iov_ctx *ctx; 3513 3514 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3515 if (ctx == NULL) { 3516 cb_fn(cb_arg, -ENOMEM); 3517 return; 3518 } 3519 3520 ctx->blob = blob; 3521 ctx->channel = _channel; 3522 ctx->cb_fn = cb_fn; 3523 ctx->cb_arg = cb_arg; 3524 ctx->read = read; 3525 ctx->orig_iov = iov; 3526 ctx->iovcnt = iovcnt; 3527 ctx->io_unit_offset = offset; 3528 ctx->io_units_remaining = length; 3529 ctx->io_units_done = 0; 3530 ctx->ext_io_opts = ext_io_opts; 3531 3532 rw_iov_split_next(ctx, 0); 3533 } 3534 } 3535 3536 static struct spdk_blob * 3537 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3538 { 3539 struct spdk_blob find; 3540 3541 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3542 return NULL; 3543 } 3544 3545 find.id = blobid; 3546 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3547 } 3548 3549 static void 3550 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3551 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3552 { 3553 assert(blob != NULL); 3554 *snapshot_entry = NULL; 3555 *clone_entry = NULL; 3556 3557 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3558 return; 3559 } 3560 3561 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3562 if ((*snapshot_entry)->id == blob->parent_id) { 3563 break; 3564 } 3565 } 3566 3567 if (*snapshot_entry != NULL) { 3568 TAILQ_FOREACH(*clone_entry, 
&(*snapshot_entry)->clones, link) { 3569 if ((*clone_entry)->id == blob->id) { 3570 break; 3571 } 3572 } 3573 3574 assert(*clone_entry != NULL); 3575 } 3576 } 3577 3578 static int 3579 bs_channel_create(void *io_device, void *ctx_buf) 3580 { 3581 struct spdk_blob_store *bs = io_device; 3582 struct spdk_bs_channel *channel = ctx_buf; 3583 struct spdk_bs_dev *dev; 3584 uint32_t max_ops = bs->max_channel_ops; 3585 uint32_t i; 3586 3587 dev = bs->dev; 3588 3589 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3590 if (!channel->req_mem) { 3591 return -1; 3592 } 3593 3594 TAILQ_INIT(&channel->reqs); 3595 3596 for (i = 0; i < max_ops; i++) { 3597 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3598 } 3599 3600 channel->bs = bs; 3601 channel->dev = dev; 3602 channel->dev_channel = dev->create_channel(dev); 3603 3604 if (!channel->dev_channel) { 3605 SPDK_ERRLOG("Failed to create device channel.\n"); 3606 free(channel->req_mem); 3607 return -1; 3608 } 3609 3610 channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, 3611 SPDK_MALLOC_DMA); 3612 if (!channel->new_cluster_page) { 3613 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3614 free(channel->req_mem); 3615 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3616 return -1; 3617 } 3618 3619 TAILQ_INIT(&channel->need_cluster_alloc); 3620 TAILQ_INIT(&channel->queued_io); 3621 RB_INIT(&channel->esnap_channels); 3622 3623 return 0; 3624 } 3625 3626 static void 3627 bs_channel_destroy(void *io_device, void *ctx_buf) 3628 { 3629 struct spdk_bs_channel *channel = ctx_buf; 3630 spdk_bs_user_op_t *op; 3631 3632 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3633 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3634 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3635 bs_user_op_abort(op, -EIO); 3636 } 3637 3638 while (!TAILQ_EMPTY(&channel->queued_io)) { 3639 op = TAILQ_FIRST(&channel->queued_io); 3640 TAILQ_REMOVE(&channel->queued_io, op, link); 3641 bs_user_op_abort(op, -EIO); 3642 } 3643 3644 blob_esnap_destroy_bs_channel(channel); 3645 3646 free(channel->req_mem); 3647 spdk_free(channel->new_cluster_page); 3648 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3649 } 3650 3651 static void 3652 bs_dev_destroy(void *io_device) 3653 { 3654 struct spdk_blob_store *bs = io_device; 3655 struct spdk_blob *blob, *blob_tmp; 3656 3657 bs->dev->destroy(bs->dev); 3658 3659 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3660 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3661 spdk_bit_array_clear(bs->open_blobids, blob->id); 3662 blob_free(blob); 3663 } 3664 3665 spdk_spin_destroy(&bs->used_lock); 3666 3667 spdk_bit_array_free(&bs->open_blobids); 3668 spdk_bit_array_free(&bs->used_blobids); 3669 spdk_bit_array_free(&bs->used_md_pages); 3670 spdk_bit_pool_free(&bs->used_clusters); 3671 /* 3672 * If this function is called for any reason except a successful unload, 3673 * the unload_cpl type will be NONE and this will be a nop. 
3674 */ 3675 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3676 3677 free(bs); 3678 } 3679 3680 static int 3681 bs_blob_list_add(struct spdk_blob *blob) 3682 { 3683 spdk_blob_id snapshot_id; 3684 struct spdk_blob_list *snapshot_entry = NULL; 3685 struct spdk_blob_list *clone_entry = NULL; 3686 3687 assert(blob != NULL); 3688 3689 snapshot_id = blob->parent_id; 3690 if (snapshot_id == SPDK_BLOBID_INVALID || 3691 snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 3692 return 0; 3693 } 3694 3695 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3696 if (snapshot_entry == NULL) { 3697 /* Snapshot not found */ 3698 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3699 if (snapshot_entry == NULL) { 3700 return -ENOMEM; 3701 } 3702 snapshot_entry->id = snapshot_id; 3703 TAILQ_INIT(&snapshot_entry->clones); 3704 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3705 } else { 3706 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3707 if (clone_entry->id == blob->id) { 3708 break; 3709 } 3710 } 3711 } 3712 3713 if (clone_entry == NULL) { 3714 /* Clone not found */ 3715 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3716 if (clone_entry == NULL) { 3717 return -ENOMEM; 3718 } 3719 clone_entry->id = blob->id; 3720 TAILQ_INIT(&clone_entry->clones); 3721 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3722 snapshot_entry->clone_count++; 3723 } 3724 3725 return 0; 3726 } 3727 3728 static void 3729 bs_blob_list_remove(struct spdk_blob *blob) 3730 { 3731 struct spdk_blob_list *snapshot_entry = NULL; 3732 struct spdk_blob_list *clone_entry = NULL; 3733 3734 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3735 3736 if (snapshot_entry == NULL) { 3737 return; 3738 } 3739 3740 blob->parent_id = SPDK_BLOBID_INVALID; 3741 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3742 free(clone_entry); 3743 3744 snapshot_entry->clone_count--; 3745 } 3746 3747 static int 3748 bs_blob_list_free(struct spdk_blob_store *bs) 3749 { 3750 struct spdk_blob_list *snapshot_entry; 3751 struct spdk_blob_list *snapshot_entry_tmp; 3752 struct spdk_blob_list *clone_entry; 3753 struct spdk_blob_list *clone_entry_tmp; 3754 3755 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3756 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3757 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3758 free(clone_entry); 3759 } 3760 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3761 free(snapshot_entry); 3762 } 3763 3764 return 0; 3765 } 3766 3767 static void 3768 bs_free(struct spdk_blob_store *bs) 3769 { 3770 bs_blob_list_free(bs); 3771 3772 bs_unregister_md_thread(bs); 3773 spdk_io_device_unregister(bs, bs_dev_destroy); 3774 } 3775 3776 void 3777 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3778 { 3779 3780 if (!opts) { 3781 SPDK_ERRLOG("opts should not be NULL\n"); 3782 return; 3783 } 3784 3785 if (!opts_size) { 3786 SPDK_ERRLOG("opts_size should not be zero value\n"); 3787 return; 3788 } 3789 3790 memset(opts, 0, opts_size); 3791 opts->opts_size = opts_size; 3792 3793 #define FIELD_OK(field) \ 3794 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3795 3796 #define SET_FIELD(field, value) \ 3797 if (FIELD_OK(field)) { \ 3798 opts->field = value; \ 3799 } \ 3800 3801 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3802 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3803 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3804 
SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3805 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3806 3807 if (FIELD_OK(bstype)) { 3808 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3809 } 3810 3811 SET_FIELD(iter_cb_fn, NULL); 3812 SET_FIELD(iter_cb_arg, NULL); 3813 SET_FIELD(force_recover, false); 3814 SET_FIELD(esnap_bs_dev_create, NULL); 3815 SET_FIELD(esnap_ctx, NULL); 3816 3817 #undef FIELD_OK 3818 #undef SET_FIELD 3819 } 3820 3821 static int 3822 bs_opts_verify(struct spdk_bs_opts *opts) 3823 { 3824 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3825 opts->max_channel_ops == 0) { 3826 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3827 return -1; 3828 } 3829 3830 return 0; 3831 } 3832 3833 /* START spdk_bs_load */ 3834 3835 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3836 3837 struct spdk_bs_load_ctx { 3838 struct spdk_blob_store *bs; 3839 struct spdk_bs_super_block *super; 3840 3841 struct spdk_bs_md_mask *mask; 3842 bool in_page_chain; 3843 uint32_t page_index; 3844 uint32_t cur_page; 3845 struct spdk_blob_md_page *page; 3846 3847 uint64_t num_extent_pages; 3848 uint32_t *extent_page_num; 3849 struct spdk_blob_md_page *extent_pages; 3850 struct spdk_bit_array *used_clusters; 3851 3852 spdk_bs_sequence_t *seq; 3853 spdk_blob_op_with_handle_complete iter_cb_fn; 3854 void *iter_cb_arg; 3855 struct spdk_blob *blob; 3856 spdk_blob_id blobid; 3857 3858 bool force_recover; 3859 3860 /* These fields are used in the spdk_bs_dump path. */ 3861 bool dumping; 3862 FILE *fp; 3863 spdk_bs_dump_print_xattr print_xattr_fn; 3864 char xattr_name[4096]; 3865 }; 3866 3867 static int 3868 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3869 struct spdk_bs_load_ctx **_ctx) 3870 { 3871 struct spdk_blob_store *bs; 3872 struct spdk_bs_load_ctx *ctx; 3873 uint64_t dev_size; 3874 int rc; 3875 3876 dev_size = dev->blocklen * dev->blockcnt; 3877 if (dev_size < opts->cluster_sz) { 3878 /* Device size cannot be smaller than cluster size of blobstore */ 3879 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3880 dev_size, opts->cluster_sz); 3881 return -ENOSPC; 3882 } 3883 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 3884 /* Cluster size cannot be smaller than page size */ 3885 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3886 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3887 return -EINVAL; 3888 } 3889 bs = calloc(1, sizeof(struct spdk_blob_store)); 3890 if (!bs) { 3891 return -ENOMEM; 3892 } 3893 3894 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3895 if (!ctx) { 3896 free(bs); 3897 return -ENOMEM; 3898 } 3899 3900 ctx->bs = bs; 3901 ctx->iter_cb_fn = opts->iter_cb_fn; 3902 ctx->iter_cb_arg = opts->iter_cb_arg; 3903 ctx->force_recover = opts->force_recover; 3904 3905 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3906 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3907 if (!ctx->super) { 3908 free(ctx); 3909 free(bs); 3910 return -ENOMEM; 3911 } 3912 3913 RB_INIT(&bs->open_blobs); 3914 TAILQ_INIT(&bs->snapshots); 3915 bs->dev = dev; 3916 bs->md_thread = spdk_get_thread(); 3917 assert(bs->md_thread != NULL); 3918 3919 /* 3920 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3921 * even multiple of the cluster size. 
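 * E.g. (hypothetical numbers) with 512-byte blocks, a 1 MiB cluster and
 * blockcnt == 2000000, the division below yields 2000000 / 2048 == 976
 * total clusters; the trailing partial cluster is simply ignored.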
3922 */ 3923 bs->cluster_sz = opts->cluster_sz; 3924 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3925 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3926 if (!ctx->used_clusters) { 3927 spdk_free(ctx->super); 3928 free(ctx); 3929 free(bs); 3930 return -ENOMEM; 3931 } 3932 3933 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3934 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3935 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3936 } 3937 bs->num_free_clusters = bs->total_clusters; 3938 bs->io_unit_size = dev->blocklen; 3939 3940 bs->max_channel_ops = opts->max_channel_ops; 3941 bs->super_blob = SPDK_BLOBID_INVALID; 3942 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3943 bs->esnap_bs_dev_create = opts->esnap_bs_dev_create; 3944 bs->esnap_ctx = opts->esnap_ctx; 3945 3946 /* The metadata is assumed to be at least 1 page */ 3947 bs->used_md_pages = spdk_bit_array_create(1); 3948 bs->used_blobids = spdk_bit_array_create(0); 3949 bs->open_blobids = spdk_bit_array_create(0); 3950 3951 spdk_spin_init(&bs->used_lock); 3952 3953 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3954 sizeof(struct spdk_bs_channel), "blobstore"); 3955 rc = bs_register_md_thread(bs); 3956 if (rc == -1) { 3957 spdk_io_device_unregister(bs, NULL); 3958 spdk_spin_destroy(&bs->used_lock); 3959 spdk_bit_array_free(&bs->open_blobids); 3960 spdk_bit_array_free(&bs->used_blobids); 3961 spdk_bit_array_free(&bs->used_md_pages); 3962 spdk_bit_array_free(&ctx->used_clusters); 3963 spdk_free(ctx->super); 3964 free(ctx); 3965 free(bs); 3966 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3967 return -ENOMEM; 3968 } 3969 3970 *_ctx = ctx; 3971 *_bs = bs; 3972 return 0; 3973 } 3974 3975 static void 3976 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3977 { 3978 assert(bserrno != 0); 3979 3980 spdk_free(ctx->super); 3981 bs_sequence_finish(ctx->seq, bserrno); 3982 bs_free(ctx->bs); 3983 spdk_bit_array_free(&ctx->used_clusters); 3984 free(ctx); 3985 } 3986 3987 static void 3988 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3989 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3990 { 3991 /* Update the values in the super block */ 3992 super->super_blob = bs->super_blob; 3993 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 3994 super->crc = blob_md_page_calc_crc(super); 3995 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 3996 bs_byte_to_lba(bs, sizeof(*super)), 3997 cb_fn, cb_arg); 3998 } 3999 4000 static void 4001 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4002 { 4003 struct spdk_bs_load_ctx *ctx = arg; 4004 uint64_t mask_size, lba, lba_count; 4005 4006 /* Write out the used clusters mask */ 4007 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 4008 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4009 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4010 if (!ctx->mask) { 4011 bs_load_ctx_fail(ctx, -ENOMEM); 4012 return; 4013 } 4014 4015 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 4016 ctx->mask->length = ctx->bs->total_clusters; 4017 /* We could get here through the normal unload path, or through dirty 4018 * shutdown recovery. For the normal unload path, we use the mask from 4019 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 4020 * only the bit array from the load ctx. 
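* (bs->used_clusters is only created once load or init completes, so checking
* it below is what distinguishes the two paths.)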
4021 */ 4022 if (ctx->bs->used_clusters) { 4023 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 4024 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 4025 } else { 4026 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 4027 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 4028 } 4029 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4030 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4031 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4032 } 4033 4034 static void 4035 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4036 { 4037 struct spdk_bs_load_ctx *ctx = arg; 4038 uint64_t mask_size, lba, lba_count; 4039 4040 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 4041 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4042 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4043 if (!ctx->mask) { 4044 bs_load_ctx_fail(ctx, -ENOMEM); 4045 return; 4046 } 4047 4048 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 4049 ctx->mask->length = ctx->super->md_len; 4050 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 4051 4052 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4053 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4054 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4055 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4056 } 4057 4058 static void 4059 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 4060 { 4061 struct spdk_bs_load_ctx *ctx = arg; 4062 uint64_t mask_size, lba, lba_count; 4063 4064 if (ctx->super->used_blobid_mask_len == 0) { 4065 /* 4066 * This is a pre-v3 on-disk format where the blobid mask does not get 4067 * written to disk. 
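* On the next load, the zero used_blobid_mask_len makes bs_load_super_cpl()
* fall back to bs_recover(), which rebuilds the blobid mask by replaying the
* raw metadata pages.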
4068 */ 4069 cb_fn(seq, arg, 0); 4070 return; 4071 } 4072 4073 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 4074 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 4075 SPDK_MALLOC_DMA); 4076 if (!ctx->mask) { 4077 bs_load_ctx_fail(ctx, -ENOMEM); 4078 return; 4079 } 4080 4081 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 4082 ctx->mask->length = ctx->super->md_len; 4083 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 4084 4085 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 4086 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4087 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4088 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 4089 } 4090 4091 static void 4092 blob_set_thin_provision(struct spdk_blob *blob) 4093 { 4094 blob_verify_md_op(blob); 4095 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4096 blob->state = SPDK_BLOB_STATE_DIRTY; 4097 } 4098 4099 static void 4100 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 4101 { 4102 blob_verify_md_op(blob); 4103 blob->clear_method = clear_method; 4104 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 4105 blob->state = SPDK_BLOB_STATE_DIRTY; 4106 } 4107 4108 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 4109 4110 static void 4111 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 4112 { 4113 struct spdk_bs_load_ctx *ctx = cb_arg; 4114 spdk_blob_id id; 4115 int64_t page_num; 4116 4117 /* Iterate to next blob (we can't use spdk_bs_iter_next function as our 4118 * last blob has been removed */ 4119 page_num = bs_blobid_to_page(ctx->blobid); 4120 page_num++; 4121 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 4122 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 4123 bs_load_iter(ctx, NULL, -ENOENT); 4124 return; 4125 } 4126 4127 id = bs_page_to_blobid(page_num); 4128 4129 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 4130 } 4131 4132 static void 4133 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 4134 { 4135 struct spdk_bs_load_ctx *ctx = cb_arg; 4136 4137 if (bserrno != 0) { 4138 SPDK_ERRLOG("Failed to close corrupted blob\n"); 4139 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4140 return; 4141 } 4142 4143 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 4144 } 4145 4146 static void 4147 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 4148 { 4149 struct spdk_bs_load_ctx *ctx = cb_arg; 4150 uint64_t i; 4151 4152 if (bserrno != 0) { 4153 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4154 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4155 return; 4156 } 4157 4158 /* Snapshot and clone have the same copy of cluster map and extent pages 4159 * at this point. Let's clear both for snapshot now, 4160 * so that it won't be cleared for clone later when we remove snapshot. 
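* (The clone still references those clusters, so releasing them while deleting
* the snapshot would corrupt the clone.)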
4161 * Also set thin provision to pass data corruption check */ 4162 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 4163 ctx->blob->active.clusters[i] = 0; 4164 } 4165 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 4166 ctx->blob->active.extent_pages[i] = 0; 4167 } 4168 4169 ctx->blob->active.num_allocated_clusters = 0; 4170 4171 ctx->blob->md_ro = false; 4172 4173 blob_set_thin_provision(ctx->blob); 4174 4175 ctx->blobid = ctx->blob->id; 4176 4177 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 4178 } 4179 4180 static void 4181 bs_update_corrupted_blob(void *cb_arg, int bserrno) 4182 { 4183 struct spdk_bs_load_ctx *ctx = cb_arg; 4184 4185 if (bserrno != 0) { 4186 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4187 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4188 return; 4189 } 4190 4191 ctx->blob->md_ro = false; 4192 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 4193 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 4194 spdk_blob_set_read_only(ctx->blob); 4195 4196 if (ctx->iter_cb_fn) { 4197 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 4198 } 4199 bs_blob_list_add(ctx->blob); 4200 4201 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4202 } 4203 4204 static void 4205 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 4206 { 4207 struct spdk_bs_load_ctx *ctx = cb_arg; 4208 4209 if (bserrno != 0) { 4210 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 4211 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4212 return; 4213 } 4214 4215 if (blob->parent_id == ctx->blob->id) { 4216 /* Power failure occurred before updating clone (snapshot delete case) 4217 * or after updating clone (creating snapshot case) - keep snapshot */ 4218 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 4219 } else { 4220 /* Power failure occurred after updating clone (snapshot delete case) 4221 * or before updating clone (creating snapshot case) - remove snapshot */ 4222 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 4223 } 4224 } 4225 4226 static void 4227 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 4228 { 4229 struct spdk_bs_load_ctx *ctx = arg; 4230 const void *value; 4231 size_t len; 4232 int rc = 0; 4233 4234 if (bserrno == 0) { 4235 /* Examine blob if it is corrupted after power failure. Fix 4236 * the ones that can be fixed and remove any other corrupted 4237 * ones. If it is not corrupted just process it */ 4238 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 4239 if (rc != 0) { 4240 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 4241 if (rc != 0) { 4242 /* Not corrupted - process it and continue with iterating through blobs */ 4243 if (ctx->iter_cb_fn) { 4244 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 4245 } 4246 bs_blob_list_add(blob); 4247 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 4248 return; 4249 } 4250 4251 } 4252 4253 assert(len == sizeof(spdk_blob_id)); 4254 4255 ctx->blob = blob; 4256 4257 /* Open clone to check if we are able to fix this blob or should we remove it */ 4258 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 4259 return; 4260 } else if (bserrno == -ENOENT) { 4261 bserrno = 0; 4262 } else { 4263 /* 4264 * This case needs to be looked at further. Same problem 4265 * exists with applications that rely on explicit blob 4266 * iteration. We should just skip the blob that failed 4267 * to load and continue on to the next one. 
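* Until that is implemented, iteration stops here and the load completes
* with this bserrno.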
4268 */
4269 SPDK_ERRLOG("Error in iterating blobs\n");
4270 }
4271
4272 ctx->iter_cb_fn = NULL;
4273
4274 spdk_free(ctx->super);
4275 spdk_free(ctx->mask);
4276 bs_sequence_finish(ctx->seq, bserrno);
4277 free(ctx);
4278 }
4279
4280 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
4281
4282 static void
4283 bs_load_complete(struct spdk_bs_load_ctx *ctx)
4284 {
4285 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
4286 if (ctx->dumping) {
4287 bs_dump_read_md_page(ctx->seq, ctx);
4288 return;
4289 }
4290 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx);
4291 }
4292
4293 static void
4294 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4295 {
4296 struct spdk_bs_load_ctx *ctx = cb_arg;
4297 int rc;
4298
4299 /* The type must be correct */
4300 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
4301
4302 /* The length of the mask (in bits) must not be greater than
4303 * the length of the buffer (converted to bits) */
4304 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
4305
4306 /* The length of the mask must be exactly equal to the size
4307 * (in pages) of the metadata region */
4308 assert(ctx->mask->length == ctx->super->md_len);
4309
4310 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
4311 if (rc < 0) {
4312 spdk_free(ctx->mask);
4313 bs_load_ctx_fail(ctx, rc);
4314 return;
4315 }
4316
4317 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask);
4318 bs_load_complete(ctx);
4319 }
4320
4321 static void
4322 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4323 {
4324 struct spdk_bs_load_ctx *ctx = cb_arg;
4325 uint64_t lba, lba_count, mask_size;
4326 int rc;
4327
4328 if (bserrno != 0) {
4329 bs_load_ctx_fail(ctx, bserrno);
4330 return;
4331 }
4332
4333 /* The type must be correct */
4334 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
4335 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4336 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
4337 struct spdk_blob_md_page) * 8));
4338 /*
4339 * The length of the mask must be equal to or larger than the total number of clusters. It may be
4340 * larger than the total number of clusters due to a failed spdk_bs_grow.
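* In that case the extra tail bits are stale, so the in-memory mask length is
* clamped to total_clusters below.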
4341 */
4342 assert(ctx->mask->length >= ctx->bs->total_clusters);
4343 if (ctx->mask->length > ctx->bs->total_clusters) {
4344 SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n");
4345 ctx->mask->length = ctx->bs->total_clusters;
4346 }
4347
4348 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length);
4349 if (rc < 0) {
4350 spdk_free(ctx->mask);
4351 bs_load_ctx_fail(ctx, rc);
4352 return;
4353 }
4354
4355 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask);
4356 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters);
4357 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
4358
4359 spdk_free(ctx->mask);
4360
4361 /* Read the used blobids mask */
4362 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
4363 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4364 SPDK_MALLOC_DMA);
4365 if (!ctx->mask) {
4366 bs_load_ctx_fail(ctx, -ENOMEM);
4367 return;
4368 }
4369 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4370 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4371 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4372 bs_load_used_blobids_cpl, ctx);
4373 }
4374
4375 static void
4376 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4377 {
4378 struct spdk_bs_load_ctx *ctx = cb_arg;
4379 uint64_t lba, lba_count, mask_size;
4380 int rc;
4381
4382 if (bserrno != 0) {
4383 bs_load_ctx_fail(ctx, bserrno);
4384 return;
4385 }
4386
4387 /* The type must be correct */
4388 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
4389 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4390 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
4391 8));
4392 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
4393 if (ctx->mask->length != ctx->super->md_len) {
4394 SPDK_ERRLOG("mismatched md_len in used_pages mask: "
4395 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n",
4396 ctx->mask->length, ctx->super->md_len);
4397 assert(false);
4398 }
4399
4400 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
4401 if (rc < 0) {
4402 spdk_free(ctx->mask);
4403 bs_load_ctx_fail(ctx, rc);
4404 return;
4405 }
4406
4407 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask);
4408 spdk_free(ctx->mask);
4409
4410 /* Read the used clusters mask */
4411 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
4412 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4413 SPDK_MALLOC_DMA);
4414 if (!ctx->mask) {
4415 bs_load_ctx_fail(ctx, -ENOMEM);
4416 return;
4417 }
4418 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
4419 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
4420 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4421 bs_load_used_clusters_cpl, ctx);
4422 }
4423
4424 static void
4425 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
4426 {
4427 uint64_t lba, lba_count, mask_size;
4428
4429 /* Read the used pages mask */
4430 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
4431 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
4432 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4433 if (!ctx->mask) {
4434 bs_load_ctx_fail(ctx, -ENOMEM);
4435 return;
4436 }
4437
4438 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
4439 lba_count =
bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4440 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 4441 bs_load_used_pages_cpl, ctx); 4442 } 4443 4444 static int 4445 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 4446 { 4447 struct spdk_blob_store *bs = ctx->bs; 4448 struct spdk_blob_md_descriptor *desc; 4449 size_t cur_desc = 0; 4450 4451 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4452 while (cur_desc < sizeof(page->descriptors)) { 4453 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4454 if (desc->length == 0) { 4455 /* If padding and length are 0, this terminates the page */ 4456 break; 4457 } 4458 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4459 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4460 unsigned int i, j; 4461 unsigned int cluster_count = 0; 4462 uint32_t cluster_idx; 4463 4464 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4465 4466 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4467 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 4468 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 4469 /* 4470 * cluster_idx = 0 means an unallocated cluster - don't mark that 4471 * in the used cluster map. 4472 */ 4473 if (cluster_idx != 0) { 4474 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 4475 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 4476 if (bs->num_free_clusters == 0) { 4477 return -ENOSPC; 4478 } 4479 bs->num_free_clusters--; 4480 } 4481 cluster_count++; 4482 } 4483 } 4484 if (cluster_count == 0) { 4485 return -EINVAL; 4486 } 4487 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4488 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4489 uint32_t i; 4490 uint32_t cluster_count = 0; 4491 uint32_t cluster_idx; 4492 size_t cluster_idx_length; 4493 4494 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4495 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4496 4497 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4498 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4499 return -EINVAL; 4500 } 4501 4502 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4503 cluster_idx = desc_extent->cluster_idx[i]; 4504 /* 4505 * cluster_idx = 0 means an unallocated cluster - don't mark that 4506 * in the used cluster map. 
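* Note: the start_cluster_idx range check just below can never trigger as
* written, since cluster_idx cannot be both less than start_cluster_idx and
* greater than or equal to start_cluster_idx + cluster_count.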
4507 */ 4508 if (cluster_idx != 0) { 4509 if (cluster_idx < desc_extent->start_cluster_idx && 4510 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4511 return -EINVAL; 4512 } 4513 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4514 if (bs->num_free_clusters == 0) { 4515 return -ENOSPC; 4516 } 4517 bs->num_free_clusters--; 4518 } 4519 cluster_count++; 4520 } 4521 4522 if (cluster_count == 0) { 4523 return -EINVAL; 4524 } 4525 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4526 /* Skip this item */ 4527 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4528 /* Skip this item */ 4529 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4530 /* Skip this item */ 4531 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4532 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4533 uint32_t num_extent_pages = ctx->num_extent_pages; 4534 uint32_t i; 4535 size_t extent_pages_length; 4536 void *tmp; 4537 4538 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4539 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4540 4541 if (desc_extent_table->length == 0 || 4542 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4543 return -EINVAL; 4544 } 4545 4546 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4547 if (desc_extent_table->extent_page[i].page_idx != 0) { 4548 if (desc_extent_table->extent_page[i].num_pages != 1) { 4549 return -EINVAL; 4550 } 4551 num_extent_pages += 1; 4552 } 4553 } 4554 4555 if (num_extent_pages > 0) { 4556 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4557 if (tmp == NULL) { 4558 return -ENOMEM; 4559 } 4560 ctx->extent_page_num = tmp; 4561 4562 /* Extent table entries contain md page numbers for extent pages. 4563 * Zeroes represent unallocated extent pages, those are run-length-encoded. 4564 */ 4565 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4566 if (desc_extent_table->extent_page[i].page_idx != 0) { 4567 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4568 ctx->num_extent_pages += 1; 4569 } 4570 } 4571 } 4572 } else { 4573 /* Error */ 4574 return -EINVAL; 4575 } 4576 /* Advance to the next descriptor */ 4577 cur_desc += sizeof(*desc) + desc->length; 4578 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4579 break; 4580 } 4581 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4582 } 4583 return 0; 4584 } 4585 4586 static bool 4587 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4588 { 4589 uint32_t crc; 4590 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4591 size_t desc_len; 4592 4593 crc = blob_md_page_calc_crc(page); 4594 if (crc != page->crc) { 4595 return false; 4596 } 4597 4598 /* Extent page should always be of sequence num 0. */ 4599 if (page->sequence_num != 0) { 4600 return false; 4601 } 4602 4603 /* Descriptor type must be EXTENT_PAGE. */ 4604 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4605 return false; 4606 } 4607 4608 /* Descriptor length cannot exceed the page. */ 4609 desc_len = sizeof(*desc) + desc->length; 4610 if (desc_len > sizeof(page->descriptors)) { 4611 return false; 4612 } 4613 4614 /* It has to be the only descriptor in the page. 
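* If any descriptor slot remains after it, that slot must read back as
* zero-length padding.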
*/
4615 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
4616 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
4617 if (desc->length != 0) {
4618 return false;
4619 }
4620 }
4621
4622 return true;
4623 }
4624
4625 static bool
4626 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
4627 {
4628 uint32_t crc;
4629 struct spdk_blob_md_page *page = ctx->page;
4630
4631 crc = blob_md_page_calc_crc(page);
4632 if (crc != page->crc) {
4633 return false;
4634 }
4635
4636 /* First page of a sequence should match the blobid. */
4637 if (page->sequence_num == 0 &&
4638 bs_page_to_blobid(ctx->cur_page) != page->id) {
4639 return false;
4640 }
4641 assert(bs_load_cur_extent_page_valid(page) == false);
4642
4643 return true;
4644 }
4645
4646 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
4647
4648 static void
4649 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4650 {
4651 struct spdk_bs_load_ctx *ctx = cb_arg;
4652
4653 if (bserrno != 0) {
4654 bs_load_ctx_fail(ctx, bserrno);
4655 return;
4656 }
4657
4658 bs_load_complete(ctx);
4659 }
4660
4661 static void
4662 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4663 {
4664 struct spdk_bs_load_ctx *ctx = cb_arg;
4665
4666 spdk_free(ctx->mask);
4667 ctx->mask = NULL;
4668
4669 if (bserrno != 0) {
4670 bs_load_ctx_fail(ctx, bserrno);
4671 return;
4672 }
4673
4674 bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl);
4675 }
4676
4677 static void
4678 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4679 {
4680 struct spdk_bs_load_ctx *ctx = cb_arg;
4681
4682 spdk_free(ctx->mask);
4683 ctx->mask = NULL;
4684
4685 if (bserrno != 0) {
4686 bs_load_ctx_fail(ctx, bserrno);
4687 return;
4688 }
4689
4690 bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl);
4691 }
4692
4693 static void
4694 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
4695 {
4696 bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl);
4697 }
4698
4699 static void
4700 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
4701 {
4702 uint64_t num_md_clusters;
4703 uint64_t i;
4704
4705 ctx->in_page_chain = false;
4706
4707 do {
4708 ctx->page_index++;
4709 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
4710
4711 if (ctx->page_index < ctx->super->md_len) {
4712 ctx->cur_page = ctx->page_index;
4713 bs_load_replay_cur_md_page(ctx);
4714 } else {
4715 /* Claim all of the clusters used by the metadata */
4716 num_md_clusters = spdk_divide_round_up(
4717 ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster);
4718 for (i = 0; i < num_md_clusters; i++) {
4719 spdk_bit_array_set(ctx->used_clusters, i);
4720 }
4721 ctx->bs->num_free_clusters -= num_md_clusters;
4722 spdk_free(ctx->page);
4723 bs_load_write_used_md(ctx);
4724 }
4725 }
4726
4727 static void
4728 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4729 {
4730 struct spdk_bs_load_ctx *ctx = cb_arg;
4731 uint32_t page_num;
4732 uint64_t i;
4733
4734 if (bserrno != 0) {
4735 spdk_free(ctx->extent_pages);
4736 bs_load_ctx_fail(ctx, bserrno);
4737 return;
4738 }
4739
4740 for (i = 0; i < ctx->num_extent_pages; i++) {
4741 /* Extent pages are only read when referenced from within the md chain.
4742 * Integrity of the md is compromised if such a page is not a valid extent page.
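* An invalid extent page here therefore fails the whole load with -EILSEQ
* rather than being skipped.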
*/ 4743 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4744 spdk_free(ctx->extent_pages); 4745 bs_load_ctx_fail(ctx, -EILSEQ); 4746 return; 4747 } 4748 4749 page_num = ctx->extent_page_num[i]; 4750 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4751 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4752 spdk_free(ctx->extent_pages); 4753 bs_load_ctx_fail(ctx, -EILSEQ); 4754 return; 4755 } 4756 } 4757 4758 spdk_free(ctx->extent_pages); 4759 free(ctx->extent_page_num); 4760 ctx->extent_page_num = NULL; 4761 ctx->num_extent_pages = 0; 4762 4763 bs_load_replay_md_chain_cpl(ctx); 4764 } 4765 4766 static void 4767 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4768 { 4769 spdk_bs_batch_t *batch; 4770 uint32_t page; 4771 uint64_t lba; 4772 uint64_t i; 4773 4774 ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0, 4775 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4776 if (!ctx->extent_pages) { 4777 bs_load_ctx_fail(ctx, -ENOMEM); 4778 return; 4779 } 4780 4781 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4782 4783 for (i = 0; i < ctx->num_extent_pages; i++) { 4784 page = ctx->extent_page_num[i]; 4785 assert(page < ctx->super->md_len); 4786 lba = bs_md_page_to_lba(ctx->bs, page); 4787 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4788 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE)); 4789 } 4790 4791 bs_batch_close(batch); 4792 } 4793 4794 static void 4795 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4796 { 4797 struct spdk_bs_load_ctx *ctx = cb_arg; 4798 uint32_t page_num; 4799 struct spdk_blob_md_page *page; 4800 4801 if (bserrno != 0) { 4802 bs_load_ctx_fail(ctx, bserrno); 4803 return; 4804 } 4805 4806 page_num = ctx->cur_page; 4807 page = ctx->page; 4808 if (bs_load_cur_md_page_valid(ctx) == true) { 4809 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4810 spdk_spin_lock(&ctx->bs->used_lock); 4811 bs_claim_md_page(ctx->bs, page_num); 4812 spdk_spin_unlock(&ctx->bs->used_lock); 4813 if (page->sequence_num == 0) { 4814 SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num); 4815 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4816 } 4817 if (bs_load_replay_md_parse_page(ctx, page)) { 4818 bs_load_ctx_fail(ctx, -EILSEQ); 4819 return; 4820 } 4821 if (page->next != SPDK_INVALID_MD_PAGE) { 4822 ctx->in_page_chain = true; 4823 ctx->cur_page = page->next; 4824 bs_load_replay_cur_md_page(ctx); 4825 return; 4826 } 4827 if (ctx->num_extent_pages != 0) { 4828 bs_load_replay_extent_pages(ctx); 4829 return; 4830 } 4831 } 4832 } 4833 bs_load_replay_md_chain_cpl(ctx); 4834 } 4835 4836 static void 4837 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4838 { 4839 uint64_t lba; 4840 4841 assert(ctx->cur_page < ctx->super->md_len); 4842 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4843 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4844 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4845 bs_load_replay_md_cpl, ctx); 4846 } 4847 4848 static void 4849 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4850 { 4851 ctx->page_index = 0; 4852 ctx->cur_page = 0; 4853 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4854 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4855 if (!ctx->page) { 4856 bs_load_ctx_fail(ctx, -ENOMEM); 4857 return; 4858 } 4859 bs_load_replay_cur_md_page(ctx); 4860 } 4861 4862 static void 4863 bs_recover(struct spdk_bs_load_ctx *ctx) 4864 { 4865 int rc; 4866 4867 SPDK_NOTICELOG("Performing recovery on blobstore\n"); 4868 rc = 
spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4869 if (rc < 0) { 4870 bs_load_ctx_fail(ctx, -ENOMEM); 4871 return; 4872 } 4873 4874 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4875 if (rc < 0) { 4876 bs_load_ctx_fail(ctx, -ENOMEM); 4877 return; 4878 } 4879 4880 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4881 if (rc < 0) { 4882 bs_load_ctx_fail(ctx, -ENOMEM); 4883 return; 4884 } 4885 4886 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4887 if (rc < 0) { 4888 bs_load_ctx_fail(ctx, -ENOMEM); 4889 return; 4890 } 4891 4892 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4893 bs_load_replay_md(ctx); 4894 } 4895 4896 static int 4897 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4898 { 4899 int rc; 4900 4901 if (ctx->super->size == 0) { 4902 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4903 } 4904 4905 if (ctx->super->io_unit_size == 0) { 4906 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4907 } 4908 4909 ctx->bs->clean = 1; 4910 ctx->bs->cluster_sz = ctx->super->cluster_size; 4911 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4912 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 4913 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) { 4914 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster); 4915 } 4916 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4917 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4918 if (rc < 0) { 4919 return -ENOMEM; 4920 } 4921 ctx->bs->md_start = ctx->super->md_start; 4922 ctx->bs->md_len = ctx->super->md_len; 4923 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4924 if (rc < 0) { 4925 return -ENOMEM; 4926 } 4927 4928 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4929 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4930 ctx->bs->super_blob = ctx->super->super_blob; 4931 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4932 4933 return 0; 4934 } 4935 4936 static void 4937 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4938 { 4939 struct spdk_bs_load_ctx *ctx = cb_arg; 4940 int rc; 4941 4942 rc = bs_super_validate(ctx->super, ctx->bs); 4943 if (rc != 0) { 4944 bs_load_ctx_fail(ctx, rc); 4945 return; 4946 } 4947 4948 rc = bs_parse_super(ctx); 4949 if (rc < 0) { 4950 bs_load_ctx_fail(ctx, rc); 4951 return; 4952 } 4953 4954 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4955 bs_recover(ctx); 4956 } else { 4957 bs_load_read_used_pages(ctx); 4958 } 4959 } 4960 4961 static inline int 4962 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4963 { 4964 4965 if (!src->opts_size) { 4966 SPDK_ERRLOG("opts_size should not be zero value\n"); 4967 return -1; 4968 } 4969 4970 #define FIELD_OK(field) \ 4971 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4972 4973 #define SET_FIELD(field) \ 4974 if (FIELD_OK(field)) { \ 4975 dst->field = src->field; \ 4976 } \ 4977 4978 SET_FIELD(cluster_sz); 4979 SET_FIELD(num_md_pages); 4980 SET_FIELD(max_md_ops); 4981 SET_FIELD(max_channel_ops); 4982 SET_FIELD(clear_method); 4983 4984 if (FIELD_OK(bstype)) { 4985 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 4986 } 4987 SET_FIELD(iter_cb_fn); 4988 SET_FIELD(iter_cb_arg); 4989 SET_FIELD(force_recover); 4990 SET_FIELD(esnap_bs_dev_create); 4991 
SET_FIELD(esnap_ctx); 4992 4993 dst->opts_size = src->opts_size; 4994 4995 /* You should not remove this statement, but need to update the assert statement 4996 * if you add a new field, and also add a corresponding SET_FIELD statement */ 4997 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size"); 4998 4999 #undef FIELD_OK 5000 #undef SET_FIELD 5001 5002 return 0; 5003 } 5004 5005 void 5006 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5007 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5008 { 5009 struct spdk_blob_store *bs; 5010 struct spdk_bs_cpl cpl; 5011 struct spdk_bs_load_ctx *ctx; 5012 struct spdk_bs_opts opts = {}; 5013 int err; 5014 5015 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 5016 5017 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5018 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 5019 dev->destroy(dev); 5020 cb_fn(cb_arg, NULL, -EINVAL); 5021 return; 5022 } 5023 5024 spdk_bs_opts_init(&opts, sizeof(opts)); 5025 if (o) { 5026 if (bs_opts_copy(o, &opts)) { 5027 return; 5028 } 5029 } 5030 5031 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 5032 dev->destroy(dev); 5033 cb_fn(cb_arg, NULL, -EINVAL); 5034 return; 5035 } 5036 5037 err = bs_alloc(dev, &opts, &bs, &ctx); 5038 if (err) { 5039 dev->destroy(dev); 5040 cb_fn(cb_arg, NULL, err); 5041 return; 5042 } 5043 5044 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5045 cpl.u.bs_handle.cb_fn = cb_fn; 5046 cpl.u.bs_handle.cb_arg = cb_arg; 5047 cpl.u.bs_handle.bs = bs; 5048 5049 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5050 if (!ctx->seq) { 5051 spdk_free(ctx->super); 5052 free(ctx); 5053 bs_free(bs); 5054 cb_fn(cb_arg, NULL, -ENOMEM); 5055 return; 5056 } 5057 5058 /* Read the super block */ 5059 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5060 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5061 bs_load_super_cpl, ctx); 5062 } 5063 5064 /* END spdk_bs_load */ 5065 5066 /* START spdk_bs_dump */ 5067 5068 static void 5069 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 5070 { 5071 spdk_free(ctx->super); 5072 5073 /* 5074 * We need to defer calling bs_call_cpl() until after 5075 * dev destruction, so tuck these away for later use. 
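* bs_free() below unregisters the blobstore io device; the stashed completion
* is then invoked from the device-destroy callback once teardown finishes.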
5076 */
5077 ctx->bs->unload_err = bserrno;
5078 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5079 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5080
5081 bs_sequence_finish(seq, 0);
5082 bs_free(ctx->bs);
5083 free(ctx);
5084 }
5085
5086 static void
5087 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5088 {
5089 struct spdk_blob_md_descriptor_xattr *desc_xattr;
5090 uint32_t i;
5091 const char *type;
5092
5093 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;
5094
5095 if (desc_xattr->length !=
5096 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
5097 desc_xattr->name_length + desc_xattr->value_length) {
/* The descriptor length is inconsistent with its payload; note it in the
 * dump output rather than silently ignoring it. */
fprintf(ctx->fp, "Warning: xattr descriptor length mismatch\n");
5098 }
5099
5100 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
5101 ctx->xattr_name[desc_xattr->name_length] = '\0';
5102 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
5103 type = "XATTR";
5104 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
5105 type = "XATTR_INTERNAL";
5106 } else {
5107 assert(false);
5108 type = "XATTR_?";
5109 }
5110 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name);
5111 fprintf(ctx->fp, " value = \"");
5112 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
5113 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
5114 desc_xattr->value_length);
5115 fprintf(ctx->fp, "\"\n");
5116 for (i = 0; i < desc_xattr->value_length; i++) {
5117 if (i % 16 == 0) {
5118 fprintf(ctx->fp, " ");
5119 }
5120 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
5121 if ((i + 1) % 16 == 0) {
5122 fprintf(ctx->fp, "\n");
5123 }
5124 }
5125 if (i % 16 != 0) {
5126 fprintf(ctx->fp, "\n");
5127 }
5128 }
5129
5130 struct type_flag_desc {
5131 uint64_t mask;
5132 uint64_t val;
5133 const char *name;
5134 };
5135
5136 static void
5137 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags,
5138 struct type_flag_desc *desc, size_t numflags)
5139 {
5140 uint64_t covered = 0;
5141 size_t i;
5142
5143 for (i = 0; i < numflags; i++) {
5144 if ((desc[i].mask & flags) != desc[i].val) {
5145 continue;
5146 }
5147 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name);
5148 if (desc[i].mask != desc[i].val) {
5149 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")",
5150 desc[i].mask, desc[i].val);
5151 }
5152 fprintf(ctx->fp, "\n");
5153 covered |= desc[i].mask;
5154 }
5155 if ((flags & ~covered) != 0) {
5156 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered);
5157 }
5158 }
5159
5160 static void
5161 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5162 {
5163 struct spdk_blob_md_descriptor_flags *type_desc;
5164 #define ADD_FLAG(f) { f, f, #f }
5165 #define ADD_MASK_VAL(m, v) { m, v, #v }
5166 static struct type_flag_desc invalid[] = {
5167 ADD_FLAG(SPDK_BLOB_THIN_PROV),
5168 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR),
5169 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE),
5170 };
5171 static struct type_flag_desc data_ro[] = {
5172 ADD_FLAG(SPDK_BLOB_READ_ONLY),
5173 };
5174 static struct type_flag_desc md_ro[] = {
5175 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT),
5176 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE),
5177 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP),
5178 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES),
5179 };
5180 #undef ADD_FLAG
5181 #undef ADD_MASK_VAL
5182
5183 type_desc = (struct
spdk_blob_md_descriptor_flags *)desc; 5184 fprintf(ctx->fp, "Flags:\n"); 5185 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 5186 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 5187 SPDK_COUNTOF(invalid)); 5188 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 5189 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 5190 SPDK_COUNTOF(data_ro)); 5191 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 5192 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 5193 SPDK_COUNTOF(md_ro)); 5194 } 5195 5196 static void 5197 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5198 { 5199 struct spdk_blob_md_descriptor_extent_table *et_desc; 5200 uint64_t num_extent_pages; 5201 uint32_t et_idx; 5202 5203 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 5204 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 5205 sizeof(et_desc->extent_page[0]); 5206 5207 fprintf(ctx->fp, "Extent table:\n"); 5208 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 5209 if (et_desc->extent_page[et_idx].page_idx == 0) { 5210 /* Zeroes represent unallocated extent pages. */ 5211 continue; 5212 } 5213 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 5214 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 5215 et_desc->extent_page[et_idx].num_pages, 5216 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 5217 } 5218 } 5219 5220 static void 5221 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 5222 { 5223 uint32_t page_idx = ctx->cur_page; 5224 struct spdk_blob_md_page *page = ctx->page; 5225 struct spdk_blob_md_descriptor *desc; 5226 size_t cur_desc = 0; 5227 uint32_t crc; 5228 5229 fprintf(ctx->fp, "=========\n"); 5230 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 5231 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 5232 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 5233 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 5234 if (page->next == SPDK_INVALID_MD_PAGE) { 5235 fprintf(ctx->fp, "Next: None\n"); 5236 } else { 5237 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 5238 } 5239 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 5240 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 5241 fprintf(ctx->fp, " md"); 5242 } 5243 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 5244 fprintf(ctx->fp, " blob"); 5245 } 5246 fprintf(ctx->fp, "\n"); 5247 5248 crc = blob_md_page_calc_crc(page); 5249 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 5250 5251 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 5252 while (cur_desc < sizeof(page->descriptors)) { 5253 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 5254 if (desc->length == 0) { 5255 /* If padding and length are 0, this terminates the page */ 5256 break; 5257 } 5258 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 5259 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 5260 unsigned int i; 5261 5262 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 5263 5264 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 5265 if (desc_extent_rle->extents[i].cluster_idx != 0) { 5266 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5267 desc_extent_rle->extents[i].cluster_idx); 5268 } else { 5269 fprintf(ctx->fp, "Unallocated Extent - "); 5270 } 5271 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 5272 fprintf(ctx->fp, "\n"); 5273 } 5274 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 5275 struct spdk_blob_md_descriptor_extent_page *desc_extent; 5276 unsigned int i; 5277 5278 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 5279 5280 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 5281 if (desc_extent->cluster_idx[i] != 0) { 5282 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5283 desc_extent->cluster_idx[i]); 5284 } else { 5285 fprintf(ctx->fp, "Unallocated Extent"); 5286 } 5287 fprintf(ctx->fp, "\n"); 5288 } 5289 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5290 bs_dump_print_xattr(ctx, desc); 5291 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5292 bs_dump_print_xattr(ctx, desc); 5293 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 5294 bs_dump_print_type_flags(ctx, desc); 5295 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 5296 bs_dump_print_extent_table(ctx, desc); 5297 } else { 5298 /* Error */ 5299 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 5300 } 5301 /* Advance to the next descriptor */ 5302 cur_desc += sizeof(*desc) + desc->length; 5303 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 5304 break; 5305 } 5306 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 5307 } 5308 } 5309 5310 static void 5311 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5312 { 5313 struct spdk_bs_load_ctx *ctx = cb_arg; 5314 5315 if (bserrno != 0) { 5316 bs_dump_finish(seq, ctx, bserrno); 5317 return; 5318 } 5319 5320 if (ctx->page->id != 0) { 5321 bs_dump_print_md_page(ctx); 5322 } 5323 5324 ctx->cur_page++; 5325 5326 if (ctx->cur_page < ctx->super->md_len) { 5327 bs_dump_read_md_page(seq, ctx); 5328 } else { 5329 spdk_free(ctx->page); 5330 bs_dump_finish(seq, ctx, 0); 5331 } 5332 } 5333 5334 static void 5335 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 5336 { 5337 struct spdk_bs_load_ctx *ctx = cb_arg; 5338 uint64_t lba; 5339 5340 assert(ctx->cur_page < ctx->super->md_len); 5341 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 5342 bs_sequence_read_dev(seq, ctx->page, lba, 5343 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 5344 bs_dump_read_md_page_cpl, ctx); 5345 } 5346 5347 static void 5348 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5349 { 5350 struct spdk_bs_load_ctx *ctx = cb_arg; 5351 int rc; 5352 5353 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 5354 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5355 sizeof(ctx->super->signature)) != 0) { 5356 fprintf(ctx->fp, "(Mismatch)\n"); 5357 bs_dump_finish(seq, ctx, bserrno); 5358 return; 5359 } else { 5360 fprintf(ctx->fp, "(OK)\n"); 5361 } 5362 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 5363 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 5364 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 5365 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 5366 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 5367 fprintf(ctx->fp, "Super Blob ID: "); 5368 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 5369 fprintf(ctx->fp, "(None)\n"); 5370 } else { 5371 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 5372 } 5373 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 5374 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 5375 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 5376 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 5377 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 5378 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 5379 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 5380 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 5381 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 5382 5383 ctx->cur_page = 0; 5384 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 5385 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5386 if (!ctx->page) { 5387 bs_dump_finish(seq, ctx, -ENOMEM); 5388 return; 5389 } 5390 5391 rc = bs_parse_super(ctx); 5392 if (rc < 0) { 5393 bs_load_ctx_fail(ctx, rc); 5394 return; 5395 } 5396 5397 bs_load_read_used_pages(ctx); 5398 } 5399 5400 void 5401 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 5402 spdk_bs_op_complete cb_fn, void *cb_arg) 5403 { 5404 struct spdk_blob_store *bs; 5405 struct spdk_bs_cpl cpl; 5406 struct spdk_bs_load_ctx *ctx; 5407 struct spdk_bs_opts opts = {}; 5408 int err; 5409 5410 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 5411 5412 spdk_bs_opts_init(&opts, sizeof(opts)); 5413 5414 err = bs_alloc(dev, &opts, &bs, &ctx); 5415 if (err) { 5416 dev->destroy(dev); 5417 cb_fn(cb_arg, err); 5418 return; 5419 } 5420 5421 ctx->dumping = true; 5422 ctx->fp = fp; 5423 ctx->print_xattr_fn = print_xattr_fn; 5424 5425 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5426 cpl.u.bs_basic.cb_fn = cb_fn; 5427 cpl.u.bs_basic.cb_arg = cb_arg; 5428 5429 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5430 if (!ctx->seq) { 5431 spdk_free(ctx->super); 5432 free(ctx); 5433 bs_free(bs); 5434 cb_fn(cb_arg, -ENOMEM); 5435 return; 5436 } 5437 5438 /* Read the super block */ 5439 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5440 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5441 bs_dump_super_cpl, ctx); 5442 } 5443 5444 /* END spdk_bs_dump */ 5445 5446 /* START spdk_bs_init */ 5447 5448 static void 5449 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5450 { 5451 struct spdk_bs_load_ctx *ctx = cb_arg; 5452 5453 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 5454 spdk_free(ctx->super); 5455 free(ctx); 5456 5457 bs_sequence_finish(seq, bserrno); 5458 } 5459 5460 static void 5461 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5462 { 5463 struct spdk_bs_load_ctx *ctx = cb_arg; 5464 5465 /* Write super block */ 5466 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5467 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5468 bs_init_persist_super_cpl, ctx); 5469 } 5470 5471 void 5472 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5473 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5474 { 5475 struct spdk_bs_load_ctx *ctx; 5476 struct spdk_blob_store *bs; 5477 struct spdk_bs_cpl cpl; 5478 spdk_bs_sequence_t *seq; 5479 spdk_bs_batch_t *batch; 5480 uint64_t num_md_lba; 5481 uint64_t num_md_pages; 5482 uint64_t num_md_clusters; 5483 uint64_t max_used_cluster_mask_len; 5484 uint32_t i; 5485 struct spdk_bs_opts opts = {}; 5486 int rc; 5487 uint64_t lba, lba_count; 5488 5489 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5490 5491 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5492 SPDK_ERRLOG("unsupported dev block length of %d\n", 5493 dev->blocklen); 5494 dev->destroy(dev); 5495 cb_fn(cb_arg, NULL, -EINVAL); 5496 return; 5497 } 5498 5499 spdk_bs_opts_init(&opts, sizeof(opts)); 5500 if (o) { 5501 if (bs_opts_copy(o, &opts)) { 5502 return; 5503 } 5504 } 5505 5506 if (bs_opts_verify(&opts) != 0) { 5507 dev->destroy(dev); 5508 cb_fn(cb_arg, NULL, -EINVAL); 5509 return; 5510 } 5511 5512 rc = bs_alloc(dev, &opts, &bs, &ctx); 5513 if (rc) { 5514 dev->destroy(dev); 5515 cb_fn(cb_arg, NULL, rc); 5516 return; 5517 } 5518 5519 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5520 /* By default, allocate 1 page per cluster. 5521 * Technically, this over-allocates metadata 5522 * because more metadata will reduce the number 5523 * of usable clusters. This can be addressed with 5524 * more complex math in the future. 5525 */ 5526 bs->md_len = bs->total_clusters; 5527 } else { 5528 bs->md_len = opts.num_md_pages; 5529 } 5530 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5531 if (rc < 0) { 5532 spdk_free(ctx->super); 5533 free(ctx); 5534 bs_free(bs); 5535 cb_fn(cb_arg, NULL, -ENOMEM); 5536 return; 5537 } 5538 5539 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5540 if (rc < 0) { 5541 spdk_free(ctx->super); 5542 free(ctx); 5543 bs_free(bs); 5544 cb_fn(cb_arg, NULL, -ENOMEM); 5545 return; 5546 } 5547 5548 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5549 if (rc < 0) { 5550 spdk_free(ctx->super); 5551 free(ctx); 5552 bs_free(bs); 5553 cb_fn(cb_arg, NULL, -ENOMEM); 5554 return; 5555 } 5556 5557 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5558 sizeof(ctx->super->signature)); 5559 ctx->super->version = SPDK_BS_VERSION; 5560 ctx->super->length = sizeof(*ctx->super); 5561 ctx->super->super_blob = bs->super_blob; 5562 ctx->super->clean = 0; 5563 ctx->super->cluster_size = bs->cluster_sz; 5564 ctx->super->io_unit_size = bs->io_unit_size; 5565 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5566 5567 /* Calculate how many pages the metadata consumes at the front 5568 * of the disk. 5569 */ 5570 5571 /* The super block uses 1 page */ 5572 num_md_pages = 1; 5573 5574 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5575 * up to the nearest page, plus a header. 
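* For example, with 4 KiB metadata pages and md_len = 65536, the mask needs
* 65536 / 8 = 8192 bytes plus the spdk_bs_md_mask header, which rounds up to
* three pages.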
5576 */ 5577 ctx->super->used_page_mask_start = num_md_pages; 5578 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5579 spdk_divide_round_up(bs->md_len, 8), 5580 SPDK_BS_PAGE_SIZE); 5581 num_md_pages += ctx->super->used_page_mask_len; 5582 5583 /* The used_clusters mask requires 1 bit per cluster, rounded 5584 * up to the nearest page, plus a header. 5585 */ 5586 ctx->super->used_cluster_mask_start = num_md_pages; 5587 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5588 spdk_divide_round_up(bs->total_clusters, 8), 5589 SPDK_BS_PAGE_SIZE); 5590 /* The blobstore might be extended, then the used_cluster bitmap will need more space. 5591 * Here we calculate the max clusters we can support according to the 5592 * num_md_pages (bs->md_len). 5593 */ 5594 max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5595 spdk_divide_round_up(bs->md_len, 8), 5596 SPDK_BS_PAGE_SIZE); 5597 max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len, 5598 ctx->super->used_cluster_mask_len); 5599 num_md_pages += max_used_cluster_mask_len; 5600 5601 /* The used_blobids mask requires 1 bit per metadata page, rounded 5602 * up to the nearest page, plus a header. 5603 */ 5604 ctx->super->used_blobid_mask_start = num_md_pages; 5605 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5606 spdk_divide_round_up(bs->md_len, 8), 5607 SPDK_BS_PAGE_SIZE); 5608 num_md_pages += ctx->super->used_blobid_mask_len; 5609 5610 /* The metadata region size was chosen above */ 5611 ctx->super->md_start = bs->md_start = num_md_pages; 5612 ctx->super->md_len = bs->md_len; 5613 num_md_pages += bs->md_len; 5614 5615 num_md_lba = bs_page_to_lba(bs, num_md_pages); 5616 5617 ctx->super->size = dev->blockcnt * dev->blocklen; 5618 5619 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 5620 5621 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 5622 if (num_md_clusters > bs->total_clusters) { 5623 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 5624 "please decrease number of pages reserved for metadata " 5625 "or increase cluster size.\n"); 5626 spdk_free(ctx->super); 5627 spdk_bit_array_free(&ctx->used_clusters); 5628 free(ctx); 5629 bs_free(bs); 5630 cb_fn(cb_arg, NULL, -ENOMEM); 5631 return; 5632 } 5633 /* Claim all of the clusters used by the metadata */ 5634 for (i = 0; i < num_md_clusters; i++) { 5635 spdk_bit_array_set(ctx->used_clusters, i); 5636 } 5637 5638 bs->num_free_clusters -= num_md_clusters; 5639 bs->total_data_clusters = bs->num_free_clusters; 5640 5641 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5642 cpl.u.bs_handle.cb_fn = cb_fn; 5643 cpl.u.bs_handle.cb_arg = cb_arg; 5644 cpl.u.bs_handle.bs = bs; 5645 5646 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5647 if (!seq) { 5648 spdk_free(ctx->super); 5649 free(ctx); 5650 bs_free(bs); 5651 cb_fn(cb_arg, NULL, -ENOMEM); 5652 return; 5653 } 5654 5655 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx); 5656 5657 /* Clear metadata space */ 5658 bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 5659 5660 lba = num_md_lba; 5661 lba_count = ctx->bs->dev->blockcnt - lba; 5662 switch (opts.clear_method) { 5663 case BS_CLEAR_WITH_UNMAP: 5664 /* Trim data clusters */ 5665 bs_batch_unmap_dev(batch, lba, lba_count); 5666 break; 5667 case BS_CLEAR_WITH_WRITE_ZEROES: 5668 /* Write_zeroes to data clusters */ 5669 bs_batch_write_zeroes_dev(batch, lba, lba_count); 5670 break; 5671 case 
BS_CLEAR_WITH_NONE: 5672 default: 5673 break; 5674 } 5675 5676 bs_batch_close(batch); 5677 } 5678 5679 /* END spdk_bs_init */ 5680 5681 /* START spdk_bs_destroy */ 5682 5683 static void 5684 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5685 { 5686 struct spdk_bs_load_ctx *ctx = cb_arg; 5687 struct spdk_blob_store *bs = ctx->bs; 5688 5689 /* 5690 * We need to defer calling bs_call_cpl() until after 5691 * dev destruction, so tuck these away for later use. 5692 */ 5693 bs->unload_err = bserrno; 5694 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5695 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5696 5697 bs_sequence_finish(seq, bserrno); 5698 5699 bs_free(bs); 5700 free(ctx); 5701 } 5702 5703 void 5704 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5705 void *cb_arg) 5706 { 5707 struct spdk_bs_cpl cpl; 5708 spdk_bs_sequence_t *seq; 5709 struct spdk_bs_load_ctx *ctx; 5710 5711 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5712 5713 if (!RB_EMPTY(&bs->open_blobs)) { 5714 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5715 cb_fn(cb_arg, -EBUSY); 5716 return; 5717 } 5718 5719 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5720 cpl.u.bs_basic.cb_fn = cb_fn; 5721 cpl.u.bs_basic.cb_arg = cb_arg; 5722 5723 ctx = calloc(1, sizeof(*ctx)); 5724 if (!ctx) { 5725 cb_fn(cb_arg, -ENOMEM); 5726 return; 5727 } 5728 5729 ctx->bs = bs; 5730 5731 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5732 if (!seq) { 5733 free(ctx); 5734 cb_fn(cb_arg, -ENOMEM); 5735 return; 5736 } 5737 5738 /* Write zeroes to the super block */ 5739 bs_sequence_write_zeroes_dev(seq, 5740 bs_page_to_lba(bs, 0), 5741 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5742 bs_destroy_trim_cpl, ctx); 5743 } 5744 5745 /* END spdk_bs_destroy */ 5746 5747 /* START spdk_bs_unload */ 5748 5749 static void 5750 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5751 { 5752 spdk_bs_sequence_t *seq = ctx->seq; 5753 5754 spdk_free(ctx->super); 5755 5756 /* 5757 * We need to defer calling bs_call_cpl() until after 5758 * dev destruction, so tuck these away for later use. 
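	 * The stashed unload_err and unload_cpl are picked up again once the dev
	 * teardown has finished, at which point the user's completion finally runs.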
5759 */ 5760 ctx->bs->unload_err = bserrno; 5761 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5762 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5763 5764 bs_sequence_finish(seq, bserrno); 5765 5766 bs_free(ctx->bs); 5767 free(ctx); 5768 } 5769 5770 static void 5771 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5772 { 5773 struct spdk_bs_load_ctx *ctx = cb_arg; 5774 5775 bs_unload_finish(ctx, bserrno); 5776 } 5777 5778 static void 5779 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5780 { 5781 struct spdk_bs_load_ctx *ctx = cb_arg; 5782 5783 spdk_free(ctx->mask); 5784 5785 if (bserrno != 0) { 5786 bs_unload_finish(ctx, bserrno); 5787 return; 5788 } 5789 5790 ctx->super->clean = 1; 5791 5792 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5793 } 5794 5795 static void 5796 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5797 { 5798 struct spdk_bs_load_ctx *ctx = cb_arg; 5799 5800 spdk_free(ctx->mask); 5801 ctx->mask = NULL; 5802 5803 if (bserrno != 0) { 5804 bs_unload_finish(ctx, bserrno); 5805 return; 5806 } 5807 5808 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5809 } 5810 5811 static void 5812 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5813 { 5814 struct spdk_bs_load_ctx *ctx = cb_arg; 5815 5816 spdk_free(ctx->mask); 5817 ctx->mask = NULL; 5818 5819 if (bserrno != 0) { 5820 bs_unload_finish(ctx, bserrno); 5821 return; 5822 } 5823 5824 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5825 } 5826 5827 static void 5828 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5829 { 5830 struct spdk_bs_load_ctx *ctx = cb_arg; 5831 int rc; 5832 5833 if (bserrno != 0) { 5834 bs_unload_finish(ctx, bserrno); 5835 return; 5836 } 5837 5838 rc = bs_super_validate(ctx->super, ctx->bs); 5839 if (rc != 0) { 5840 bs_unload_finish(ctx, rc); 5841 return; 5842 } 5843 5844 bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5845 } 5846 5847 void 5848 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5849 { 5850 struct spdk_bs_cpl cpl; 5851 struct spdk_bs_load_ctx *ctx; 5852 5853 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5854 5855 /* 5856 * If external snapshot channels are being destroyed while the blobstore is unloaded, the 5857 * unload is deferred until after the channel destruction completes. 
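	 * The callback is parked in esnap_unload_cb_fn/esnap_unload_cb_arg below and
	 * spdk_bs_unload() is called again with the same arguments once the last
	 * esnap channel is gone; that second pass performs the actual unload.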
5858 */ 5859 if (bs->esnap_channels_unloading != 0) { 5860 if (bs->esnap_unload_cb_fn != NULL) { 5861 SPDK_ERRLOG("Blobstore unload in progress\n"); 5862 cb_fn(cb_arg, -EBUSY); 5863 return; 5864 } 5865 SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32 5866 " esnap clones are unloading\n", bs->esnap_channels_unloading); 5867 bs->esnap_unload_cb_fn = cb_fn; 5868 bs->esnap_unload_cb_arg = cb_arg; 5869 return; 5870 } 5871 if (bs->esnap_unload_cb_fn != NULL) { 5872 SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n"); 5873 assert(bs->esnap_unload_cb_fn == cb_fn); 5874 assert(bs->esnap_unload_cb_arg == cb_arg); 5875 bs->esnap_unload_cb_fn = NULL; 5876 bs->esnap_unload_cb_arg = NULL; 5877 } 5878 5879 if (!RB_EMPTY(&bs->open_blobs)) { 5880 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5881 cb_fn(cb_arg, -EBUSY); 5882 return; 5883 } 5884 5885 ctx = calloc(1, sizeof(*ctx)); 5886 if (!ctx) { 5887 cb_fn(cb_arg, -ENOMEM); 5888 return; 5889 } 5890 5891 ctx->bs = bs; 5892 5893 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5894 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5895 if (!ctx->super) { 5896 free(ctx); 5897 cb_fn(cb_arg, -ENOMEM); 5898 return; 5899 } 5900 5901 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5902 cpl.u.bs_basic.cb_fn = cb_fn; 5903 cpl.u.bs_basic.cb_arg = cb_arg; 5904 5905 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5906 if (!ctx->seq) { 5907 spdk_free(ctx->super); 5908 free(ctx); 5909 cb_fn(cb_arg, -ENOMEM); 5910 return; 5911 } 5912 5913 /* Read super block */ 5914 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5915 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5916 bs_unload_read_super_cpl, ctx); 5917 } 5918 5919 /* END spdk_bs_unload */ 5920 5921 /* START spdk_bs_set_super */ 5922 5923 struct spdk_bs_set_super_ctx { 5924 struct spdk_blob_store *bs; 5925 struct spdk_bs_super_block *super; 5926 }; 5927 5928 static void 5929 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5930 { 5931 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5932 5933 if (bserrno != 0) { 5934 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5935 } 5936 5937 spdk_free(ctx->super); 5938 5939 bs_sequence_finish(seq, bserrno); 5940 5941 free(ctx); 5942 } 5943 5944 static void 5945 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5946 { 5947 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5948 int rc; 5949 5950 if (bserrno != 0) { 5951 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5952 spdk_free(ctx->super); 5953 bs_sequence_finish(seq, bserrno); 5954 free(ctx); 5955 return; 5956 } 5957 5958 rc = bs_super_validate(ctx->super, ctx->bs); 5959 if (rc != 0) { 5960 SPDK_ERRLOG("Not a valid super block\n"); 5961 spdk_free(ctx->super); 5962 bs_sequence_finish(seq, rc); 5963 free(ctx); 5964 return; 5965 } 5966 5967 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 5968 } 5969 5970 void 5971 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 5972 spdk_bs_op_complete cb_fn, void *cb_arg) 5973 { 5974 struct spdk_bs_cpl cpl; 5975 spdk_bs_sequence_t *seq; 5976 struct spdk_bs_set_super_ctx *ctx; 5977 5978 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 5979 5980 ctx = calloc(1, sizeof(*ctx)); 5981 if (!ctx) { 5982 cb_fn(cb_arg, -ENOMEM); 5983 return; 5984 } 5985 5986 ctx->bs = bs; 5987 5988 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5989 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5990 if (!ctx->super) { 5991 free(ctx); 5992 
cb_fn(cb_arg, -ENOMEM); 5993 return; 5994 } 5995 5996 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5997 cpl.u.bs_basic.cb_fn = cb_fn; 5998 cpl.u.bs_basic.cb_arg = cb_arg; 5999 6000 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 6001 if (!seq) { 6002 spdk_free(ctx->super); 6003 free(ctx); 6004 cb_fn(cb_arg, -ENOMEM); 6005 return; 6006 } 6007 6008 bs->super_blob = blobid; 6009 6010 /* Read super block */ 6011 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 6012 bs_byte_to_lba(bs, sizeof(*ctx->super)), 6013 bs_set_super_read_cpl, ctx); 6014 } 6015 6016 /* END spdk_bs_set_super */ 6017 6018 void 6019 spdk_bs_get_super(struct spdk_blob_store *bs, 6020 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6021 { 6022 if (bs->super_blob == SPDK_BLOBID_INVALID) { 6023 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 6024 } else { 6025 cb_fn(cb_arg, bs->super_blob, 0); 6026 } 6027 } 6028 6029 uint64_t 6030 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 6031 { 6032 return bs->cluster_sz; 6033 } 6034 6035 uint64_t 6036 spdk_bs_get_page_size(struct spdk_blob_store *bs) 6037 { 6038 return SPDK_BS_PAGE_SIZE; 6039 } 6040 6041 uint64_t 6042 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 6043 { 6044 return bs->io_unit_size; 6045 } 6046 6047 uint64_t 6048 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 6049 { 6050 return bs->num_free_clusters; 6051 } 6052 6053 uint64_t 6054 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 6055 { 6056 return bs->total_data_clusters; 6057 } 6058 6059 static int 6060 bs_register_md_thread(struct spdk_blob_store *bs) 6061 { 6062 bs->md_channel = spdk_get_io_channel(bs); 6063 if (!bs->md_channel) { 6064 SPDK_ERRLOG("Failed to get IO channel.\n"); 6065 return -1; 6066 } 6067 6068 return 0; 6069 } 6070 6071 static int 6072 bs_unregister_md_thread(struct spdk_blob_store *bs) 6073 { 6074 spdk_put_io_channel(bs->md_channel); 6075 6076 return 0; 6077 } 6078 6079 spdk_blob_id 6080 spdk_blob_get_id(struct spdk_blob *blob) 6081 { 6082 assert(blob != NULL); 6083 6084 return blob->id; 6085 } 6086 6087 uint64_t 6088 spdk_blob_get_num_pages(struct spdk_blob *blob) 6089 { 6090 assert(blob != NULL); 6091 6092 return bs_cluster_to_page(blob->bs, blob->active.num_clusters); 6093 } 6094 6095 uint64_t 6096 spdk_blob_get_num_io_units(struct spdk_blob *blob) 6097 { 6098 assert(blob != NULL); 6099 6100 return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs); 6101 } 6102 6103 uint64_t 6104 spdk_blob_get_num_clusters(struct spdk_blob *blob) 6105 { 6106 assert(blob != NULL); 6107 6108 return blob->active.num_clusters; 6109 } 6110 6111 uint64_t 6112 spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob) 6113 { 6114 assert(blob != NULL); 6115 6116 return blob->active.num_allocated_clusters; 6117 } 6118 6119 static uint64_t 6120 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated) 6121 { 6122 uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob); 6123 6124 while (offset < blob_io_unit_num) { 6125 if (bs_io_unit_is_allocated(blob, offset) == is_allocated) { 6126 return offset; 6127 } 6128 6129 offset += bs_num_io_units_to_cluster_boundary(blob, offset); 6130 } 6131 6132 return UINT64_MAX; 6133 } 6134 6135 uint64_t 6136 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6137 { 6138 return blob_find_io_unit(blob, offset, true); 6139 } 6140 6141 uint64_t 6142 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6143 { 6144 return blob_find_io_unit(blob, offset, false); 6145 
} 6146 6147 /* START spdk_bs_create_blob */ 6148 6149 static void 6150 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6151 { 6152 struct spdk_blob *blob = cb_arg; 6153 uint32_t page_idx = bs_blobid_to_page(blob->id); 6154 6155 if (bserrno != 0) { 6156 spdk_spin_lock(&blob->bs->used_lock); 6157 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 6158 bs_release_md_page(blob->bs, page_idx); 6159 spdk_spin_unlock(&blob->bs->used_lock); 6160 } 6161 6162 blob_free(blob); 6163 6164 bs_sequence_finish(seq, bserrno); 6165 } 6166 6167 static int 6168 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 6169 bool internal) 6170 { 6171 uint64_t i; 6172 size_t value_len = 0; 6173 int rc; 6174 const void *value = NULL; 6175 if (xattrs->count > 0 && xattrs->get_value == NULL) { 6176 return -EINVAL; 6177 } 6178 for (i = 0; i < xattrs->count; i++) { 6179 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 6180 if (value == NULL || value_len == 0) { 6181 return -EINVAL; 6182 } 6183 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 6184 if (rc < 0) { 6185 return rc; 6186 } 6187 } 6188 return 0; 6189 } 6190 6191 static void 6192 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 6193 { 6194 #define FIELD_OK(field) \ 6195 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 6196 6197 #define SET_FIELD(field) \ 6198 if (FIELD_OK(field)) { \ 6199 dst->field = src->field; \ 6200 } \ 6201 6202 SET_FIELD(num_clusters); 6203 SET_FIELD(thin_provision); 6204 SET_FIELD(clear_method); 6205 6206 if (FIELD_OK(xattrs)) { 6207 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 6208 } 6209 6210 SET_FIELD(use_extent_table); 6211 SET_FIELD(esnap_id); 6212 SET_FIELD(esnap_id_len); 6213 6214 dst->opts_size = src->opts_size; 6215 6216 /* You should not remove this statement, but need to update the assert statement 6217 * if you add a new field, and also add a corresponding SET_FIELD statement */ 6218 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size"); 6219 6220 #undef FIELD_OK 6221 #undef SET_FIELD 6222 } 6223 6224 static void 6225 bs_create_blob(struct spdk_blob_store *bs, 6226 const struct spdk_blob_opts *opts, 6227 const struct spdk_blob_xattr_opts *internal_xattrs, 6228 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6229 { 6230 struct spdk_blob *blob; 6231 uint32_t page_idx; 6232 struct spdk_bs_cpl cpl; 6233 struct spdk_blob_opts opts_local; 6234 struct spdk_blob_xattr_opts internal_xattrs_default; 6235 spdk_bs_sequence_t *seq; 6236 spdk_blob_id id; 6237 int rc; 6238 6239 assert(spdk_get_thread() == bs->md_thread); 6240 6241 spdk_spin_lock(&bs->used_lock); 6242 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 6243 if (page_idx == UINT32_MAX) { 6244 spdk_spin_unlock(&bs->used_lock); 6245 cb_fn(cb_arg, 0, -ENOMEM); 6246 return; 6247 } 6248 spdk_bit_array_set(bs->used_blobids, page_idx); 6249 bs_claim_md_page(bs, page_idx); 6250 spdk_spin_unlock(&bs->used_lock); 6251 6252 id = bs_page_to_blobid(page_idx); 6253 6254 SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx); 6255 6256 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 6257 if (opts) { 6258 blob_opts_copy(opts, &opts_local); 6259 } 6260 6261 blob = blob_alloc(bs, id); 6262 if (!blob) { 6263 rc = -ENOMEM; 6264 goto error; 6265 } 6266 6267 blob->use_extent_table = opts_local.use_extent_table; 6268 if (blob->use_extent_table) { 6269 
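		/* Recording the on-disk format in invalid_flags makes the choice
		 * persistent: a loader that does not understand this flag refuses to
		 * open the blob rather than misparsing its extent metadata.
		 */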
		blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE;
	}

	if (!internal_xattrs) {
		blob_xattrs_init(&internal_xattrs_default);
		internal_xattrs = &internal_xattrs_default;
	}

	rc = blob_set_xattrs(blob, &opts_local.xattrs, false);
	if (rc < 0) {
		goto error;
	}

	rc = blob_set_xattrs(blob, internal_xattrs, true);
	if (rc < 0) {
		goto error;
	}

	if (opts_local.thin_provision) {
		blob_set_thin_provision(blob);
	}

	blob_set_clear_method(blob, opts_local.clear_method);

	if (opts_local.esnap_id != NULL) {
		if (opts_local.esnap_id_len > UINT16_MAX) {
			SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n",
				    opts_local.esnap_id_len);
			rc = -EINVAL;
			goto error;
		}
		blob_set_thin_provision(blob);
		blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
		rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID,
				    opts_local.esnap_id, opts_local.esnap_id_len, true);
		if (rc != 0) {
			goto error;
		}
	}

	rc = blob_resize(blob, opts_local.num_clusters);
	if (rc < 0) {
		goto error;
	}
	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	cpl.u.blobid.cb_fn = cb_fn;
	cpl.u.blobid.cb_arg = cb_arg;
	cpl.u.blobid.blobid = blob->id;

	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!seq) {
		rc = -ENOMEM;
		goto error;
	}

	blob_persist(seq, blob, bs_create_blob_cpl, blob);
	return;

error:
	SPDK_ERRLOG("Failed to create blob: %s, size in clusters: %lu\n",
		    spdk_strerror(rc), opts_local.num_clusters);
	if (blob != NULL) {
		blob_free(blob);
	}
	spdk_spin_lock(&bs->used_lock);
	spdk_bit_array_clear(bs->used_blobids, page_idx);
	bs_release_md_page(bs, page_idx);
	spdk_spin_unlock(&bs->used_lock);
	cb_fn(cb_arg, 0, rc);
}

void
spdk_bs_create_blob(struct spdk_blob_store *bs,
		    spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
}

void
spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
}

/* END spdk_bs_create_blob */

/* START blob_cleanup */

struct spdk_clone_snapshot_ctx {
	struct spdk_bs_cpl cpl;
	int bserrno;
	bool frozen;

	struct spdk_io_channel *channel;

	/* Current cluster for inflate operation */
	uint64_t cluster;

	/* For inflation, force allocation of all unallocated clusters and remove
	 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */
	bool allocate_all;

	struct {
		spdk_blob_id id;
		struct spdk_blob *blob;
		bool md_ro;
	} original;
	struct {
		spdk_blob_id id;
		struct spdk_blob *blob;
	} new;

	/* xattrs specified for snapshot/clones only. They have no impact on
	 * the original blob's xattrs.
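	 * They are applied only to the newly created snapshot or clone blob.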
*/ 6385 const struct spdk_blob_xattr_opts *xattrs; 6386 }; 6387 6388 static void 6389 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 6390 { 6391 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 6392 struct spdk_bs_cpl *cpl = &ctx->cpl; 6393 6394 if (bserrno != 0) { 6395 if (ctx->bserrno != 0) { 6396 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6397 } else { 6398 ctx->bserrno = bserrno; 6399 } 6400 } 6401 6402 switch (cpl->type) { 6403 case SPDK_BS_CPL_TYPE_BLOBID: 6404 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 6405 break; 6406 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 6407 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 6408 break; 6409 default: 6410 SPDK_UNREACHABLE(); 6411 break; 6412 } 6413 6414 free(ctx); 6415 } 6416 6417 static void 6418 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6419 { 6420 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6421 struct spdk_blob *origblob = ctx->original.blob; 6422 6423 if (bserrno != 0) { 6424 if (ctx->bserrno != 0) { 6425 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 6426 } else { 6427 ctx->bserrno = bserrno; 6428 } 6429 } 6430 6431 ctx->original.id = origblob->id; 6432 origblob->locked_operation_in_progress = false; 6433 6434 /* Revert md_ro to original state */ 6435 origblob->md_ro = ctx->original.md_ro; 6436 6437 spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx); 6438 } 6439 6440 static void 6441 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 6442 { 6443 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6444 struct spdk_blob *origblob = ctx->original.blob; 6445 6446 if (bserrno != 0) { 6447 if (ctx->bserrno != 0) { 6448 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6449 } else { 6450 ctx->bserrno = bserrno; 6451 } 6452 } 6453 6454 if (ctx->frozen) { 6455 /* Unfreeze any outstanding I/O */ 6456 blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx); 6457 } else { 6458 bs_snapshot_unfreeze_cpl(ctx, 0); 6459 } 6460 6461 } 6462 6463 static void 6464 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno) 6465 { 6466 struct spdk_blob *newblob = ctx->new.blob; 6467 6468 if (bserrno != 0) { 6469 if (ctx->bserrno != 0) { 6470 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6471 } else { 6472 ctx->bserrno = bserrno; 6473 } 6474 } 6475 6476 ctx->new.id = newblob->id; 6477 spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6478 } 6479 6480 /* END blob_cleanup */ 6481 6482 /* START spdk_bs_create_snapshot */ 6483 6484 static void 6485 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 6486 { 6487 uint64_t *cluster_temp; 6488 uint64_t num_allocated_clusters_temp; 6489 uint32_t *extent_page_temp; 6490 6491 cluster_temp = blob1->active.clusters; 6492 blob1->active.clusters = blob2->active.clusters; 6493 blob2->active.clusters = cluster_temp; 6494 6495 num_allocated_clusters_temp = blob1->active.num_allocated_clusters; 6496 blob1->active.num_allocated_clusters = blob2->active.num_allocated_clusters; 6497 blob2->active.num_allocated_clusters = num_allocated_clusters_temp; 6498 6499 extent_page_temp = blob1->active.extent_pages; 6500 blob1->active.extent_pages = blob2->active.extent_pages; 6501 blob2->active.extent_pages = extent_page_temp; 6502 } 6503 6504 /* Copies an internal xattr */ 6505 static int 6506 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name) 6507 { 6508 const void *val = NULL; 6509 
	size_t len;
	int bserrno;

	bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true);
	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name);
		return bserrno;
	}

	bserrno = blob_set_xattr(toblob, name, val, len, true);
	if (bserrno != 0) {
		SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n",
			    name, toblob->id);
		return bserrno;
	}
	return 0;
}

static void
bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		bs_snapshot_swap_cluster_maps(newblob, origblob);
		if (blob_is_esnap_clone(newblob)) {
			bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID);
			origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
		}
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
	bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	bs_blob_list_add(ctx->original.blob);

	spdk_blob_set_read_only(newblob);

	/* sync snapshot metadata */
	spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		/* return cluster map back to original */
		bs_snapshot_swap_cluster_maps(newblob, origblob);

		/* Newblob md sync failed. Valid clusters are only present in origblob.
		 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster
		 * map should have occurred. Newblob needs to be reverted to the
		 * thin-provisioned state it had at creation to close properly.
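		 * The asserts below verify exactly that: the cluster map and extent
		 * pages handed back to newblob must still be all zeroes.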
*/ 6573 blob_set_thin_provision(newblob); 6574 assert(spdk_mem_all_zero(newblob->active.clusters, 6575 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6576 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6577 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6578 6579 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6580 return; 6581 } 6582 6583 /* Set internal xattr for snapshot id */ 6584 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 6585 if (bserrno != 0) { 6586 /* return cluster map back to original */ 6587 bs_snapshot_swap_cluster_maps(newblob, origblob); 6588 blob_set_thin_provision(newblob); 6589 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6590 return; 6591 } 6592 6593 /* Create new back_bs_dev for snapshot */ 6594 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 6595 if (origblob->back_bs_dev == NULL) { 6596 /* return cluster map back to original */ 6597 bs_snapshot_swap_cluster_maps(newblob, origblob); 6598 blob_set_thin_provision(newblob); 6599 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 6600 return; 6601 } 6602 6603 /* Remove the xattr that references an external snapshot */ 6604 if (blob_is_esnap_clone(origblob)) { 6605 origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6606 bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6607 if (bserrno != 0) { 6608 if (bserrno == -ENOENT) { 6609 SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID 6610 " xattr to remove\n", origblob->id); 6611 assert(false); 6612 } else { 6613 /* return cluster map back to original */ 6614 bs_snapshot_swap_cluster_maps(newblob, origblob); 6615 blob_set_thin_provision(newblob); 6616 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6617 return; 6618 } 6619 } 6620 } 6621 6622 bs_blob_list_remove(origblob); 6623 origblob->parent_id = newblob->id; 6624 /* set clone blob as thin provisioned */ 6625 blob_set_thin_provision(origblob); 6626 6627 bs_blob_list_add(newblob); 6628 6629 /* sync clone metadata */ 6630 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 6631 } 6632 6633 static void 6634 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 6635 { 6636 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6637 struct spdk_blob *origblob = ctx->original.blob; 6638 struct spdk_blob *newblob = ctx->new.blob; 6639 int bserrno; 6640 6641 if (rc != 0) { 6642 bs_clone_snapshot_newblob_cleanup(ctx, rc); 6643 return; 6644 } 6645 6646 ctx->frozen = true; 6647 6648 if (blob_is_esnap_clone(origblob)) { 6649 /* Clean up any channels associated with the original blob id because future IO will 6650 * perform IO using the snapshot blob_id. 
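		 * Replacement channels are created on demand once IO resumes under
		 * the snapshot's blob id.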
6651 */ 6652 blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL); 6653 } 6654 if (newblob->back_bs_dev) { 6655 blob_back_bs_destroy(newblob); 6656 } 6657 /* set new back_bs_dev for snapshot */ 6658 newblob->back_bs_dev = origblob->back_bs_dev; 6659 /* Set invalid flags from origblob */ 6660 newblob->invalid_flags = origblob->invalid_flags; 6661 6662 /* inherit parent from original blob if set */ 6663 newblob->parent_id = origblob->parent_id; 6664 switch (origblob->parent_id) { 6665 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 6666 bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6667 if (bserrno != 0) { 6668 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6669 return; 6670 } 6671 break; 6672 case SPDK_BLOBID_INVALID: 6673 break; 6674 default: 6675 /* Set internal xattr for snapshot id */ 6676 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6677 &origblob->parent_id, sizeof(spdk_blob_id), true); 6678 if (bserrno != 0) { 6679 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6680 return; 6681 } 6682 } 6683 6684 /* swap cluster maps */ 6685 bs_snapshot_swap_cluster_maps(newblob, origblob); 6686 6687 /* Set the clear method on the new blob to match the original. */ 6688 blob_set_clear_method(newblob, origblob->clear_method); 6689 6690 /* sync snapshot metadata */ 6691 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6692 } 6693 6694 static void 6695 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6696 { 6697 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6698 struct spdk_blob *origblob = ctx->original.blob; 6699 struct spdk_blob *newblob = _blob; 6700 6701 if (bserrno != 0) { 6702 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6703 return; 6704 } 6705 6706 ctx->new.blob = newblob; 6707 assert(spdk_blob_is_thin_provisioned(newblob)); 6708 assert(spdk_mem_all_zero(newblob->active.clusters, 6709 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6710 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6711 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6712 6713 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6714 } 6715 6716 static void 6717 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6718 { 6719 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6720 struct spdk_blob *origblob = ctx->original.blob; 6721 6722 if (bserrno != 0) { 6723 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6724 return; 6725 } 6726 6727 ctx->new.id = blobid; 6728 ctx->cpl.u.blobid.blobid = blobid; 6729 6730 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6731 } 6732 6733 6734 static void 6735 bs_xattr_snapshot(void *arg, const char *name, 6736 const void **value, size_t *value_len) 6737 { 6738 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6739 6740 struct spdk_blob *blob = (struct spdk_blob *)arg; 6741 *value = &blob->id; 6742 *value_len = sizeof(blob->id); 6743 } 6744 6745 static void 6746 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6747 { 6748 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6749 struct spdk_blob_opts opts; 6750 struct spdk_blob_xattr_opts internal_xattrs; 6751 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6752 6753 if (bserrno != 0) { 6754 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6755 return; 6756 } 6757 6758 
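	/*
	 * From this point the original blob is held open for the rest of the
	 * operation, so every failure path below must route through a cleanup
	 * callback that closes it again.
	 *
	 * For reference, callers reach this path via something like the
	 * following (a sketch only, error handling omitted):
	 *
	 *     spdk_bs_create_snapshot(bs, blobid, NULL, done_cb, done_arg);
	 *
	 * where done_cb is an spdk_blob_op_with_id_complete that receives the
	 * new snapshot's blob id.
	 */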
ctx->original.blob = _blob; 6759 6760 if (_blob->data_ro || _blob->md_ro) { 6761 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id 0x%" 6762 PRIx64 "\n", _blob->id); 6763 ctx->bserrno = -EINVAL; 6764 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6765 return; 6766 } 6767 6768 if (_blob->locked_operation_in_progress) { 6769 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6770 ctx->bserrno = -EBUSY; 6771 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6772 return; 6773 } 6774 6775 _blob->locked_operation_in_progress = true; 6776 6777 spdk_blob_opts_init(&opts, sizeof(opts)); 6778 blob_xattrs_init(&internal_xattrs); 6779 6780 /* Change the size of new blob to the same as in original blob, 6781 * but do not allocate clusters */ 6782 opts.thin_provision = true; 6783 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6784 opts.use_extent_table = _blob->use_extent_table; 6785 6786 /* If there are any xattrs specified for snapshot, set them now */ 6787 if (ctx->xattrs) { 6788 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6789 } 6790 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6791 internal_xattrs.count = 1; 6792 internal_xattrs.ctx = _blob; 6793 internal_xattrs.names = xattrs_names; 6794 internal_xattrs.get_value = bs_xattr_snapshot; 6795 6796 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6797 bs_snapshot_newblob_create_cpl, ctx); 6798 } 6799 6800 void 6801 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 6802 const struct spdk_blob_xattr_opts *snapshot_xattrs, 6803 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6804 { 6805 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6806 6807 if (!ctx) { 6808 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6809 return; 6810 } 6811 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6812 ctx->cpl.u.blobid.cb_fn = cb_fn; 6813 ctx->cpl.u.blobid.cb_arg = cb_arg; 6814 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6815 ctx->bserrno = 0; 6816 ctx->frozen = false; 6817 ctx->original.id = blobid; 6818 ctx->xattrs = snapshot_xattrs; 6819 6820 spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx); 6821 } 6822 /* END spdk_bs_create_snapshot */ 6823 6824 /* START spdk_bs_create_clone */ 6825 6826 static void 6827 bs_xattr_clone(void *arg, const char *name, 6828 const void **value, size_t *value_len) 6829 { 6830 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 6831 6832 struct spdk_blob *blob = (struct spdk_blob *)arg; 6833 *value = &blob->id; 6834 *value_len = sizeof(blob->id); 6835 } 6836 6837 static void 6838 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6839 { 6840 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6841 struct spdk_blob *clone = _blob; 6842 6843 ctx->new.blob = clone; 6844 bs_blob_list_add(clone); 6845 6846 spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx); 6847 } 6848 6849 static void 6850 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6851 { 6852 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6853 6854 ctx->cpl.u.blobid.blobid = blobid; 6855 spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx); 6856 } 6857 6858 static void 6859 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6860 { 6861 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6862 struct 
spdk_blob_opts opts; 6863 struct spdk_blob_xattr_opts internal_xattrs; 6864 char *xattr_names[] = { BLOB_SNAPSHOT }; 6865 6866 if (bserrno != 0) { 6867 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6868 return; 6869 } 6870 6871 ctx->original.blob = _blob; 6872 ctx->original.md_ro = _blob->md_ro; 6873 6874 if (!_blob->data_ro || !_blob->md_ro) { 6875 SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n"); 6876 ctx->bserrno = -EINVAL; 6877 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6878 return; 6879 } 6880 6881 if (_blob->locked_operation_in_progress) { 6882 SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n"); 6883 ctx->bserrno = -EBUSY; 6884 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6885 return; 6886 } 6887 6888 _blob->locked_operation_in_progress = true; 6889 6890 spdk_blob_opts_init(&opts, sizeof(opts)); 6891 blob_xattrs_init(&internal_xattrs); 6892 6893 opts.thin_provision = true; 6894 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6895 opts.use_extent_table = _blob->use_extent_table; 6896 if (ctx->xattrs) { 6897 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6898 } 6899 6900 /* Set internal xattr BLOB_SNAPSHOT */ 6901 internal_xattrs.count = 1; 6902 internal_xattrs.ctx = _blob; 6903 internal_xattrs.names = xattr_names; 6904 internal_xattrs.get_value = bs_xattr_clone; 6905 6906 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6907 bs_clone_newblob_create_cpl, ctx); 6908 } 6909 6910 void 6911 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6912 const struct spdk_blob_xattr_opts *clone_xattrs, 6913 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6914 { 6915 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6916 6917 if (!ctx) { 6918 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6919 return; 6920 } 6921 6922 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6923 ctx->cpl.u.blobid.cb_fn = cb_fn; 6924 ctx->cpl.u.blobid.cb_arg = cb_arg; 6925 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6926 ctx->bserrno = 0; 6927 ctx->xattrs = clone_xattrs; 6928 ctx->original.id = blobid; 6929 6930 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6931 } 6932 6933 /* END spdk_bs_create_clone */ 6934 6935 /* START spdk_bs_inflate_blob */ 6936 6937 static void 6938 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6939 { 6940 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6941 struct spdk_blob *_blob = ctx->original.blob; 6942 6943 if (bserrno != 0) { 6944 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6945 return; 6946 } 6947 6948 /* Temporarily override md_ro flag for MD modification */ 6949 _blob->md_ro = false; 6950 6951 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6952 if (bserrno != 0) { 6953 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6954 return; 6955 } 6956 6957 assert(_parent != NULL); 6958 6959 bs_blob_list_remove(_blob); 6960 _blob->parent_id = _parent->id; 6961 6962 blob_back_bs_destroy(_blob); 6963 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6964 bs_blob_list_add(_blob); 6965 6966 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6967 } 6968 6969 static void 6970 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 6971 { 6972 struct spdk_blob *_blob = ctx->original.blob; 6973 struct spdk_blob *_parent; 6974 6975 if (ctx->allocate_all) { 6976 /* remove thin provisioning */ 6977 bs_blob_list_remove(_blob); 
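		/* Every cluster now has a local allocation, so the back-reference to
		 * the parent (a regular snapshot or an external one) is dropped along
		 * with the thin-provisioning flag.
		 */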
		if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
			blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
			_blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
		} else {
			blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
		}
		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
		blob_back_bs_destroy(_blob);
		_blob->parent_id = SPDK_BLOBID_INVALID;
	} else {
		/* For now, esnap clones always have allocate_all set. */
		assert(!blob_is_esnap_clone(_blob));

		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
			/* We must change the parent of the inflated blob */
			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
					  bs_inflate_blob_set_parent_cpl, ctx);
			return;
		}

		bs_blob_list_remove(_blob);
		_blob->parent_id = SPDK_BLOBID_INVALID;
		blob_back_bs_destroy(_blob);
		_blob->back_bs_dev = bs_create_zeroes_dev();
	}

	/* Temporarily override md_ro flag for MD modification */
	_blob->md_ro = false;
	blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
	_blob->state = SPDK_BLOB_STATE_DIRTY;

	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
}

/* Check if cluster needs allocation */
static inline bool
bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
{
	struct spdk_blob_bs_dev *b;

	assert(blob != NULL);

	if (blob->active.clusters[cluster] != 0) {
		/* Cluster is already allocated */
		return false;
	}

	if (blob->parent_id == SPDK_BLOBID_INVALID) {
		/* Blob has no parent blob */
		return allocate_all;
	}

	if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
		return true;
	}

	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	return (allocate_all || b->blob->active.clusters[cluster] != 0);
}

static void
bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;
	struct spdk_bs_cpl cpl;
	spdk_bs_user_op_t *op;
	uint64_t offset;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
		if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
			break;
		}
	}

	if (ctx->cluster < _blob->active.num_clusters) {
		offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);

		/* We may safely increment the cluster index before the copy starts */
		ctx->cluster++;

		/* Use a dummy 0B read as a context for cluster copy */
		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
		cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
		cpl.u.blob_basic.cb_arg = ctx;

		op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
				      NULL, 0, offset, 0);
		if (!op) {
			bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
			return;
		}

		bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
	} else {
		bs_inflate_blob_done(ctx);
	}
}

static void
bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	uint64_t clusters_needed;
	uint64_t i;

	if (bserrno != 0) {
		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;
	ctx->original.md_ro = _blob->md_ro;

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	switch (_blob->parent_id) {
	case SPDK_BLOBID_INVALID:
		if (!ctx->allocate_all) {
			/* This blob has no parent, so we cannot decouple it. */
			SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
			bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
			return;
		}
		break;
	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
		/*
		 * It would be better to rely on back_bs_dev->is_zeroes() to determine
		 * which clusters require allocation. Until there is a blobstore
		 * consumer that uses esnaps with an spdk_bs_dev that implements a
		 * useful is_zeroes() it is not worth the effort.
		 */
		ctx->allocate_all = true;
		break;
	default:
		break;
	}

	if (spdk_blob_is_thin_provisioned(_blob) == false) {
		/* This blob is not thin provisioned. No need to inflate. */
		bs_clone_snapshot_origblob_cleanup(ctx, 0);
		return;
	}

	/* Do two passes - one to verify that we can obtain enough clusters
	 * and another to actually claim them.
	 */
	clusters_needed = 0;
	for (i = 0; i < _blob->active.num_clusters; i++) {
		if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
			clusters_needed++;
		}
	}

	if (clusters_needed > _blob->bs->num_free_clusters) {
		/* Not enough free clusters. Cannot satisfy the request.
*/ 7147 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 7148 return; 7149 } 7150 7151 ctx->cluster = 0; 7152 bs_inflate_blob_touch_next(ctx, 0); 7153 } 7154 7155 static void 7156 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7157 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 7158 { 7159 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 7160 7161 if (!ctx) { 7162 cb_fn(cb_arg, -ENOMEM); 7163 return; 7164 } 7165 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7166 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7167 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7168 ctx->bserrno = 0; 7169 ctx->original.id = blobid; 7170 ctx->channel = channel; 7171 ctx->allocate_all = allocate_all; 7172 7173 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 7174 } 7175 7176 void 7177 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7178 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7179 { 7180 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 7181 } 7182 7183 void 7184 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7185 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7186 { 7187 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 7188 } 7189 /* END spdk_bs_inflate_blob */ 7190 7191 /* START spdk_bs_blob_shallow_copy */ 7192 7193 struct shallow_copy_ctx { 7194 struct spdk_bs_cpl cpl; 7195 int bserrno; 7196 7197 /* Blob source for copy */ 7198 struct spdk_blob_store *bs; 7199 spdk_blob_id blobid; 7200 struct spdk_blob *blob; 7201 struct spdk_io_channel *blob_channel; 7202 7203 /* Destination device for copy */ 7204 struct spdk_bs_dev *ext_dev; 7205 struct spdk_io_channel *ext_channel; 7206 7207 /* Current cluster for copy operation */ 7208 uint64_t cluster; 7209 7210 /* Buffer for blob reading */ 7211 uint8_t *read_buff; 7212 7213 /* Struct for external device writing */ 7214 struct spdk_bs_dev_cb_args ext_args; 7215 7216 /* Actual number of copied clusters */ 7217 uint64_t copied_clusters_count; 7218 7219 /* Status callback for updates about the ongoing operation */ 7220 spdk_blob_shallow_copy_status status_cb; 7221 7222 /* Argument passed to function status_cb */ 7223 void *status_cb_arg; 7224 }; 7225 7226 static void 7227 bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno) 7228 { 7229 struct shallow_copy_ctx *ctx = cb_arg; 7230 struct spdk_bs_cpl *cpl = &ctx->cpl; 7231 7232 if (bserrno != 0) { 7233 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, cleanup error %d\n", ctx->blob->id, bserrno); 7234 ctx->bserrno = bserrno; 7235 } 7236 7237 ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel); 7238 spdk_free(ctx->read_buff); 7239 7240 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 7241 7242 free(ctx); 7243 } 7244 7245 static void 7246 bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, int bserrno) 7247 { 7248 struct shallow_copy_ctx *ctx = cb_arg; 7249 struct spdk_blob *_blob = ctx->blob; 7250 7251 if (bserrno != 0) { 7252 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, ext dev write error %d\n", ctx->blob->id, bserrno); 7253 ctx->bserrno = bserrno; 7254 _blob->locked_operation_in_progress = false; 7255 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7256 return; 7257 } 7258 7259 ctx->cluster++; 7260 if (ctx->status_cb) { 7261 ctx->copied_clusters_count++; 7262 ctx->status_cb(ctx->copied_clusters_count, ctx->status_cb_arg); 7263 } 7264 7265 
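	/* Advance to the next allocated cluster. Unallocated clusters are skipped
	 * entirely, which is what makes this copy "shallow".
	 */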
bs_shallow_copy_cluster_find_next(ctx); 7266 } 7267 7268 static void 7269 bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno) 7270 { 7271 struct shallow_copy_ctx *ctx = cb_arg; 7272 struct spdk_bs_dev *ext_dev = ctx->ext_dev; 7273 struct spdk_blob *_blob = ctx->blob; 7274 7275 if (bserrno != 0) { 7276 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob read error %d\n", ctx->blob->id, bserrno); 7277 ctx->bserrno = bserrno; 7278 _blob->locked_operation_in_progress = false; 7279 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7280 return; 7281 } 7282 7283 ctx->ext_args.channel = ctx->ext_channel; 7284 ctx->ext_args.cb_fn = bs_shallow_copy_bdev_write_cpl; 7285 ctx->ext_args.cb_arg = ctx; 7286 7287 ext_dev->write(ext_dev, ctx->ext_channel, ctx->read_buff, 7288 bs_cluster_to_lba(_blob->bs, ctx->cluster), 7289 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz), 7290 &ctx->ext_args); 7291 } 7292 7293 static void 7294 bs_shallow_copy_cluster_find_next(void *cb_arg) 7295 { 7296 struct shallow_copy_ctx *ctx = cb_arg; 7297 struct spdk_blob *_blob = ctx->blob; 7298 7299 while (ctx->cluster < _blob->active.num_clusters) { 7300 if (_blob->active.clusters[ctx->cluster] != 0) { 7301 break; 7302 } 7303 7304 ctx->cluster++; 7305 } 7306 7307 if (ctx->cluster < _blob->active.num_clusters) { 7308 blob_request_submit_op_single(ctx->blob_channel, _blob, ctx->read_buff, 7309 bs_cluster_to_lba(_blob->bs, ctx->cluster), 7310 bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz), 7311 bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ); 7312 } else { 7313 _blob->locked_operation_in_progress = false; 7314 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7315 } 7316 } 7317 7318 static void 7319 bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 7320 { 7321 struct shallow_copy_ctx *ctx = cb_arg; 7322 struct spdk_bs_dev *ext_dev = ctx->ext_dev; 7323 uint32_t blob_block_size; 7324 uint64_t blob_total_size; 7325 7326 if (bserrno != 0) { 7327 SPDK_ERRLOG("Shallow copy blob open error %d\n", bserrno); 7328 ctx->bserrno = bserrno; 7329 bs_shallow_copy_cleanup_finish(ctx, 0); 7330 return; 7331 } 7332 7333 if (!spdk_blob_is_read_only(_blob)) { 7334 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob must be read only\n", _blob->id); 7335 ctx->bserrno = -EPERM; 7336 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7337 return; 7338 } 7339 7340 blob_block_size = _blob->bs->dev->blocklen; 7341 blob_total_size = spdk_blob_get_num_clusters(_blob) * spdk_bs_get_cluster_size(_blob->bs); 7342 7343 if (blob_total_size > ext_dev->blockcnt * ext_dev->blocklen) { 7344 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device must have at least blob size\n", 7345 _blob->id); 7346 ctx->bserrno = -EINVAL; 7347 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7348 return; 7349 } 7350 7351 if (blob_block_size % ext_dev->blocklen != 0) { 7352 SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device block size is not compatible with \ 7353 blobstore block size\n", _blob->id); 7354 ctx->bserrno = -EINVAL; 7355 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7356 return; 7357 } 7358 7359 ctx->blob = _blob; 7360 7361 if (_blob->locked_operation_in_progress) { 7362 SPDK_DEBUGLOG(blob, "blob 0x%" PRIx64 " shallow copy - another operation in progress\n", _blob->id); 7363 ctx->bserrno = -EBUSY; 7364 spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx); 7365 return; 7366 } 7367 7368 _blob->locked_operation_in_progress = true; 7369 
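	/*
	 * Start the walk at cluster 0; each allocated cluster is read into
	 * read_buff and written to the corresponding offset on the external
	 * device. A typical invocation of this API looks like the following
	 * (a sketch only, error handling omitted):
	 *
	 *     rc = spdk_bs_blob_shallow_copy(bs, channel, blobid, ext_dev,
	 *                                    status_cb, status_arg,
	 *                                    done_cb, done_arg);
	 *
	 * status_cb is invoked after each copied cluster with the running count.
	 */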
7370 ctx->cluster = 0; 7371 bs_shallow_copy_cluster_find_next(ctx); 7372 } 7373 7374 int 7375 spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7376 spdk_blob_id blobid, struct spdk_bs_dev *ext_dev, 7377 spdk_blob_shallow_copy_status status_cb_fn, void *status_cb_arg, 7378 spdk_blob_op_complete cb_fn, void *cb_arg) 7379 { 7380 struct shallow_copy_ctx *ctx; 7381 struct spdk_io_channel *ext_channel; 7382 7383 ctx = calloc(1, sizeof(*ctx)); 7384 if (!ctx) { 7385 return -ENOMEM; 7386 } 7387 7388 ctx->bs = bs; 7389 ctx->blobid = blobid; 7390 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7391 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 7392 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 7393 ctx->bserrno = 0; 7394 ctx->blob_channel = channel; 7395 ctx->status_cb = status_cb_fn; 7396 ctx->status_cb_arg = status_cb_arg; 7397 ctx->read_buff = spdk_malloc(bs->cluster_sz, bs->dev->blocklen, NULL, 7398 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 7399 if (!ctx->read_buff) { 7400 free(ctx); 7401 return -ENOMEM; 7402 } 7403 7404 ext_channel = ext_dev->create_channel(ext_dev); 7405 if (!ext_channel) { 7406 spdk_free(ctx->read_buff); 7407 free(ctx); 7408 return -ENOMEM; 7409 } 7410 ctx->ext_dev = ext_dev; 7411 ctx->ext_channel = ext_channel; 7412 7413 spdk_bs_open_blob(ctx->bs, ctx->blobid, bs_shallow_copy_blob_open_cpl, ctx); 7414 7415 return 0; 7416 } 7417 /* END spdk_bs_blob_shallow_copy */ 7418 7419 /* START spdk_bs_blob_set_parent */ 7420 7421 struct set_parent_ctx { 7422 struct spdk_blob_store *bs; 7423 int bserrno; 7424 spdk_bs_op_complete cb_fn; 7425 void *cb_arg; 7426 7427 struct spdk_blob *blob; 7428 bool blob_md_ro; 7429 7430 struct blob_parent parent; 7431 }; 7432 7433 static void 7434 bs_set_parent_cleanup_finish(void *cb_arg, int bserrno) 7435 { 7436 struct set_parent_ctx *ctx = cb_arg; 7437 7438 assert(ctx != NULL); 7439 7440 if (bserrno != 0) { 7441 SPDK_ERRLOG("blob set parent finish error %d\n", bserrno); 7442 if (ctx->bserrno == 0) { 7443 ctx->bserrno = bserrno; 7444 } 7445 } 7446 7447 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7448 7449 free(ctx); 7450 } 7451 7452 static void 7453 bs_set_parent_close_snapshot(void *cb_arg, int bserrno) 7454 { 7455 struct set_parent_ctx *ctx = cb_arg; 7456 7457 if (ctx->bserrno != 0) { 7458 spdk_blob_close(ctx->parent.u.snapshot.blob, bs_set_parent_cleanup_finish, ctx); 7459 return; 7460 } 7461 7462 if (bserrno != 0) { 7463 SPDK_ERRLOG("blob close error %d\n", bserrno); 7464 ctx->bserrno = bserrno; 7465 } 7466 7467 bs_set_parent_cleanup_finish(ctx, ctx->bserrno); 7468 } 7469 7470 static void 7471 bs_set_parent_close_blob(void *cb_arg, int bserrno) 7472 { 7473 struct set_parent_ctx *ctx = cb_arg; 7474 struct spdk_blob *blob = ctx->blob; 7475 struct spdk_blob *snapshot = ctx->parent.u.snapshot.blob; 7476 7477 if (bserrno != 0 && ctx->bserrno == 0) { 7478 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7479 ctx->bserrno = bserrno; 7480 } 7481 7482 /* Revert md_ro to original state */ 7483 blob->md_ro = ctx->blob_md_ro; 7484 7485 blob->locked_operation_in_progress = false; 7486 snapshot->locked_operation_in_progress = false; 7487 7488 spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx); 7489 } 7490 7491 static void 7492 bs_set_parent_set_back_bs_dev_done(void *cb_arg, int bserrno) 7493 { 7494 struct set_parent_ctx *ctx = cb_arg; 7495 struct spdk_blob *blob = ctx->blob; 7496 7497 if (bserrno != 0) { 7498 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7499 ctx->bserrno = bserrno; 7500 bs_set_parent_close_blob(ctx, bserrno); 7501 
		return;
	}

	spdk_blob_sync_md(blob, bs_set_parent_close_blob, ctx);
}

static int
bs_set_parent_refs(struct spdk_blob *blob, struct blob_parent *parent)
{
	int rc;

	bs_blob_list_remove(blob);

	rc = blob_set_xattr(blob, BLOB_SNAPSHOT, &parent->u.snapshot.id, sizeof(spdk_blob_id), true);
	if (rc != 0) {
		SPDK_ERRLOG("error %d setting snapshot xattr\n", rc);
		return rc;
	}
	blob->parent_id = parent->u.snapshot.id;

	if (blob_is_esnap_clone(blob)) {
		/* Remove the xattr that references the external snapshot */
		blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
		blob_remove_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
	}

	bs_blob_list_add(blob);

	return 0;
}

static void
bs_set_parent_snapshot_open_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_bs_dev *back_bs_dev;

	if (bserrno != 0) {
		SPDK_ERRLOG("snapshot open error %d\n", bserrno);
		ctx->bserrno = bserrno;
		spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx);
		return;
	}

	ctx->parent.u.snapshot.blob = snapshot;
	ctx->parent.u.snapshot.id = snapshot->id;

	if (!spdk_blob_is_snapshot(snapshot)) {
		SPDK_ERRLOG("parent blob is not a snapshot\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
		return;
	}

	if (blob->active.num_clusters != snapshot->active.num_clusters) {
		SPDK_ERRLOG("parent blob has a different number of clusters than the child\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
		return;
	}

	if (blob->locked_operation_in_progress || snapshot->locked_operation_in_progress) {
		SPDK_ERRLOG("cannot set parent of blob, another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(blob, bs_set_parent_close_snapshot, ctx);
		return;
	}

	blob->locked_operation_in_progress = true;
	snapshot->locked_operation_in_progress = true;

	/* Temporarily override md_ro flag for MD modification */
	blob->md_ro = false;

	back_bs_dev = bs_create_blob_bs_dev(snapshot);

	blob_set_back_bs_dev(blob, back_bs_dev, bs_set_parent_refs, &ctx->parent,
			     bs_set_parent_set_back_bs_dev_done,
			     ctx);
}

static void
bs_set_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	struct set_parent_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob open error %d\n", bserrno);
		ctx->bserrno = bserrno;
		bs_set_parent_cleanup_finish(ctx, 0);
		return;
	}

	if (!spdk_blob_is_thin_provisioned(blob)) {
		SPDK_ERRLOG("blob is not thin-provisioned\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(blob, bs_set_parent_cleanup_finish, ctx);
		return;
	}

	ctx->blob = blob;
	ctx->blob_md_ro = blob->md_ro;

	spdk_bs_open_blob(ctx->bs, ctx->parent.u.snapshot.id, bs_set_parent_snapshot_open_cpl, ctx);
}

void
spdk_bs_blob_set_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id,
			spdk_blob_id snapshot_id, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct set_parent_ctx *ctx;

	if (snapshot_id == SPDK_BLOBID_INVALID) {
		SPDK_ERRLOG("snapshot id not valid\n");
		cb_fn(cb_arg,
-EINVAL); 7617 return; 7618 } 7619 7620 if (blob_id == snapshot_id) { 7621 SPDK_ERRLOG("blob id and snapshot id cannot be the same\n"); 7622 cb_fn(cb_arg, -EINVAL); 7623 return; 7624 } 7625 7626 if (spdk_blob_get_parent_snapshot(bs, blob_id) == snapshot_id) { 7627 SPDK_NOTICELOG("snapshot is already the parent of blob\n"); 7628 cb_fn(cb_arg, -EEXIST); 7629 return; 7630 } 7631 7632 ctx = calloc(1, sizeof(*ctx)); 7633 if (!ctx) { 7634 cb_fn(cb_arg, -ENOMEM); 7635 return; 7636 } 7637 7638 ctx->bs = bs; 7639 ctx->parent.u.snapshot.id = snapshot_id; 7640 ctx->cb_fn = cb_fn; 7641 ctx->cb_arg = cb_arg; 7642 ctx->bserrno = 0; 7643 7644 spdk_bs_open_blob(bs, blob_id, bs_set_parent_blob_open_cpl, ctx); 7645 } 7646 /* END spdk_bs_blob_set_parent */ 7647 7648 /* START spdk_bs_blob_set_external_parent */ 7649 7650 static void 7651 bs_set_external_parent_cleanup_finish(void *cb_arg, int bserrno) 7652 { 7653 struct set_parent_ctx *ctx = cb_arg; 7654 7655 if (bserrno != 0) { 7656 SPDK_ERRLOG("blob set external parent finish error %d\n", bserrno); 7657 if (ctx->bserrno == 0) { 7658 ctx->bserrno = bserrno; 7659 } 7660 } 7661 7662 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 7663 7664 free(ctx->parent.u.esnap.id); 7665 free(ctx); 7666 } 7667 7668 static void 7669 bs_set_external_parent_close_blob(void *cb_arg, int bserrno) 7670 { 7671 struct set_parent_ctx *ctx = cb_arg; 7672 struct spdk_blob *blob = ctx->blob; 7673 7674 if (bserrno != 0 && ctx->bserrno == 0) { 7675 SPDK_ERRLOG("error %d in metadata sync\n", bserrno); 7676 ctx->bserrno = bserrno; 7677 } 7678 7679 /* Revert md_ro to original state */ 7680 blob->md_ro = ctx->blob_md_ro; 7681 7682 blob->locked_operation_in_progress = false; 7683 7684 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7685 } 7686 7687 static void 7688 bs_set_external_parent_unfrozen(void *cb_arg, int bserrno) 7689 { 7690 struct set_parent_ctx *ctx = cb_arg; 7691 struct spdk_blob *blob = ctx->blob; 7692 7693 if (bserrno != 0) { 7694 SPDK_ERRLOG("error %d setting back_bs_dev\n", bserrno); 7695 ctx->bserrno = bserrno; 7696 bs_set_external_parent_close_blob(ctx, bserrno); 7697 return; 7698 } 7699 7700 spdk_blob_sync_md(blob, bs_set_external_parent_close_blob, ctx); 7701 } 7702 7703 static int 7704 bs_set_external_parent_refs(struct spdk_blob *blob, struct blob_parent *parent) 7705 { 7706 int rc; 7707 7708 bs_blob_list_remove(blob); 7709 7710 if (spdk_blob_is_clone(blob)) { 7711 /* Remove the xattr that references the snapshot */ 7712 blob->parent_id = SPDK_BLOBID_INVALID; 7713 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 7714 } 7715 7716 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, parent->u.esnap.id, 7717 parent->u.esnap.id_len, true); 7718 if (rc != 0) { 7719 SPDK_ERRLOG("error %d setting external snapshot xattr\n", rc); 7720 return rc; 7721 } 7722 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 7723 blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 7724 7725 bs_blob_list_add(blob); 7726 7727 return 0; 7728 } 7729 7730 static void 7731 bs_set_external_parent_blob_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7732 { 7733 struct set_parent_ctx *ctx = cb_arg; 7734 const void *esnap_id; 7735 size_t esnap_id_len; 7736 int rc; 7737 7738 if (bserrno != 0) { 7739 SPDK_ERRLOG("blob open error %d\n", bserrno); 7740 ctx->bserrno = bserrno; 7741 bs_set_parent_cleanup_finish(ctx, 0); 7742 return; 7743 } 7744 7745 ctx->blob = blob; 7746 ctx->blob_md_ro = blob->md_ro; 7747 7748 rc = spdk_blob_get_esnap_id(blob, &esnap_id, &esnap_id_len); 7749 if (rc == 0 && 
esnap_id != NULL && esnap_id_len == ctx->parent.u.esnap.id_len && 7750 memcmp(esnap_id, ctx->parent.u.esnap.id, esnap_id_len) == 0) { 7751 SPDK_ERRLOG("external snapshot is already the parent of blob\n"); 7752 ctx->bserrno = -EEXIST; 7753 goto error; 7754 } 7755 7756 if (!spdk_blob_is_thin_provisioned(blob)) { 7757 SPDK_ERRLOG("blob is not thin-provisioned\n"); 7758 ctx->bserrno = -EINVAL; 7759 goto error; 7760 } 7761 7762 if (blob->locked_operation_in_progress) { 7763 SPDK_ERRLOG("cannot set external parent of blob, another operation in progress\n"); 7764 ctx->bserrno = -EBUSY; 7765 goto error; 7766 } 7767 7768 blob->locked_operation_in_progress = true; 7769 7770 /* Temporarily override md_ro flag for MD modification */ 7771 blob->md_ro = false; 7772 7773 blob_set_back_bs_dev(blob, ctx->parent.u.esnap.back_bs_dev, bs_set_external_parent_refs, 7774 &ctx->parent, bs_set_external_parent_unfrozen, ctx); 7775 return; 7776 7777 error: 7778 spdk_blob_close(blob, bs_set_external_parent_cleanup_finish, ctx); 7779 } 7780 7781 void 7782 spdk_bs_blob_set_external_parent(struct spdk_blob_store *bs, spdk_blob_id blob_id, 7783 struct spdk_bs_dev *esnap_bs_dev, const void *esnap_id, 7784 uint32_t esnap_id_len, spdk_blob_op_complete cb_fn, void *cb_arg) 7785 { 7786 struct set_parent_ctx *ctx; 7787 uint64_t esnap_dev_size, cluster_sz; 7788 7789 if (sizeof(blob_id) == esnap_id_len && memcmp(&blob_id, esnap_id, sizeof(blob_id)) == 0) { 7790 SPDK_ERRLOG("blob id and external snapshot id cannot be the same\n"); 7791 cb_fn(cb_arg, -EINVAL); 7792 return; 7793 } 7794 7795 esnap_dev_size = esnap_bs_dev->blockcnt * esnap_bs_dev->blocklen; 7796 cluster_sz = spdk_bs_get_cluster_size(bs); 7797 if ((esnap_dev_size % cluster_sz) != 0) { 7798 SPDK_ERRLOG("Esnap device size %" PRIu64 " is not an integer multiple of " 7799 "cluster size %" PRIu64 "\n", esnap_dev_size, cluster_sz); 7800 cb_fn(cb_arg, -EINVAL); 7801 return; 7802 } 7803 7804 ctx = calloc(1, sizeof(*ctx)); 7805 if (!ctx) { 7806 cb_fn(cb_arg, -ENOMEM); 7807 return; 7808 } 7809 7810 ctx->parent.u.esnap.id = calloc(1, esnap_id_len); 7811 if (!ctx->parent.u.esnap.id) { 7812 free(ctx); 7813 cb_fn(cb_arg, -ENOMEM); 7814 return; 7815 } 7816 7817 ctx->bs = bs; 7818 ctx->parent.u.esnap.back_bs_dev = esnap_bs_dev; 7819 memcpy(ctx->parent.u.esnap.id, esnap_id, esnap_id_len); 7820 ctx->parent.u.esnap.id_len = esnap_id_len; 7821 ctx->cb_fn = cb_fn; 7822 ctx->cb_arg = cb_arg; 7823 ctx->bserrno = 0; 7824 7825 spdk_bs_open_blob(bs, blob_id, bs_set_external_parent_blob_open_cpl, ctx); 7826 } 7827 /* END spdk_bs_blob_set_external_parent */ 7828 7829 /* START spdk_blob_resize */ 7830 struct spdk_bs_resize_ctx { 7831 spdk_blob_op_complete cb_fn; 7832 void *cb_arg; 7833 struct spdk_blob *blob; 7834 uint64_t sz; 7835 int rc; 7836 }; 7837 7838 static void 7839 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 7840 { 7841 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7842 7843 if (rc != 0) { 7844 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 7845 } 7846 7847 if (ctx->rc != 0) { 7848 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 7849 rc = ctx->rc; 7850 } 7851 7852 ctx->blob->locked_operation_in_progress = false; 7853 7854 ctx->cb_fn(ctx->cb_arg, rc); 7855 free(ctx); 7856 } 7857 7858 static void 7859 bs_resize_freeze_cpl(void *cb_arg, int rc) 7860 { 7861 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7862 7863 if (rc != 0) { 7864 ctx->blob->locked_operation_in_progress = false; 7865 ctx->cb_fn(ctx->cb_arg, rc); 7866 free(ctx); 7867 
return; 7868 } 7869 7870 ctx->rc = blob_resize(ctx->blob, ctx->sz); 7871 7872 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 7873 } 7874 7875 void 7876 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 7877 { 7878 struct spdk_bs_resize_ctx *ctx; 7879 7880 blob_verify_md_op(blob); 7881 7882 SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz); 7883 7884 if (blob->md_ro) { 7885 cb_fn(cb_arg, -EPERM); 7886 return; 7887 } 7888 7889 if (sz == blob->active.num_clusters) { 7890 cb_fn(cb_arg, 0); 7891 return; 7892 } 7893 7894 if (blob->locked_operation_in_progress) { 7895 cb_fn(cb_arg, -EBUSY); 7896 return; 7897 } 7898 7899 ctx = calloc(1, sizeof(*ctx)); 7900 if (!ctx) { 7901 cb_fn(cb_arg, -ENOMEM); 7902 return; 7903 } 7904 7905 blob->locked_operation_in_progress = true; 7906 ctx->cb_fn = cb_fn; 7907 ctx->cb_arg = cb_arg; 7908 ctx->blob = blob; 7909 ctx->sz = sz; 7910 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 7911 } 7912 7913 /* END spdk_blob_resize */ 7914 7915 7916 /* START spdk_bs_delete_blob */ 7917 7918 static void 7919 bs_delete_close_cpl(void *cb_arg, int bserrno) 7920 { 7921 spdk_bs_sequence_t *seq = cb_arg; 7922 7923 bs_sequence_finish(seq, bserrno); 7924 } 7925 7926 static void 7927 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7928 { 7929 struct spdk_blob *blob = cb_arg; 7930 7931 if (bserrno != 0) { 7932 /* 7933 * We already removed this blob from the blobstore tailq, so 7934 * we need to free it here since this is the last reference 7935 * to it. 7936 */ 7937 blob_free(blob); 7938 bs_delete_close_cpl(seq, bserrno); 7939 return; 7940 } 7941 7942 /* 7943 * This will immediately decrement the ref_count and call 7944 * the completion routine since the metadata state is clean. 7945 * By calling spdk_blob_close, we reduce the number of call 7946 * points into code that touches the blob->open_ref count 7947 * and the blobstore's blob list. 
7948 */ 7949 spdk_blob_close(blob, bs_delete_close_cpl, seq); 7950 } 7951 7952 struct delete_snapshot_ctx { 7953 struct spdk_blob_list *parent_snapshot_entry; 7954 struct spdk_blob *snapshot; 7955 struct spdk_blob_md_page *page; 7956 bool snapshot_md_ro; 7957 struct spdk_blob *clone; 7958 bool clone_md_ro; 7959 spdk_blob_op_with_handle_complete cb_fn; 7960 void *cb_arg; 7961 int bserrno; 7962 uint32_t next_extent_page; 7963 }; 7964 7965 static void 7966 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 7967 { 7968 struct delete_snapshot_ctx *ctx = cb_arg; 7969 7970 if (bserrno != 0) { 7971 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 7972 } 7973 7974 assert(ctx != NULL); 7975 7976 if (bserrno != 0 && ctx->bserrno == 0) { 7977 ctx->bserrno = bserrno; 7978 } 7979 7980 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 7981 spdk_free(ctx->page); 7982 free(ctx); 7983 } 7984 7985 static void 7986 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 7987 { 7988 struct delete_snapshot_ctx *ctx = cb_arg; 7989 7990 if (bserrno != 0) { 7991 ctx->bserrno = bserrno; 7992 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 7993 } 7994 7995 if (ctx->bserrno != 0) { 7996 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 7997 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 7998 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 7999 } 8000 8001 ctx->snapshot->locked_operation_in_progress = false; 8002 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8003 8004 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 8005 } 8006 8007 static void 8008 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 8009 { 8010 struct delete_snapshot_ctx *ctx = cb_arg; 8011 8012 ctx->clone->locked_operation_in_progress = false; 8013 ctx->clone->md_ro = ctx->clone_md_ro; 8014 8015 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8016 } 8017 8018 static void 8019 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 8020 { 8021 struct delete_snapshot_ctx *ctx = cb_arg; 8022 8023 if (bserrno) { 8024 ctx->bserrno = bserrno; 8025 delete_snapshot_cleanup_clone(ctx, 0); 8026 return; 8027 } 8028 8029 ctx->clone->locked_operation_in_progress = false; 8030 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 8031 } 8032 8033 static void 8034 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 8035 { 8036 struct delete_snapshot_ctx *ctx = cb_arg; 8037 struct spdk_blob_list *parent_snapshot_entry = NULL; 8038 struct spdk_blob_list *snapshot_entry = NULL; 8039 struct spdk_blob_list *clone_entry = NULL; 8040 struct spdk_blob_list *snapshot_clone_entry = NULL; 8041 8042 if (bserrno) { 8043 SPDK_ERRLOG("Failed to sync MD on blob\n"); 8044 ctx->bserrno = bserrno; 8045 delete_snapshot_cleanup_clone(ctx, 0); 8046 return; 8047 } 8048 8049 /* Get snapshot entry for the snapshot we want to remove */ 8050 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 8051 8052 assert(snapshot_entry != NULL); 8053 8054 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 8055 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8056 assert(clone_entry != NULL); 8057 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 8058 snapshot_entry->clone_count--; 8059 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 8060 8061 switch (ctx->snapshot->parent_id) { 8062 case SPDK_BLOBID_INVALID: 8063 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 8064 /* No parent snapshot - just remove clone 
entry */ 8065 free(clone_entry); 8066 break; 8067 default: 8068 /* This snapshot is at the same time a clone of another snapshot - we need to 8069 * update parent snapshot (remove current clone, add new one inherited from 8070 * the snapshot that is being removed) */ 8071 8072 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8073 * snapshot that we are removing */ 8074 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 8075 &snapshot_clone_entry); 8076 8077 /* Switch clone entry in parent snapshot */ 8078 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 8079 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 8080 free(snapshot_clone_entry); 8081 } 8082 8083 /* Restore md_ro flags */ 8084 ctx->clone->md_ro = ctx->clone_md_ro; 8085 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 8086 8087 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 8088 } 8089 8090 static void 8091 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 8092 { 8093 struct delete_snapshot_ctx *ctx = cb_arg; 8094 uint64_t i; 8095 8096 ctx->snapshot->md_ro = false; 8097 8098 if (bserrno) { 8099 SPDK_ERRLOG("Failed to sync MD on clone\n"); 8100 ctx->bserrno = bserrno; 8101 8102 /* Restore snapshot to previous state */ 8103 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8104 if (bserrno != 0) { 8105 delete_snapshot_cleanup_clone(ctx, bserrno); 8106 return; 8107 } 8108 8109 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8110 return; 8111 } 8112 8113 /* Clear cluster map entries for snapshot */ 8114 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8115 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 8116 if (ctx->snapshot->active.clusters[i] != 0) { 8117 ctx->snapshot->active.num_allocated_clusters--; 8118 } 8119 ctx->snapshot->active.clusters[i] = 0; 8120 } 8121 } 8122 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 8123 i < ctx->clone->active.num_extent_pages; i++) { 8124 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 8125 ctx->snapshot->active.extent_pages[i] = 0; 8126 } 8127 } 8128 8129 blob_set_thin_provision(ctx->snapshot); 8130 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 8131 8132 if (ctx->parent_snapshot_entry != NULL) { 8133 ctx->snapshot->back_bs_dev = NULL; 8134 } 8135 8136 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 8137 } 8138 8139 static void 8140 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 8141 { 8142 int bserrno; 8143 8144 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 8145 blob_back_bs_destroy(ctx->clone); 8146 8147 /* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... 
*/ 8148 if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 8149 bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot, 8150 BLOB_EXTERNAL_SNAPSHOT_ID); 8151 if (bserrno != 0) { 8152 ctx->bserrno = bserrno; 8153 8154 /* Restore snapshot to previous state */ 8155 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 8156 if (bserrno != 0) { 8157 delete_snapshot_cleanup_clone(ctx, bserrno); 8158 return; 8159 } 8160 8161 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 8162 return; 8163 } 8164 ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 8165 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8166 /* Do not delete the external snapshot along with this snapshot */ 8167 ctx->snapshot->back_bs_dev = NULL; 8168 ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 8169 } else if (ctx->parent_snapshot_entry != NULL) { 8170 /* ...to parent snapshot */ 8171 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 8172 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 8173 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 8174 sizeof(spdk_blob_id), 8175 true); 8176 } else { 8177 /* ...to blobid invalid and zeroes dev */ 8178 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 8179 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 8180 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 8181 } 8182 8183 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 8184 } 8185 8186 static void 8187 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 8188 { 8189 struct delete_snapshot_ctx *ctx = cb_arg; 8190 uint32_t *extent_page; 8191 uint64_t i; 8192 8193 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 8194 i < ctx->clone->active.num_extent_pages; i++) { 8195 if (ctx->snapshot->active.extent_pages[i] == 0) { 8196 /* No extent page to use from snapshot */ 8197 continue; 8198 } 8199 8200 extent_page = &ctx->clone->active.extent_pages[i]; 8201 if (*extent_page == 0) { 8202 /* Copy extent page from snapshot when clone did not have a matching one */ 8203 *extent_page = ctx->snapshot->active.extent_pages[i]; 8204 continue; 8205 } 8206 8207 /* Clone and snapshot both contain partially filled matching extent pages. 8208 * Update the clone extent page in place with cluster map containing the mix of both. 
*/ 8209 ctx->next_extent_page = i + 1; 8210 memset(ctx->page, 0, SPDK_BS_PAGE_SIZE); 8211 8212 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page, 8213 delete_snapshot_update_extent_pages, ctx); 8214 return; 8215 } 8216 delete_snapshot_update_extent_pages_cpl(ctx); 8217 } 8218 8219 static void 8220 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 8221 { 8222 struct delete_snapshot_ctx *ctx = cb_arg; 8223 uint64_t i; 8224 8225 /* Temporarily override md_ro flag for clone for MD modification */ 8226 ctx->clone_md_ro = ctx->clone->md_ro; 8227 ctx->clone->md_ro = false; 8228 8229 if (bserrno) { 8230 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 8231 ctx->bserrno = bserrno; 8232 delete_snapshot_cleanup_clone(ctx, 0); 8233 return; 8234 } 8235 8236 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 8237 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 8238 if (ctx->clone->active.clusters[i] == 0) { 8239 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 8240 if (ctx->clone->active.clusters[i] != 0) { 8241 ctx->clone->active.num_allocated_clusters++; 8242 } 8243 } 8244 } 8245 ctx->next_extent_page = 0; 8246 delete_snapshot_update_extent_pages(ctx, 0); 8247 } 8248 8249 static void 8250 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno) 8251 { 8252 struct delete_snapshot_ctx *ctx = cb_arg; 8253 8254 if (bserrno != 0) { 8255 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n", 8256 blob->id, bserrno); 8257 /* That error should not stop us from syncing metadata. */ 8258 } 8259 8260 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8261 } 8262 8263 static void 8264 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 8265 { 8266 struct delete_snapshot_ctx *ctx = cb_arg; 8267 8268 if (bserrno) { 8269 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 8270 ctx->bserrno = bserrno; 8271 delete_snapshot_cleanup_clone(ctx, 0); 8272 return; 8273 } 8274 8275 /* Temporarily override md_ro flag for snapshot for MD modification */ 8276 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 8277 ctx->snapshot->md_ro = false; 8278 8279 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 8280 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 8281 sizeof(spdk_blob_id), true); 8282 if (ctx->bserrno != 0) { 8283 delete_snapshot_cleanup_clone(ctx, 0); 8284 return; 8285 } 8286 8287 if (blob_is_esnap_clone(ctx->snapshot)) { 8288 blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false, 8289 delete_snapshot_esnap_channels_destroyed_cb, 8290 ctx); 8291 return; 8292 } 8293 8294 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 8295 } 8296 8297 static void 8298 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 8299 { 8300 struct delete_snapshot_ctx *ctx = cb_arg; 8301 8302 if (bserrno) { 8303 SPDK_ERRLOG("Failed to open clone\n"); 8304 ctx->bserrno = bserrno; 8305 delete_snapshot_cleanup_snapshot(ctx, 0); 8306 return; 8307 } 8308 8309 ctx->clone = clone; 8310 8311 if (clone->locked_operation_in_progress) { 8312 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 8313 ctx->bserrno = -EBUSY; 8314 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 8315 return; 8316 } 8317 8318 
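/* Claim the clone for the duration of the snapshot deletion, then freeze its I/O before its cluster map is updated. */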
clone->locked_operation_in_progress = true; 8319 8320 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 8321 } 8322 8323 static void 8324 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 8325 { 8326 struct spdk_blob_list *snapshot_entry = NULL; 8327 struct spdk_blob_list *clone_entry = NULL; 8328 struct spdk_blob_list *snapshot_clone_entry = NULL; 8329 8330 /* Get snapshot entry for the snapshot we want to remove */ 8331 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 8332 8333 assert(snapshot_entry != NULL); 8334 8335 /* Get clone of the snapshot (at this point there can be only one clone) */ 8336 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8337 assert(snapshot_entry->clone_count == 1); 8338 assert(clone_entry != NULL); 8339 8340 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 8341 * snapshot that we are removing */ 8342 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 8343 &snapshot_clone_entry); 8344 8345 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 8346 } 8347 8348 static void 8349 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 8350 { 8351 spdk_bs_sequence_t *seq = cb_arg; 8352 struct spdk_blob_list *snapshot_entry = NULL; 8353 uint32_t page_num; 8354 8355 if (bserrno) { 8356 SPDK_ERRLOG("Failed to remove blob\n"); 8357 bs_sequence_finish(seq, bserrno); 8358 return; 8359 } 8360 8361 /* Remove snapshot from the list */ 8362 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8363 if (snapshot_entry != NULL) { 8364 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 8365 free(snapshot_entry); 8366 } 8367 8368 page_num = bs_blobid_to_page(blob->id); 8369 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 8370 blob->state = SPDK_BLOB_STATE_DIRTY; 8371 blob->active.num_pages = 0; 8372 blob_resize(blob, 0); 8373 8374 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 8375 } 8376 8377 static int 8378 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 8379 { 8380 struct spdk_blob_list *snapshot_entry = NULL; 8381 struct spdk_blob_list *clone_entry = NULL; 8382 struct spdk_blob *clone = NULL; 8383 bool has_one_clone = false; 8384 8385 /* Check if this is a snapshot with clones */ 8386 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8387 if (snapshot_entry != NULL) { 8388 if (snapshot_entry->clone_count > 1) { 8389 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 8390 return -EBUSY; 8391 } else if (snapshot_entry->clone_count == 1) { 8392 has_one_clone = true; 8393 } 8394 } 8395 8396 /* Check if someone has this blob open (besides this delete context): 8397 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 8398 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 8399 * and that is ok, because we will update it accordingly */ 8400 if (blob->open_ref <= 2 && has_one_clone) { 8401 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 8402 assert(clone_entry != NULL); 8403 clone = blob_lookup(blob->bs, clone_entry->id); 8404 8405 if (blob->open_ref == 2 && clone == NULL) { 8406 /* Clone is closed and someone else opened this blob */ 8407 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8408 return -EBUSY; 8409 } 8410 8411 *update_clone = true; 8412 return 0; 8413 } 8414 8415 if (blob->open_ref > 1) { 8416 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 8417 return 
-EBUSY; 8418 } 8419 8420 assert(has_one_clone == false); 8421 *update_clone = false; 8422 return 0; 8423 } 8424 8425 static void 8426 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 8427 { 8428 spdk_bs_sequence_t *seq = cb_arg; 8429 8430 bs_sequence_finish(seq, -ENOMEM); 8431 } 8432 8433 static void 8434 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 8435 { 8436 spdk_bs_sequence_t *seq = cb_arg; 8437 struct delete_snapshot_ctx *ctx; 8438 bool update_clone = false; 8439 8440 if (bserrno != 0) { 8441 bs_sequence_finish(seq, bserrno); 8442 return; 8443 } 8444 8445 blob_verify_md_op(blob); 8446 8447 ctx = calloc(1, sizeof(*ctx)); 8448 if (ctx == NULL) { 8449 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 8450 return; 8451 } 8452 8453 ctx->snapshot = blob; 8454 ctx->cb_fn = bs_delete_blob_finish; 8455 ctx->cb_arg = seq; 8456 8457 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 8458 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 8459 if (ctx->bserrno) { 8460 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8461 return; 8462 } 8463 8464 if (blob->locked_operation_in_progress) { 8465 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 8466 ctx->bserrno = -EBUSY; 8467 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8468 return; 8469 } 8470 8471 blob->locked_operation_in_progress = true; 8472 8473 /* 8474 * Remove the blob from the blob_store list now, to ensure it does not 8475 * get returned after this point by blob_lookup(). 8476 */ 8477 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 8478 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 8479 8480 if (update_clone) { 8481 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 8482 if (!ctx->page) { 8483 ctx->bserrno = -ENOMEM; 8484 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 8485 return; 8486 } 8487 /* This blob is a snapshot with active clone - update clone first */ 8488 update_clone_on_snapshot_deletion(blob, ctx); 8489 } else { 8490 /* This blob does not have any clones - just remove it */ 8491 bs_blob_list_remove(blob); 8492 bs_delete_blob_finish(seq, blob, 0); 8493 free(ctx); 8494 } 8495 } 8496 8497 void 8498 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8499 spdk_blob_op_complete cb_fn, void *cb_arg) 8500 { 8501 struct spdk_bs_cpl cpl; 8502 spdk_bs_sequence_t *seq; 8503 8504 SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid); 8505 8506 assert(spdk_get_thread() == bs->md_thread); 8507 8508 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8509 cpl.u.blob_basic.cb_fn = cb_fn; 8510 cpl.u.blob_basic.cb_arg = cb_arg; 8511 8512 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8513 if (!seq) { 8514 cb_fn(cb_arg, -ENOMEM); 8515 return; 8516 } 8517 8518 spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq); 8519 } 8520 8521 /* END spdk_bs_delete_blob */ 8522 8523 /* START spdk_bs_open_blob */ 8524 8525 static void 8526 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8527 { 8528 struct spdk_blob *blob = cb_arg; 8529 struct spdk_blob *existing; 8530 8531 if (bserrno != 0) { 8532 blob_free(blob); 8533 seq->cpl.u.blob_handle.blob = NULL; 8534 bs_sequence_finish(seq, bserrno); 8535 return; 8536 } 8537 8538 existing = blob_lookup(blob->bs, blob->id); 8539 if (existing) { 8540 blob_free(blob); 8541 existing->open_ref++; 8542 seq->cpl.u.blob_handle.blob = existing; 8543 bs_sequence_finish(seq, 0); 8544 return; 8545 } 8546 
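/* First open of this blob: take the initial reference and index it in the open-blob tree so later opens find it via blob_lookup(). */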
8547 blob->open_ref++; 8548 8549 spdk_bit_array_set(blob->bs->open_blobids, blob->id); 8550 RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob); 8551 8552 bs_sequence_finish(seq, bserrno); 8553 } 8554 8555 static inline void 8556 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst) 8557 { 8558 #define FIELD_OK(field) \ 8559 offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size 8560 8561 #define SET_FIELD(field) \ 8562 if (FIELD_OK(field)) { \ 8563 dst->field = src->field; \ 8564 } \ 8565 8566 SET_FIELD(clear_method); 8567 SET_FIELD(esnap_ctx); 8568 8569 dst->opts_size = src->opts_size; 8570 8571 /* You should not remove this statement, but need to update the assert statement 8572 * if you add a new field, and also add a corresponding SET_FIELD statement */ 8573 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size"); 8574 8575 #undef FIELD_OK 8576 #undef SET_FIELD 8577 } 8578 8579 static void 8580 bs_open_blob(struct spdk_blob_store *bs, 8581 spdk_blob_id blobid, 8582 struct spdk_blob_open_opts *opts, 8583 spdk_blob_op_with_handle_complete cb_fn, 8584 void *cb_arg) 8585 { 8586 struct spdk_blob *blob; 8587 struct spdk_bs_cpl cpl; 8588 struct spdk_blob_open_opts opts_local; 8589 spdk_bs_sequence_t *seq; 8590 uint32_t page_num; 8591 8592 SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid); 8593 assert(spdk_get_thread() == bs->md_thread); 8594 8595 page_num = bs_blobid_to_page(blobid); 8596 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 8597 /* Invalid blobid */ 8598 cb_fn(cb_arg, NULL, -ENOENT); 8599 return; 8600 } 8601 8602 blob = blob_lookup(bs, blobid); 8603 if (blob) { 8604 blob->open_ref++; 8605 cb_fn(cb_arg, blob, 0); 8606 return; 8607 } 8608 8609 blob = blob_alloc(bs, blobid); 8610 if (!blob) { 8611 cb_fn(cb_arg, NULL, -ENOMEM); 8612 return; 8613 } 8614 8615 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 8616 if (opts) { 8617 blob_open_opts_copy(opts, &opts_local); 8618 } 8619 8620 blob->clear_method = opts_local.clear_method; 8621 8622 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 8623 cpl.u.blob_handle.cb_fn = cb_fn; 8624 cpl.u.blob_handle.cb_arg = cb_arg; 8625 cpl.u.blob_handle.blob = blob; 8626 cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx; 8627 8628 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8629 if (!seq) { 8630 blob_free(blob); 8631 cb_fn(cb_arg, NULL, -ENOMEM); 8632 return; 8633 } 8634 8635 blob_load(seq, blob, bs_open_blob_cpl, blob); 8636 } 8637 8638 void 8639 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 8640 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8641 { 8642 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 8643 } 8644 8645 void 8646 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 8647 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8648 { 8649 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 8650 } 8651 8652 /* END spdk_bs_open_blob */ 8653 8654 /* START spdk_blob_set_read_only */ 8655 int 8656 spdk_blob_set_read_only(struct spdk_blob *blob) 8657 { 8658 blob_verify_md_op(blob); 8659 8660 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 8661 8662 blob->state = SPDK_BLOB_STATE_DIRTY; 8663 return 0; 8664 } 8665 /* END spdk_blob_set_read_only */ 8666 8667 /* START spdk_blob_sync_md */ 8668 8669 static void 8670 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8671 { 8672 struct spdk_blob *blob = cb_arg; 8673 8674 if (bserrno == 0 && 
(blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 8675 blob->data_ro = true; 8676 blob->md_ro = true; 8677 } 8678 8679 bs_sequence_finish(seq, bserrno); 8680 } 8681 8682 static void 8683 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8684 { 8685 struct spdk_bs_cpl cpl; 8686 spdk_bs_sequence_t *seq; 8687 8688 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8689 cpl.u.blob_basic.cb_fn = cb_fn; 8690 cpl.u.blob_basic.cb_arg = cb_arg; 8691 8692 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8693 if (!seq) { 8694 cb_fn(cb_arg, -ENOMEM); 8695 return; 8696 } 8697 8698 blob_persist(seq, blob, blob_sync_md_cpl, blob); 8699 } 8700 8701 void 8702 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8703 { 8704 blob_verify_md_op(blob); 8705 8706 SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id); 8707 8708 if (blob->md_ro) { 8709 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 8710 cb_fn(cb_arg, 0); 8711 return; 8712 } 8713 8714 blob_sync_md(blob, cb_fn, cb_arg); 8715 } 8716 8717 /* END spdk_blob_sync_md */ 8718 8719 struct spdk_blob_cluster_op_ctx { 8720 struct spdk_thread *thread; 8721 struct spdk_blob *blob; 8722 uint32_t cluster_num; /* cluster index in blob */ 8723 uint32_t cluster; /* cluster on disk */ 8724 uint32_t extent_page; /* extent page on disk */ 8725 struct spdk_blob_md_page *page; /* preallocated extent page */ 8726 int rc; 8727 spdk_blob_op_complete cb_fn; 8728 void *cb_arg; 8729 }; 8730 8731 static void 8732 blob_op_cluster_msg_cpl(void *arg) 8733 { 8734 struct spdk_blob_cluster_op_ctx *ctx = arg; 8735 8736 ctx->cb_fn(ctx->cb_arg, ctx->rc); 8737 free(ctx); 8738 } 8739 8740 static void 8741 blob_op_cluster_msg_cb(void *arg, int bserrno) 8742 { 8743 struct spdk_blob_cluster_op_ctx *ctx = arg; 8744 8745 ctx->rc = bserrno; 8746 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8747 } 8748 8749 static void 8750 blob_insert_new_ep_cb(void *arg, int bserrno) 8751 { 8752 struct spdk_blob_cluster_op_ctx *ctx = arg; 8753 uint32_t *extent_page; 8754 8755 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8756 *extent_page = ctx->extent_page; 8757 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8758 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8759 } 8760 8761 struct spdk_blob_write_extent_page_ctx { 8762 struct spdk_blob_store *bs; 8763 8764 uint32_t extent; 8765 struct spdk_blob_md_page *page; 8766 }; 8767 8768 static void 8769 blob_free_cluster_msg_cb(void *arg, int bserrno) 8770 { 8771 struct spdk_blob_cluster_op_ctx *ctx = arg; 8772 8773 spdk_spin_lock(&ctx->blob->bs->used_lock); 8774 bs_release_cluster(ctx->blob->bs, ctx->cluster); 8775 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8776 8777 ctx->rc = bserrno; 8778 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8779 } 8780 8781 static void 8782 blob_free_cluster_update_ep_cb(void *arg, int bserrno) 8783 { 8784 struct spdk_blob_cluster_op_ctx *ctx = arg; 8785 8786 if (bserrno != 0 || ctx->blob->bs->clean == 0) { 8787 blob_free_cluster_msg_cb(ctx, bserrno); 8788 return; 8789 } 8790 8791 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8792 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8793 } 8794 8795 static void 8796 blob_free_cluster_free_ep_cb(void *arg, int bserrno) 8797 { 8798 struct spdk_blob_cluster_op_ctx *ctx = arg; 8799 8800 spdk_spin_lock(&ctx->blob->bs->used_lock); 8801 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8802 bs_release_md_page(ctx->blob->bs, 
ctx->extent_page); 8803 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8804 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8805 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8806 } 8807 8808 static void 8809 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8810 { 8811 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8812 8813 free(ctx); 8814 bs_sequence_finish(seq, bserrno); 8815 } 8816 8817 static void 8818 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8819 { 8820 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8821 8822 if (bserrno != 0) { 8823 blob_persist_extent_page_cpl(seq, ctx, bserrno); 8824 return; 8825 } 8826 bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent), 8827 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 8828 blob_persist_extent_page_cpl, ctx); 8829 } 8830 8831 static void 8832 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 8833 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 8834 { 8835 struct spdk_blob_write_extent_page_ctx *ctx; 8836 spdk_bs_sequence_t *seq; 8837 struct spdk_bs_cpl cpl; 8838 8839 ctx = calloc(1, sizeof(*ctx)); 8840 if (!ctx) { 8841 cb_fn(cb_arg, -ENOMEM); 8842 return; 8843 } 8844 ctx->bs = blob->bs; 8845 ctx->extent = extent; 8846 ctx->page = page; 8847 8848 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8849 cpl.u.blob_basic.cb_fn = cb_fn; 8850 cpl.u.blob_basic.cb_arg = cb_arg; 8851 8852 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8853 if (!seq) { 8854 free(ctx); 8855 cb_fn(cb_arg, -ENOMEM); 8856 return; 8857 } 8858 8859 assert(page); 8860 page->next = SPDK_INVALID_MD_PAGE; 8861 page->id = blob->id; 8862 page->sequence_num = 0; 8863 8864 blob_serialize_extent_page(blob, cluster_num, page); 8865 8866 page->crc = blob_md_page_calc_crc(page); 8867 8868 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true); 8869 8870 bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx); 8871 } 8872 8873 static void 8874 blob_insert_cluster_msg(void *arg) 8875 { 8876 struct spdk_blob_cluster_op_ctx *ctx = arg; 8877 uint32_t *extent_page; 8878 8879 ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 8880 if (ctx->rc != 0) { 8881 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8882 return; 8883 } 8884 8885 if (ctx->blob->use_extent_table == false) { 8886 /* Extent table is not used, proceed with sync of md that will only use extents_rle. */ 8887 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8888 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8889 return; 8890 } 8891 8892 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8893 if (*extent_page == 0) { 8894 /* Extent page requires allocation. 8895 * It was already claimed in the used_md_pages map and placed in ctx. */ 8896 assert(ctx->extent_page != 0); 8897 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8898 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 8899 blob_insert_new_ep_cb, ctx); 8900 } else { 8901 /* It is possible for the original thread to have allocated an extent page for 8902 * a different cluster in the same extent page. In such a case proceed with 8903 * updating the existing extent page, but release the additional one.
*/ 8904 if (ctx->extent_page != 0) { 8905 spdk_spin_lock(&ctx->blob->bs->used_lock); 8906 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8907 bs_release_md_page(ctx->blob->bs, ctx->extent_page); 8908 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8909 ctx->extent_page = 0; 8910 } 8911 /* Extent page already allocated. 8912 * Every cluster allocation requires just an update of a single extent page. */ 8913 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 8914 blob_op_cluster_msg_cb, ctx); 8915 } 8916 } 8917 8918 static void 8919 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 8920 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page, 8921 spdk_blob_op_complete cb_fn, void *cb_arg) 8922 { 8923 struct spdk_blob_cluster_op_ctx *ctx; 8924 8925 ctx = calloc(1, sizeof(*ctx)); 8926 if (ctx == NULL) { 8927 cb_fn(cb_arg, -ENOMEM); 8928 return; 8929 } 8930 8931 ctx->thread = spdk_get_thread(); 8932 ctx->blob = blob; 8933 ctx->cluster_num = cluster_num; 8934 ctx->cluster = cluster; 8935 ctx->extent_page = extent_page; 8936 ctx->page = page; 8937 ctx->cb_fn = cb_fn; 8938 ctx->cb_arg = cb_arg; 8939 8940 spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx); 8941 } 8942 8943 static void 8944 blob_free_cluster_msg(void *arg) 8945 { 8946 struct spdk_blob_cluster_op_ctx *ctx = arg; 8947 uint32_t *extent_page; 8948 uint32_t start_cluster_idx; 8949 bool free_extent_page = true; 8950 size_t i; 8951 8952 ctx->cluster = bs_lba_to_cluster(ctx->blob->bs, ctx->blob->active.clusters[ctx->cluster_num]); 8953 8954 /* If there were concurrent unmaps to the same cluster, only release the cluster on the first one */ 8955 if (ctx->cluster == 0) { 8956 blob_op_cluster_msg_cb(ctx, 0); 8957 return; 8958 } 8959 8960 ctx->blob->active.clusters[ctx->cluster_num] = 0; 8961 if (ctx->cluster != 0) { 8962 ctx->blob->active.num_allocated_clusters--; 8963 } 8964 8965 if (ctx->blob->use_extent_table == false) { 8966 /* Extent table is not used, proceed with sync of md that will only use extents_rle.
*/ 8967 spdk_spin_lock(&ctx->blob->bs->used_lock); 8968 bs_release_cluster(ctx->blob->bs, ctx->cluster); 8969 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8970 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8971 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8972 return; 8973 } 8974 8975 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8976 8977 /* There shouldn't be parallel release operations on the same cluster */ 8978 assert(*extent_page == ctx->extent_page); 8979 8980 start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP; 8981 for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) { 8982 if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) { 8983 free_extent_page = false; 8984 break; 8985 } 8986 } 8987 8988 if (free_extent_page) { 8989 assert(ctx->extent_page != 0); 8990 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8991 ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0; 8992 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 8993 blob_free_cluster_free_ep_cb, ctx); 8994 } else { 8995 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 8996 blob_free_cluster_update_ep_cb, ctx); 8997 } 8998 } 8999 9000 9001 static void 9002 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page, 9003 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 9004 { 9005 struct spdk_blob_cluster_op_ctx *ctx; 9006 9007 ctx = calloc(1, sizeof(*ctx)); 9008 if (ctx == NULL) { 9009 cb_fn(cb_arg, -ENOMEM); 9010 return; 9011 } 9012 9013 ctx->thread = spdk_get_thread(); 9014 ctx->blob = blob; 9015 ctx->cluster_num = cluster_num; 9016 ctx->extent_page = extent_page; 9017 ctx->page = page; 9018 ctx->cb_fn = cb_fn; 9019 ctx->cb_arg = cb_arg; 9020 9021 spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx); 9022 } 9023 9024 /* START spdk_blob_close */ 9025 9026 static void 9027 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9028 { 9029 struct spdk_blob *blob = cb_arg; 9030 9031 if (bserrno == 0) { 9032 blob->open_ref--; 9033 if (blob->open_ref == 0) { 9034 /* 9035 * Blobs with active.num_pages == 0 are deleted blobs. 9036 * These blobs are removed from the blob_store list 9037 * when the deletion process starts - so don't try to 9038 * remove them again.
9039 */ 9040 if (blob->active.num_pages > 0) { 9041 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 9042 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 9043 } 9044 blob_free(blob); 9045 } 9046 } 9047 9048 bs_sequence_finish(seq, bserrno); 9049 } 9050 9051 static void 9052 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno) 9053 { 9054 spdk_bs_sequence_t *seq = cb_arg; 9055 9056 if (bserrno != 0) { 9057 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n", 9058 blob->id, bserrno); 9059 bs_sequence_finish(seq, bserrno); 9060 return; 9061 } 9062 9063 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n", 9064 blob->id, spdk_thread_get_name(spdk_get_thread())); 9065 9066 /* Sync metadata */ 9067 blob_persist(seq, blob, blob_close_cpl, blob); 9068 } 9069 9070 void 9071 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 9072 { 9073 struct spdk_bs_cpl cpl; 9074 spdk_bs_sequence_t *seq; 9075 9076 blob_verify_md_op(blob); 9077 9078 SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id); 9079 9080 if (blob->open_ref == 0) { 9081 cb_fn(cb_arg, -EBADF); 9082 return; 9083 } 9084 9085 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 9086 cpl.u.blob_basic.cb_fn = cb_fn; 9087 cpl.u.blob_basic.cb_arg = cb_arg; 9088 9089 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 9090 if (!seq) { 9091 cb_fn(cb_arg, -ENOMEM); 9092 return; 9093 } 9094 9095 if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) { 9096 blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq); 9097 return; 9098 } 9099 9100 /* Sync metadata */ 9101 blob_persist(seq, blob, blob_close_cpl, blob); 9102 } 9103 9104 /* END spdk_blob_close */ 9105 9106 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 9107 { 9108 return spdk_get_io_channel(bs); 9109 } 9110 9111 void 9112 spdk_bs_free_io_channel(struct spdk_io_channel *channel) 9113 { 9114 blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel)); 9115 spdk_put_io_channel(channel); 9116 } 9117 9118 void 9119 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 9120 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9121 { 9122 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9123 SPDK_BLOB_UNMAP); 9124 } 9125 9126 void 9127 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 9128 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 9129 { 9130 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 9131 SPDK_BLOB_WRITE_ZEROES); 9132 } 9133 9134 void 9135 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 9136 void *payload, uint64_t offset, uint64_t length, 9137 spdk_blob_op_complete cb_fn, void *cb_arg) 9138 { 9139 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9140 SPDK_BLOB_WRITE); 9141 } 9142 9143 void 9144 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 9145 void *payload, uint64_t offset, uint64_t length, 9146 spdk_blob_op_complete cb_fn, void *cb_arg) 9147 { 9148 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 9149 SPDK_BLOB_READ); 9150 } 9151 9152 void 9153 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 9154 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9155 spdk_blob_op_complete cb_fn, void *cb_arg) 
9156 { 9157 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 9158 } 9159 9160 void 9161 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 9162 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9163 spdk_blob_op_complete cb_fn, void *cb_arg) 9164 { 9165 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 9166 } 9167 9168 void 9169 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9170 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9171 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9172 { 9173 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 9174 io_opts); 9175 } 9176 9177 void 9178 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 9179 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 9180 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 9181 { 9182 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 9183 io_opts); 9184 } 9185 9186 struct spdk_bs_iter_ctx { 9187 int64_t page_num; 9188 struct spdk_blob_store *bs; 9189 9190 spdk_blob_op_with_handle_complete cb_fn; 9191 void *cb_arg; 9192 }; 9193 9194 static void 9195 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 9196 { 9197 struct spdk_bs_iter_ctx *ctx = cb_arg; 9198 struct spdk_blob_store *bs = ctx->bs; 9199 spdk_blob_id id; 9200 9201 if (bserrno == 0) { 9202 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 9203 free(ctx); 9204 return; 9205 } 9206 9207 ctx->page_num++; 9208 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 9209 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 9210 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 9211 free(ctx); 9212 return; 9213 } 9214 9215 id = bs_page_to_blobid(ctx->page_num); 9216 9217 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 9218 } 9219 9220 void 9221 spdk_bs_iter_first(struct spdk_blob_store *bs, 9222 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9223 { 9224 struct spdk_bs_iter_ctx *ctx; 9225 9226 ctx = calloc(1, sizeof(*ctx)); 9227 if (!ctx) { 9228 cb_fn(cb_arg, NULL, -ENOMEM); 9229 return; 9230 } 9231 9232 ctx->page_num = -1; 9233 ctx->bs = bs; 9234 ctx->cb_fn = cb_fn; 9235 ctx->cb_arg = cb_arg; 9236 9237 bs_iter_cpl(ctx, NULL, -1); 9238 } 9239 9240 static void 9241 bs_iter_close_cpl(void *cb_arg, int bserrno) 9242 { 9243 struct spdk_bs_iter_ctx *ctx = cb_arg; 9244 9245 bs_iter_cpl(ctx, NULL, -1); 9246 } 9247 9248 void 9249 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 9250 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9251 { 9252 struct spdk_bs_iter_ctx *ctx; 9253 9254 assert(blob != NULL); 9255 9256 ctx = calloc(1, sizeof(*ctx)); 9257 if (!ctx) { 9258 cb_fn(cb_arg, NULL, -ENOMEM); 9259 return; 9260 } 9261 9262 ctx->page_num = bs_blobid_to_page(blob->id); 9263 ctx->bs = bs; 9264 ctx->cb_fn = cb_fn; 9265 ctx->cb_arg = cb_arg; 9266 9267 /* Close the existing blob */ 9268 spdk_blob_close(blob, bs_iter_close_cpl, ctx); 9269 } 9270 9271 static int 9272 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 9273 uint16_t value_len, bool internal) 9274 { 9275 struct spdk_xattr_tailq *xattrs; 9276 struct spdk_xattr *xattr; 9277 size_t desc_size; 9278 void *tmp; 9279 9280 blob_verify_md_op(blob); 9281 9282 if 
(blob->md_ro) { 9283 return -EPERM; 9284 } 9285 9286 desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len; 9287 if (desc_size > SPDK_BS_MAX_DESC_SIZE) { 9288 SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page %zu\n", name, 9289 desc_size, SPDK_BS_MAX_DESC_SIZE); 9290 return -ENOMEM; 9291 } 9292 9293 if (internal) { 9294 xattrs = &blob->xattrs_internal; 9295 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR; 9296 } else { 9297 xattrs = &blob->xattrs; 9298 } 9299 9300 TAILQ_FOREACH(xattr, xattrs, link) { 9301 if (!strcmp(name, xattr->name)) { 9302 tmp = malloc(value_len); 9303 if (!tmp) { 9304 return -ENOMEM; 9305 } 9306 9307 free(xattr->value); 9308 xattr->value_len = value_len; 9309 xattr->value = tmp; 9310 memcpy(xattr->value, value, value_len); 9311 9312 blob->state = SPDK_BLOB_STATE_DIRTY; 9313 9314 return 0; 9315 } 9316 } 9317 9318 xattr = calloc(1, sizeof(*xattr)); 9319 if (!xattr) { 9320 return -ENOMEM; 9321 } 9322 9323 xattr->name = strdup(name); 9324 if (!xattr->name) { 9325 free(xattr); 9326 return -ENOMEM; 9327 } 9328 9329 xattr->value_len = value_len; 9330 xattr->value = malloc(value_len); 9331 if (!xattr->value) { 9332 free(xattr->name); 9333 free(xattr); 9334 return -ENOMEM; 9335 } 9336 memcpy(xattr->value, value, value_len); 9337 TAILQ_INSERT_TAIL(xattrs, xattr, link); 9338 9339 blob->state = SPDK_BLOB_STATE_DIRTY; 9340 9341 return 0; 9342 } 9343 9344 int 9345 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 9346 uint16_t value_len) 9347 { 9348 return blob_set_xattr(blob, name, value, value_len, false); 9349 } 9350 9351 static int 9352 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal) 9353 { 9354 struct spdk_xattr_tailq *xattrs; 9355 struct spdk_xattr *xattr; 9356 9357 blob_verify_md_op(blob); 9358 9359 if (blob->md_ro) { 9360 return -EPERM; 9361 } 9362 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 9363 9364 TAILQ_FOREACH(xattr, xattrs, link) { 9365 if (!strcmp(name, xattr->name)) { 9366 TAILQ_REMOVE(xattrs, xattr, link); 9367 free(xattr->value); 9368 free(xattr->name); 9369 free(xattr); 9370 9371 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) { 9372 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR; 9373 } 9374 blob->state = SPDK_BLOB_STATE_DIRTY; 9375 9376 return 0; 9377 } 9378 } 9379 9380 return -ENOENT; 9381 } 9382 9383 int 9384 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name) 9385 { 9386 return blob_remove_xattr(blob, name, false); 9387 } 9388 9389 static int 9390 blob_get_xattr_value(struct spdk_blob *blob, const char *name, 9391 const void **value, size_t *value_len, bool internal) 9392 { 9393 struct spdk_xattr *xattr; 9394 struct spdk_xattr_tailq *xattrs; 9395 9396 xattrs = internal ?
&blob->xattrs_internal : &blob->xattrs; 9397 9398 TAILQ_FOREACH(xattr, xattrs, link) { 9399 if (!strcmp(name, xattr->name)) { 9400 *value = xattr->value; 9401 *value_len = xattr->value_len; 9402 return 0; 9403 } 9404 } 9405 return -ENOENT; 9406 } 9407 9408 int 9409 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 9410 const void **value, size_t *value_len) 9411 { 9412 blob_verify_md_op(blob); 9413 9414 return blob_get_xattr_value(blob, name, value, value_len, false); 9415 } 9416 9417 struct spdk_xattr_names { 9418 uint32_t count; 9419 const char *names[0]; 9420 }; 9421 9422 static int 9423 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 9424 { 9425 struct spdk_xattr *xattr; 9426 int count = 0; 9427 9428 TAILQ_FOREACH(xattr, xattrs, link) { 9429 count++; 9430 } 9431 9432 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 9433 if (*names == NULL) { 9434 return -ENOMEM; 9435 } 9436 9437 TAILQ_FOREACH(xattr, xattrs, link) { 9438 (*names)->names[(*names)->count++] = xattr->name; 9439 } 9440 9441 return 0; 9442 } 9443 9444 int 9445 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 9446 { 9447 blob_verify_md_op(blob); 9448 9449 return blob_get_xattr_names(&blob->xattrs, names); 9450 } 9451 9452 uint32_t 9453 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 9454 { 9455 assert(names != NULL); 9456 9457 return names->count; 9458 } 9459 9460 const char * 9461 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 9462 { 9463 if (index >= names->count) { 9464 return NULL; 9465 } 9466 9467 return names->names[index]; 9468 } 9469 9470 void 9471 spdk_xattr_names_free(struct spdk_xattr_names *names) 9472 { 9473 free(names); 9474 } 9475 9476 struct spdk_bs_type 9477 spdk_bs_get_bstype(struct spdk_blob_store *bs) 9478 { 9479 return bs->bstype; 9480 } 9481 9482 void 9483 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 9484 { 9485 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 9486 } 9487 9488 bool 9489 spdk_blob_is_read_only(struct spdk_blob *blob) 9490 { 9491 assert(blob != NULL); 9492 return (blob->data_ro || blob->md_ro); 9493 } 9494 9495 bool 9496 spdk_blob_is_snapshot(struct spdk_blob *blob) 9497 { 9498 struct spdk_blob_list *snapshot_entry; 9499 9500 assert(blob != NULL); 9501 9502 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 9503 if (snapshot_entry == NULL) { 9504 return false; 9505 } 9506 9507 return true; 9508 } 9509 9510 bool 9511 spdk_blob_is_clone(struct spdk_blob *blob) 9512 { 9513 assert(blob != NULL); 9514 9515 if (blob->parent_id != SPDK_BLOBID_INVALID && 9516 blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 9517 assert(spdk_blob_is_thin_provisioned(blob)); 9518 return true; 9519 } 9520 9521 return false; 9522 } 9523 9524 bool 9525 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 9526 { 9527 assert(blob != NULL); 9528 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 9529 } 9530 9531 bool 9532 spdk_blob_is_esnap_clone(const struct spdk_blob *blob) 9533 { 9534 return blob_is_esnap_clone(blob); 9535 } 9536 9537 static void 9538 blob_update_clear_method(struct spdk_blob *blob) 9539 { 9540 enum blob_clear_method stored_cm; 9541 9542 assert(blob != NULL); 9543 9544 /* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored 9545 * in metadata previously. If something other than the default was 9546 * specified, ignore the stored value and use what was passed in.

struct spdk_bs_type
spdk_bs_get_bstype(struct spdk_blob_store *bs)
{
	return bs->bstype;
}

void
spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
{
	memcpy(&bs->bstype, &bstype, sizeof(bstype));
}

bool
spdk_blob_is_read_only(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return (blob->data_ro || blob->md_ro);
}

bool
spdk_blob_is_snapshot(struct spdk_blob *blob)
{
	struct spdk_blob_list *snapshot_entry;

	assert(blob != NULL);

	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry == NULL) {
		return false;
	}

	return true;
}

bool
spdk_blob_is_clone(struct spdk_blob *blob)
{
	assert(blob != NULL);

	if (blob->parent_id != SPDK_BLOBID_INVALID &&
	    blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return true;
	}

	return false;
}

bool
spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
}

bool
spdk_blob_is_esnap_clone(const struct spdk_blob *blob)
{
	return blob_is_esnap_clone(blob);
}

static void
blob_update_clear_method(struct spdk_blob *blob)
{
	enum blob_clear_method stored_cm;

	assert(blob != NULL);

	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
	 * in metadata previously. If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
	 */
	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);

	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
		blob->clear_method = stored_cm;
	} else if (blob->clear_method != stored_cm) {
		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
			     blob->clear_method, stored_cm);
	}
}

spdk_blob_id
spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
			if (clone_entry->id == blob_id) {
				return snapshot_entry->id;
			}
		}
	}

	return SPDK_BLOBID_INVALID;
}

int
spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
		     size_t *count)
{
	struct spdk_blob_list *snapshot_entry, *clone_entry;
	size_t n;

	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
	if (snapshot_entry == NULL) {
		*count = 0;
		return 0;
	}

	if (ids == NULL || *count < snapshot_entry->clone_count) {
		*count = snapshot_entry->clone_count;
		return -ENOMEM;
	}
	*count = snapshot_entry->clone_count;

	n = 0;
	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
		ids[n++] = clone_entry->id;
	}

	return 0;
}
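
/*
 * Example: inspecting blob lineage with the APIs above. An illustrative
 * sketch only (the function name is hypothetical). It shows the usual
 * two-call pattern for spdk_blob_get_clones(): the first call with NULL ids
 * fails with -ENOMEM and reports the required count.
 */
#if 0
static void
example_list_clones(struct spdk_blob_store *bs, struct spdk_blob *blob)
{
	spdk_blob_id parent, *ids;
	size_t count = 0, i;

	if (spdk_blob_is_clone(blob)) {
		parent = spdk_blob_get_parent_snapshot(bs, spdk_blob_get_id(blob));
		SPDK_NOTICELOG("parent snapshot: 0x%" PRIx64 "\n", parent);
	}

	if (spdk_blob_get_clones(bs, spdk_blob_get_id(blob), NULL, &count) == -ENOMEM) {
		ids = calloc(count, sizeof(*ids));
		if (ids != NULL &&
		    spdk_blob_get_clones(bs, spdk_blob_get_id(blob), ids, &count) == 0) {
			for (i = 0; i < count; i++) {
				SPDK_NOTICELOG("clone: 0x%" PRIx64 "\n", ids[i]);
			}
		}
		free(ids);
	}
}
#endif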
9659 } 9660 bs_load_grow_continue(ctx); 9661 } 9662 9663 static void 9664 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9665 { 9666 struct spdk_bs_load_ctx *ctx = cb_arg; 9667 9668 if (bserrno != 0) { 9669 bs_load_ctx_fail(ctx, bserrno); 9670 return; 9671 } 9672 9673 spdk_free(ctx->mask); 9674 9675 bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 9676 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 9677 bs_load_grow_super_write_cpl, ctx); 9678 } 9679 9680 static void 9681 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9682 { 9683 struct spdk_bs_load_ctx *ctx = cb_arg; 9684 uint64_t lba, lba_count; 9685 uint64_t dev_size; 9686 uint64_t total_clusters; 9687 9688 if (bserrno != 0) { 9689 bs_load_ctx_fail(ctx, bserrno); 9690 return; 9691 } 9692 9693 /* The type must be correct */ 9694 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 9695 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 9696 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 9697 struct spdk_blob_md_page) * 8)); 9698 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9699 total_clusters = dev_size / ctx->super->cluster_size; 9700 ctx->mask->length = total_clusters; 9701 9702 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 9703 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 9704 bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count, 9705 bs_load_grow_used_clusters_write_cpl, ctx); 9706 } 9707 9708 static void 9709 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx) 9710 { 9711 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 9712 uint64_t lba, lba_count, mask_size; 9713 9714 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9715 total_clusters = dev_size / ctx->super->cluster_size; 9716 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 9717 spdk_divide_round_up(total_clusters, 8), 9718 SPDK_BS_PAGE_SIZE); 9719 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 9720 /* No necessary to grow or no space to grow */ 9721 if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) { 9722 SPDK_DEBUGLOG(blob, "No grow\n"); 9723 bs_load_grow_continue(ctx); 9724 return; 9725 } 9726 9727 SPDK_DEBUGLOG(blob, "Resize blobstore\n"); 9728 9729 ctx->super->size = dev_size; 9730 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 9731 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 9732 9733 mask_size = used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 9734 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 9735 SPDK_MALLOC_DMA); 9736 if (!ctx->mask) { 9737 bs_load_ctx_fail(ctx, -ENOMEM); 9738 return; 9739 } 9740 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 9741 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 9742 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 9743 bs_load_grow_used_clusters_read_cpl, ctx); 9744 } 9745 9746 static void 9747 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9748 { 9749 struct spdk_bs_load_ctx *ctx = cb_arg; 9750 int rc; 9751 9752 rc = bs_super_validate(ctx->super, ctx->bs); 9753 if (rc != 0) { 9754 bs_load_ctx_fail(ctx, rc); 9755 return; 9756 } 9757 9758 bs_load_try_to_grow(ctx); 9759 } 9760 9761 struct 
struct spdk_bs_grow_ctx {
	struct spdk_blob_store		*bs;
	struct spdk_bs_super_block	*super;

	struct spdk_bit_pool		*new_used_clusters;
	struct spdk_bs_md_mask		*new_used_clusters_mask;

	spdk_bs_sequence_t		*seq;
};

static void
bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno)
{
	if (bserrno != 0) {
		spdk_bit_pool_free(&ctx->new_used_clusters);
	}

	bs_sequence_finish(ctx->seq, bserrno);
	free(ctx->new_used_clusters_mask);
	spdk_free(ctx->super);
	free(ctx);
}

static void
bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_grow_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;
	uint64_t total_clusters;

	if (bserrno != 0) {
		bs_grow_live_done(ctx, bserrno);
		return;
	}

	/*
	 * The blobstore is not clean until unload; for now only the super block is up to
	 * date. This is similar to the state right after blobstore init, when
	 * bs_write_used_md() has not yet executed.
	 * When cleanly unloaded, the used md pages will be written out.
	 * In case of an unclean shutdown, loading the blobstore will go through the
	 * recovery path, correctly filling out used_clusters with the new size and
	 * writing it out.
	 */
	bs->clean = 0;

	/* Reverting the super->size past this point is complex, so avoid any error paths
	 * that would require doing so. */
	spdk_spin_lock(&bs->used_lock);

	total_clusters = ctx->super->size / ctx->super->cluster_size;

	assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters));
	spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask);

	assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters));
	spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask);

	spdk_bit_pool_free(&bs->used_clusters);
	bs->used_clusters = ctx->new_used_clusters;

	bs->total_clusters = total_clusters;
	bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up(
					  bs->md_start + bs->md_len, bs->pages_per_cluster);

	bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters);
	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
	spdk_spin_unlock(&bs->used_lock);

	bs_grow_live_done(ctx, 0);
}

static void
bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_grow_ctx *ctx = cb_arg;
	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
	int rc;

	if (bserrno != 0) {
		bs_grow_live_done(ctx, bserrno);
		return;
	}

	rc = bs_super_validate(ctx->super, ctx->bs);
	if (rc != 0) {
		bs_grow_live_done(ctx, rc);
		return;
	}

	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	total_clusters = dev_size / ctx->super->cluster_size;
	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
				spdk_divide_round_up(total_clusters, 8),
				SPDK_BS_PAGE_SIZE);
	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
	/* Only compare dev_size here: the device size can change while total_clusters
	 * stays the same. */
	if (dev_size == ctx->super->size) {
		SPDK_DEBUGLOG(blob, "No need to grow blobstore\n");
		bs_grow_live_done(ctx, 0);
		return;
	}
	/*
	 * The blobstore cannot be shrunk, so fail the grow if:
	 * - the new size of the device is smaller than the size in the super block
	 * - the new total number of clusters is smaller than the used_clusters bit_pool
	 * - there is not enough space in metadata for the used_cluster_mask to be written out
	 */
	if (dev_size < ctx->super->size ||
	    total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) ||
	    used_cluster_mask_len > max_used_cluster_mask) {
		SPDK_DEBUGLOG(blob, "No space to grow blobstore\n");
		bs_grow_live_done(ctx, -ENOSPC);
		return;
	}

	SPDK_DEBUGLOG(blob, "Resizing blobstore\n");

	ctx->new_used_clusters_mask = calloc(1, total_clusters);
	if (!ctx->new_used_clusters_mask) {
		bs_grow_live_done(ctx, -ENOMEM);
		return;
	}
	ctx->new_used_clusters = spdk_bit_pool_create(total_clusters);
	if (!ctx->new_used_clusters) {
		bs_grow_live_done(ctx, -ENOMEM);
		return;
	}

	ctx->super->clean = 0;
	ctx->super->size = dev_size;
	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
	bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx);
}

void
spdk_bs_grow_live(struct spdk_blob_store *bs,
		  spdk_bs_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl	cpl;
	struct spdk_bs_grow_ctx	*ctx;

	assert(spdk_get_thread() == bs->md_thread);

	SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev);

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = cb_fn;
	cpl.u.bs_basic.cb_arg = cb_arg;

	ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->bs = bs;

	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->super) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!ctx->seq) {
		spdk_free(ctx->super);
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Read the super block */
	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_grow_live_load_super_cpl, ctx);
}
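
/*
 * Example: growing a loaded blobstore after its underlying device was
 * expanded. An illustrative sketch only; the function and callback names are
 * hypothetical, and the call must be made from the metadata thread. If the
 * device size already matches the super block, the operation completes
 * successfully without changing anything.
 */
#if 0
static void
example_grow_live_done(void *cb_arg, int bserrno)
{
	if (bserrno != 0) {
		SPDK_ERRLOG("live grow failed: %d\n", bserrno);
		return;
	}
	SPDK_NOTICELOG("blobstore grown while online\n");
}

static void
example_grow_live(struct spdk_blob_store *bs)
{
	spdk_bs_grow_live(bs, example_grow_live_done, NULL);
}
#endif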

void
spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_store	*bs;
	struct spdk_bs_cpl	cpl;
	struct spdk_bs_load_ctx *ctx;
	struct spdk_bs_opts	opts = {};
	int err;

	SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);

	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
		SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen);
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, -EINVAL);
		return;
	}

	spdk_bs_opts_init(&opts, sizeof(opts));
	if (o) {
		if (bs_opts_copy(o, &opts)) {
			dev->destroy(dev);
			cb_fn(cb_arg, NULL, -EINVAL);
			return;
		}
	}

	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, -EINVAL);
		return;
	}

	err = bs_alloc(dev, &opts, &bs, &ctx);
	if (err) {
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, err);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
	cpl.u.bs_handle.cb_fn = cb_fn;
	cpl.u.bs_handle.cb_arg = cb_arg;
	cpl.u.bs_handle.bs = bs;

	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
	if (!ctx->seq) {
		spdk_free(ctx->super);
		free(ctx);
		bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	/* Read the super block */
	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_grow_load_super_cpl, ctx);
}
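
/*
 * Example: the offline counterpart. spdk_bs_grow() loads the blobstore and,
 * if the underlying device has grown, rewrites the super block and the
 * used_clusters mask before completing the load. An illustrative sketch
 * only; names below are hypothetical.
 */
#if 0
static void
example_grow_load_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
{
	if (bserrno != 0) {
		SPDK_ERRLOG("grow-load failed: %d\n", bserrno);
		return;
	}
	SPDK_NOTICELOG("blobstore loaded with %" PRIu64 " free clusters\n",
		       spdk_bs_free_cluster_count(bs));
}

static void
example_grow_offline(struct spdk_bs_dev *dev)
{
	/* NULL opts: the defaults from spdk_bs_opts_init() are used. */
	spdk_bs_grow(dev, NULL, example_grow_load_done, NULL);
}
#endif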

int
spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len)
{
	if (!blob_is_esnap_clone(blob)) {
		return -EINVAL;
	}

	return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true);
}

struct spdk_io_channel *
blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob)
{
	struct spdk_bs_channel		*bs_channel = spdk_io_channel_get_ctx(ch);
	struct spdk_bs_dev		*bs_dev = blob->back_bs_dev;
	struct blob_esnap_channel	find = {};
	struct blob_esnap_channel	*esnap_channel, *existing;

	find.blob_id = blob->id;
	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
	if (spdk_likely(esnap_channel != NULL)) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n",
			      blob->id, spdk_thread_get_name(spdk_get_thread()));
		return esnap_channel->channel;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n",
		      blob->id, spdk_thread_get_name(spdk_get_thread()));

	esnap_channel = calloc(1, sizeof(*esnap_channel));
	if (esnap_channel == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n",
			       find.blob_id);
		return NULL;
	}
	esnap_channel->channel = bs_dev->create_channel(bs_dev);
	if (esnap_channel->channel == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id);
		free(esnap_channel);
		return NULL;
	}
	esnap_channel->blob_id = find.blob_id;
	existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
	if (spdk_unlikely(existing != NULL)) {
		/*
		 * This should be unreachable: all modifications to this tree happen on this thread.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 " lost race to allocate a channel\n", find.blob_id);
		assert(false);

		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
		free(esnap_channel);

		return existing->channel;
	}

	return esnap_channel->channel;
}

static int
blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2)
{
	return (c1->blob_id < c2->blob_id ? -1 : c1->blob_id > c2->blob_id);
}

struct blob_esnap_destroy_ctx {
	spdk_blob_op_with_handle_complete	cb_fn;
	void					*cb_arg;
	struct spdk_blob			*blob;
	struct spdk_bs_dev			*back_bs_dev;
	bool					abort_io;
};

static void
blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status)
{
	struct blob_esnap_destroy_ctx	*ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n",
		      blob->id);

	if (ctx->cb_fn != NULL) {
		ctx->cb_fn(ctx->cb_arg, blob, status);
	}
	free(ctx);

	bs->esnap_channels_unloading--;
	if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) {
		spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg);
	}
}

static void
blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i)
{
	struct blob_esnap_destroy_ctx	*ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_bs_dev		*bs_dev = ctx->back_bs_dev;
	struct spdk_io_channel		*channel = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel		*bs_channel = spdk_io_channel_get_ctx(channel);
	struct blob_esnap_channel	*esnap_channel;
	struct blob_esnap_channel	find = {};

	assert(spdk_get_thread() == spdk_io_channel_get_thread(channel));

	find.blob_id = blob->id;
	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
	if (esnap_channel != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n",
			      blob->id, spdk_thread_get_name(spdk_get_thread()));
		RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);

		if (ctx->abort_io) {
			spdk_bs_user_op_t *op, *tmp;

			TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) {
				if (op->back_channel == esnap_channel->channel) {
					TAILQ_REMOVE(&bs_channel->queued_io, op, link);
					bs_user_op_abort(op, -EIO);
				}
			}
		}

		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
		free(esnap_channel);
	}

	spdk_for_each_channel_continue(i, 0);
}

/*
 * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be
 * used when closing an esnap clone blob and after decoupling from the parent.
 */
static void
blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
				   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct blob_esnap_destroy_ctx	*ctx;

	if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) {
		if (cb_fn != NULL) {
			cb_fn(cb_arg, blob, 0);
		}
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		if (cb_fn != NULL) {
			cb_fn(cb_arg, blob, -ENOMEM);
		}
		return;
	}
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->blob = blob;
	ctx->back_bs_dev = blob->back_bs_dev;
	ctx->abort_io = abort_io;

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n",
		      blob->id);

	blob->bs->esnap_channels_unloading++;
	spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx,
			      blob_esnap_destroy_channels_done);
}

/*
 * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a
 * bs_channel is destroyed.
 */
static void
blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch)
{
	struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp;

	assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch)));

	SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n",
		      spdk_thread_get_name(spdk_get_thread()));
	RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels,
			esnap_channel_tmp) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64
			      ": destroying one channel in thread %s\n",
			      esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread()));
		RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel);
		spdk_put_io_channel(esnap_channel->channel);
		free(esnap_channel);
	}
	SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n",
		      spdk_thread_get_name(spdk_get_thread()));
}

static void
blob_set_back_bs_dev_done(void *_ctx, int bserrno)
{
	struct set_bs_dev_ctx	*ctx = _ctx;

	if (bserrno != 0) {
		/* Even though the unfreeze failed, the update may have succeeded. */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id,
			    bserrno);
	}
	ctx->cb_fn(ctx->cb_arg, ctx->bserrno);
	free(ctx);
}

static void
blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno)
{
	struct set_bs_dev_ctx	*ctx = _ctx;
	int rc;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n",
			    blob->id, bserrno);
		ctx->bserrno = bserrno;
		blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
		return;
	}

	if (blob->back_bs_dev != NULL) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
		blob->back_bs_dev = NULL;
	}

	if (ctx->parent_refs_cb_fn) {
		rc = ctx->parent_refs_cb_fn(blob, ctx->parent_refs_cb_arg);
		if (rc != 0) {
			ctx->bserrno = rc;
			blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
			return;
		}
	}

	SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id);
	blob->back_bs_dev = ctx->back_bs_dev;
	ctx->bserrno = 0;

	blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
}

static void
blob_set_back_bs_dev_frozen(void *_ctx, int bserrno)
{
	struct set_bs_dev_ctx	*ctx = _ctx;
	struct spdk_blob	*blob = ctx->blob;

	if (bserrno != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id,
			    bserrno);
		ctx->cb_fn(ctx->cb_arg, bserrno);
		free(ctx);
		return;
	}

	/*
	 * This does not prevent future reads from the esnap device because any future IO will
	 * lazily create a new esnap IO channel.
	 */
	blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx);
}

void
spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
			   spdk_blob_op_complete cb_fn, void *cb_arg)
{
	if (!blob_is_esnap_clone(blob)) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	blob_set_back_bs_dev(blob, back_bs_dev, NULL, NULL, cb_fn, cb_arg);
}

struct spdk_bs_dev *
spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob)
{
	if (!blob_is_esnap_clone(blob)) {
		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
		return NULL;
	}

	return blob->back_bs_dev;
}

bool
spdk_blob_is_degraded(const struct spdk_blob *blob)
{
	if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) {
		return true;
	}
	if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) {
		return false;
	}

	return blob->back_bs_dev->is_degraded(blob->back_bs_dev);
}
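
/*
 * Example: hot-plugging the external snapshot device of an esnap clone, for
 * instance after a missing external bdev reappears. An illustrative sketch
 * only; the function names are hypothetical, and new_dev stands in for
 * whatever creates the replacement back_bs_dev. The assumption that the
 * esnap id is a NUL-terminated string holds for lvol-style esnap clones,
 * which store a UUID string, but not necessarily for other consumers.
 */
#if 0
static void
example_esnap_replaced(void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno == 0 && !spdk_blob_is_degraded(blob)) {
		SPDK_NOTICELOG("esnap clone is fully functional again\n");
	}
}

static void
example_esnap_hotplug(struct spdk_blob *blob, struct spdk_bs_dev *new_dev)
{
	const void *id;
	size_t id_len;

	/* The stored esnap id identifies which external device to reattach. */
	if (spdk_blob_get_esnap_id(blob, &id, &id_len) != 0) {
		return;
	}
	SPDK_NOTICELOG("reattaching esnap %s\n", (const char *)id);
	spdk_blob_set_esnap_bs_dev(blob, new_dev, example_esnap_replaced, blob);
}
#endif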

SPDK_LOG_REGISTER_COMPONENT(blob)
SPDK_LOG_REGISTER_COMPONENT(blob_esnap)

SPDK_TRACE_REGISTER_FN(blob_trace, "blob", TRACE_GROUP_BLOB)
{
	struct spdk_trace_tpoint_opts opts[] = {
		{
			"BLOB_REQ_SET_START", TRACE_BLOB_REQ_SET_START,
			OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 1,
			{
				{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 }
			}
		},
		{
			"BLOB_REQ_SET_COMPLETE", TRACE_BLOB_REQ_SET_COMPLETE,
			OWNER_TYPE_NONE, OBJECT_BLOB_CB_ARG, 0,
			{
				{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 }
			}
		},
	};

	spdk_trace_register_object(OBJECT_BLOB_CB_ARG, 'a');
	spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
	spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_START, OBJECT_BLOB_CB_ARG, 1);
	spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_DONE, OBJECT_BLOB_CB_ARG, 0);
}
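
/*
 * The log components registered above can be enabled at runtime to trace this
 * file, e.g. with spdk_log_set_flag("blob") and spdk_log_set_flag("blob_esnap"),
 * or via the -L command-line option of SPDK applications.
 */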