/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

/*
 * External snapshots require a channel per thread per esnap bdev. The tree
 * is populated lazily as blob IOs are handled by the back_bs_dev. When this
 * channel is destroyed, all the channels in the tree are destroyed.
 */

struct blob_esnap_channel {
	RB_ENTRY(blob_esnap_channel)	node;
	spdk_blob_id			blob_id;
	struct spdk_io_channel		*channel;
};

static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)
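/*
 * Note that SPDK_BLOB_EXTERNAL_SNAPSHOT is deliberately kept in invalid_flags:
 * unknown invalid_flags bits fail the load (see blob_parse_page()), so an
 * older blobstore that does not understand external snapshots refuses to load
 * such a blob instead of silently treating it as a regular one.
 */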
static inline bool
blob_is_esnap_clone(const struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
}

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	assert(blob1 != NULL && blob2 != NULL);
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	assert(spdk_spin_held(&bs->used_lock));

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}
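/*
 * Cluster (and, with extent tables, extent page) allocation below must run
 * with bs->used_lock held, as the asserts enforce. A minimal caller sketch
 * (hypothetical; the real callers live in the cluster-insertion paths further
 * down this file):
 *
 *	uint64_t cluster;
 *	uint32_t lfmd = 0;
 *	int rc;
 *
 *	spdk_spin_lock(&blob->bs->used_lock);
 *	rc = bs_allocate_cluster(blob, cluster_num, &cluster, &lfmd, true);
 *	spdk_spin_unlock(&blob->bs->used_lock);
 */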
static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = 0;

	assert(spdk_spin_held(&blob->bs->used_lock));

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request. */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* An extent page shall never occupy md page 0, so start the search from 1. */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request. */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
		      blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}
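/*
 * The FIELD_OK()/SET_FIELD() pattern above keeps the *_opts_init() functions
 * compatible with callers compiled against older, smaller definitions of the
 * opts structs: only fields that fit within the caller-provided opts_size are
 * touched. A typical caller (sketch):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 16;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_create_cb, cb_arg);
 */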
static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

static void
blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_dev *bs_dev = ctx;

	if (bserrno != 0) {
		/*
		 * This is probably due to a memory allocation failure when creating the
		 * blob_esnap_destroy_ctx before iterating threads.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
			    blob->id, bserrno);
		assert(false);
	}

	if (bs_dev == NULL) {
		/*
		 * This check exists to make scanbuild happy.
		 *
		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
		 * the blobstore is being loaded. It could also be NULL if there was an error
		 * opening the esnap device. In each of these cases, no channels could have been
		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
		 * deref.
		 */
		assert(false);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
	bs_dev->destroy(bs_dev);
}

static void
blob_back_bs_destroy(struct spdk_blob *blob)
{
	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
		      blob->id);

	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
					   blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
}
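/*
 * Freeze/unfreeze are reference counted: each blob_freeze_io() call must be
 * balanced by a blob_unfreeze_io() call. While frozen_refcnt > 0, new I/O is
 * parked on the per-channel queued_io list and is replayed by
 * blob_execute_queued_io() once the count drops back to zero.
 */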
static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
}

static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}
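/*
 * The helpers below, up through blob_parse(), deserialize on-disk metadata
 * pages back into the in-memory blob. Parsing is strict: malformed descriptor
 * lengths, references to unallocated clusters and conflicting extent
 * descriptor types all fail the load with -EINVAL rather than being repaired.
 */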
static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}

static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* Extent Table already present in the md,
				 * both descriptors should never be at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;
			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD,
				 * both should never be at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match number
				 * from previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			unsigned int i;
			unsigned int cluster_count = 0;
			size_t cluster_idx_length;

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD,
				 * both should never be at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should
			 * match the current size of the blob.
			 * If changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}
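/*
 * On-disk metadata page layout, as walked by blob_parse_page(): a fixed
 * header (id, sequence_num, next), a descriptors[] region packed with
 * variable-length descriptors that each begin with a { type, length } header,
 * and a trailing crc in the final 4 bytes of the page. A zero-length PADDING
 * descriptor terminates a page early (see blob_md_page_calc_crc() below for
 * the CRC coverage).
 */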
static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD; this can
	 * happen, for example, if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}
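/*
 * Serialized xattr descriptor layout (packed back to back within a page):
 *
 *	[ type | length | name_length | value_length | name bytes | value bytes ]
 *
 * where length counts everything after the generic { type, length } header,
 * i.e. both length fields plus both payloads, matching the check in
 * blob_deserialize_xattr().
 */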
/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr *desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters field */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}
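/*
 * Only runs of unallocated extent pages are collapsed above: consecutive zero
 * entries share one descriptor slot with num_pages equal to the run length,
 * while every allocated extent page is emitted as its own entry with
 * num_pages == 1 (which blob_parse_page() asserts when loading).
 */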
static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least a single extent table entry always has to be persisted.
	 * Such a case occurs with num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
	/* Assert for scan-build false positive */
	assert(lba_per_cluster > 0);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}
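/*
 * A worked example (hypothetical geometry): with a 1 MiB cluster and 512-byte
 * blocks, lba_per_cluster == 2048. A cluster array of LBAs
 * { 2048, 4096, 0, 0, 10240 } then serializes to three extents:
 * { cluster_idx 1, length 2 }, { cluster_idx 0, length 2 } and
 * { cluster_idx 5, length 1 }.
 */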
static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr *xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}
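/*
 * Serialization order matters: flags always come first (fixed size, and the
 * assert in blob_serialize_flags() relies on them fitting in a fresh page),
 * then user xattrs, then internal xattrs, then the extent table or RLE
 * extents, growing the page chain as needed.
 */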
static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	uint32_t			next_extent_page;
	spdk_bs_sequence_t		*seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot fail\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);
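/*
 * External snapshot devices are created through the esnap_bs_dev_create
 * callback that the application registered when loading the blobstore. The
 * blobstore attaches no meaning to the esnap ID itself; it simply round-trips
 * the bytes stored in the BLOB_EXTERNAL_SNAPSHOT_ID xattr.
 */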
static int
blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
{
	struct spdk_blob_store *bs = blob->bs;
	struct spdk_bs_dev *bs_dev = NULL;
	const void *esnap_id = NULL;
	size_t id_len = 0;
	int rc;

	if (bs->esnap_bs_dev_create == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
			       "without support for esnap clones\n", blob->id);
		return -ENOTSUP;
	}
	assert(blob->back_bs_dev == NULL);

	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
	if (rc != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
		return -EINVAL;
	}
	assert(id_len > 0 && id_len < UINT32_MAX);

	SPDK_INFOLOG(blob, "Creating external snapshot device\n");

	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
				     &bs_dev);
	if (rc != 0) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
			      "with error %d\n", blob->id, rc);
		return rc;
	}

	/*
	 * Note: bs_dev might be NULL if the consumer chose not to open the external snapshot.
	 * This can especially happen during spdk_bs_load() iteration.
	 */
	if (bs_dev != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
				       "is not compatible with blobstore block size %u\n",
				       blob->id, bs_dev->blocklen, bs->io_unit_size);
			bs_dev->destroy(bs_dev);
			return -EINVAL;
		}
	}

	blob->back_bs_dev = bs_dev;
	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;

	return 0;
}

static void
blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	const void *value;
	size_t len;
	int rc;

	if (blob_is_esnap_clone(blob)) {
		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
		blob_load_final(ctx, rc);
		return;
	}

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}
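/*
 * After blob_load_backing_dev() completes, back_bs_dev is one of: a blob
 * bs_dev (clone of a snapshot), an esnap bs_dev (external clone), a zeroes
 * dev (plain thin-provisioned blob), or NULL for a fully allocated blob whose
 * reads never need to fall back to a backing device.
 */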
static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	uint64_t i;
	uint32_t crc;
	uint64_t lba;
	void *tmp;
	uint64_t sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case, the blob size should be increased by up to the amount
			 * left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(seq, ctx);
}
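/*
 * Extent pages are read one at a time into ctx->pages[0]: each completion of
 * blob_load_cpl_extents_cpl() parses the page just read and issues the read
 * of the next allocated extent page, so the callback re-enters itself until
 * the extent table is exhausted and control moves on to the backing device.
 */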
static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	int rc;
	uint32_t crc;
	uint32_t current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support
		 * for the extent table. No extent_* descriptors means that the blob
		 * has a length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in the metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(seq, ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			next_extent_page;
	struct spdk_blob_md_page	*extent_page;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

static void
bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (ctx->blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}
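/*
 * BLOB_CLEAR_WITH_DEFAULT is treated as unmap above. Note that unmap is a
 * hint on some devices and need not return zeroes on a subsequent read;
 * callers that require readable zeroes should use
 * BLOB_CLEAR_WITH_WRITE_ZEROES instead.
 */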
static int
bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
{
	uint32_t crc;
	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];

	if (super->version > SPDK_BS_VERSION ||
	    super->version < SPDK_BS_INITIAL_VERSION) {
		return -EILSEQ;
	}

	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(super->signature)) != 0) {
		return -EILSEQ;
	}

	crc = blob_md_page_calc_crc(super);
	if (crc != super->crc) {
		return -EILSEQ;
	}

	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless of bstype\n");
	} else {
		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		return -ENXIO;
	}

	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
		return -EILSEQ;
	}

	return 0;
}

static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx *next_persist, *tmp;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
}

static void
blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	spdk_spin_lock(&bs->used_lock);

	/* Release all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to release if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			bs_release_md_page(bs, blob->active.extent_pages[i]);
		}
	}

	spdk_spin_unlock(&bs->used_lock);

	if (blob->active.num_extent_pages == 0) {
		free(blob->active.extent_pages);
		blob->active.extent_pages = NULL;
		blob->active.extent_pages_array_size = 0;
	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
		assert(tmp != NULL);
		blob->active.extent_pages = tmp;
#endif
		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
	}

	blob_persist_complete(seq, ctx, bserrno);
}
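/*
 * The persist completion chain runs in the reverse of the order the functions
 * appear here: the new page chain is written, then the root page, then stale
 * metadata pages are zeroed, then truncated clusters are cleared and
 * released, and finally truncated extent pages are cleared and released
 * before blob_persist_complete() fires.
 */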
static void
blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;
	uint64_t lba;
	uint64_t lba_count;
	spdk_bs_batch_t *batch;

	batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx);
	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* Clear all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to clear if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]);
			bs_batch_write_zeroes_dev(batch, lba, lba_count);
		}
	}

	bs_batch_close(batch);
}

static void
blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	spdk_spin_lock(&bs->used_lock);
	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			bs_release_cluster(bs, cluster_num);
		}
	}
	spdk_spin_unlock(&bs->used_lock);

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;
#endif
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	/* Move on to clearing extent pages */
	blob_persist_clear_extents(seq, ctx);
}
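/*
 * blob_persist_clear_clusters() below coalesces physically contiguous
 * truncated clusters into a single unmap/write_zeroes command per run, rather
 * than issuing one command per cluster.
 */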
*/ 1979 1980 /* If a run of LBAs previously existed, clear them now */ 1981 if (lba_count > 0) { 1982 bs_batch_clear_dev(ctx, batch, lba, lba_count); 1983 } 1984 1985 /* Start building the next batch */ 1986 lba = next_lba; 1987 if (next_lba > 0) { 1988 lba_count = next_lba_count; 1989 } else { 1990 lba_count = 0; 1991 } 1992 } 1993 1994 /* If we ended with a contiguous set of LBAs, clear them now */ 1995 if (lba_count > 0) { 1996 bs_batch_clear_dev(ctx, batch, lba, lba_count); 1997 } 1998 1999 bs_batch_close(batch); 2000 } 2001 2002 static void 2003 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2004 { 2005 struct spdk_blob_persist_ctx *ctx = cb_arg; 2006 struct spdk_blob *blob = ctx->blob; 2007 struct spdk_blob_store *bs = blob->bs; 2008 size_t i; 2009 2010 if (bserrno != 0) { 2011 blob_persist_complete(seq, ctx, bserrno); 2012 return; 2013 } 2014 2015 spdk_spin_lock(&bs->used_lock); 2016 2017 /* This loop starts at 1 because the first page is special and handled 2018 * below. The pages (except the first) are never written in place, 2019 * so any pages in the clean list can simply be released here. 2020 */ 2021 for (i = 1; i < blob->clean.num_pages; i++) { 2022 bs_release_md_page(bs, blob->clean.pages[i]); 2023 } 2024 2025 if (blob->active.num_pages == 0) { 2026 uint32_t page_num; 2027 2028 page_num = bs_blobid_to_page(blob->id); 2029 bs_release_md_page(bs, page_num); 2030 } 2031 2032 spdk_spin_unlock(&bs->used_lock); 2033 2034 /* Move on to clearing clusters */ 2035 blob_persist_clear_clusters(seq, ctx); 2036 } 2037 2038 static void 2039 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2040 { 2041 struct spdk_blob_persist_ctx *ctx = cb_arg; 2042 struct spdk_blob *blob = ctx->blob; 2043 struct spdk_blob_store *bs = blob->bs; 2044 uint64_t lba; 2045 uint64_t lba_count; 2046 spdk_bs_batch_t *batch; 2047 size_t i; 2048 2049 if (bserrno != 0) { 2050 blob_persist_complete(seq, ctx, bserrno); 2051 return; 2052 } 2053 2054 batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx); 2055 2056 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 2057 2058 /* This loop starts at 1 because the first page is special and handled 2059 * below. The pages (except the first) are never written in place, 2060 * so any pages in the clean list must be zeroed. 2061 */ 2062 for (i = 1; i < blob->clean.num_pages; i++) { 2063 lba = bs_md_page_to_lba(bs, blob->clean.pages[i]); 2064 2065 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2066 } 2067 2068 /* The first page will only be zeroed if this is a delete.
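* A delete is signalled by active.num_pages == 0; only then is the root
 * page, whose on-disk location is fixed by the blobid via
 * bs_blobid_to_page(), zeroed so the blob disappears from the metadata
 * region.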
*/ 2069 if (blob->active.num_pages == 0) { 2070 uint32_t page_num; 2071 2072 /* The first page in the metadata goes where the blobid indicates */ 2073 page_num = bs_blobid_to_page(blob->id); 2074 lba = bs_md_page_to_lba(bs, page_num); 2075 2076 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2077 } 2078 2079 bs_batch_close(batch); 2080 } 2081 2082 static void 2083 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2084 { 2085 struct spdk_blob_persist_ctx *ctx = cb_arg; 2086 struct spdk_blob *blob = ctx->blob; 2087 struct spdk_blob_store *bs = blob->bs; 2088 uint64_t lba; 2089 uint32_t lba_count; 2090 struct spdk_blob_md_page *page; 2091 2092 if (bserrno != 0) { 2093 blob_persist_complete(seq, ctx, bserrno); 2094 return; 2095 } 2096 2097 if (blob->active.num_pages == 0) { 2098 /* Move on to the next step */ 2099 blob_persist_zero_pages(seq, ctx, 0); 2100 return; 2101 } 2102 2103 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2104 2105 page = &ctx->pages[0]; 2106 /* The first page in the metadata goes where the blobid indicates */ 2107 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 2108 2109 bs_sequence_write_dev(seq, page, lba, lba_count, 2110 blob_persist_zero_pages, ctx); 2111 } 2112 2113 static void 2114 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2115 { 2116 struct spdk_blob *blob = ctx->blob; 2117 struct spdk_blob_store *bs = blob->bs; 2118 uint64_t lba; 2119 uint32_t lba_count; 2120 struct spdk_blob_md_page *page; 2121 spdk_bs_batch_t *batch; 2122 size_t i; 2123 2124 /* Clusters don't move around in blobs. The list shrinks or grows 2125 * at the end, but no changes ever occur in the middle of the list. 2126 */ 2127 2128 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2129 2130 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 2131 2132 /* This starts at 1. The root page is not written until 2133 * all of the others are finished. 2134 */ 2135 for (i = 1; i < blob->active.num_pages; i++) { 2136 page = &ctx->pages[i]; 2137 assert(page->sequence_num == i); 2138 2139 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 2140 2141 bs_batch_write_dev(batch, page, lba, lba_count); 2142 } 2143 2144 bs_batch_close(batch); 2145 } 2146 2147 static int 2148 blob_resize(struct spdk_blob *blob, uint64_t sz) 2149 { 2150 uint64_t i; 2151 uint64_t *tmp; 2152 uint64_t cluster; 2153 uint32_t lfmd; /* lowest free md page */ 2154 uint64_t num_clusters; 2155 uint32_t *ep_tmp; 2156 uint64_t new_num_ep = 0, current_num_ep = 0; 2157 struct spdk_blob_store *bs; 2158 int rc; 2159 2160 bs = blob->bs; 2161 2162 blob_verify_md_op(blob); 2163 2164 if (blob->active.num_clusters == sz) { 2165 return 0; 2166 } 2167 2168 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2169 /* If this blob was resized to be larger, then smaller, then 2170 * larger without syncing, then the cluster array already 2171 * contains spare assigned clusters we can use. 2172 */ 2173 num_clusters = spdk_min(blob->active.cluster_array_size, 2174 sz); 2175 } else { 2176 num_clusters = blob->active.num_clusters; 2177 } 2178 2179 if (blob->use_extent_table) { 2180 /* Round up, since every cluster beyond the current Extent Table size 2181 * requires a new extent page.
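* Sketch of the arithmetic (SPDK_EXTENTS_PER_EP is defined in blobstore.h;
 * assume a value of 512 purely for illustration): growing to sz = 1025
 * clusters needs spdk_divide_round_up(1025, 512) = 3 extent pages, while
 * sz = 1024 fits in exactly 2.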
*/ 2182 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2183 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2184 } 2185 2186 assert(!spdk_spin_held(&bs->used_lock)); 2187 2188 /* Check first that we have enough clusters and md pages before we start claiming them. 2189 * bs->used_lock is held to ensure that clusters we think are free are still free when we go 2190 * to claim them later in this function. 2191 */ 2192 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2193 spdk_spin_lock(&bs->used_lock); 2194 if ((sz - num_clusters) > bs->num_free_clusters) { 2195 rc = -ENOSPC; 2196 goto out; 2197 } 2198 lfmd = 0; 2199 for (i = current_num_ep; i < new_num_ep ; i++) { 2200 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2201 if (lfmd == UINT32_MAX) { 2202 /* No more free md pages. Cannot satisfy the request */ 2203 rc = -ENOSPC; 2204 goto out; 2205 } 2206 } 2207 } 2208 2209 if (sz > num_clusters) { 2210 /* Expand the cluster array if necessary. 2211 * We only shrink the array when persisting. 2212 */ 2213 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2214 if (sz > 0 && tmp == NULL) { 2215 rc = -ENOMEM; 2216 goto out; 2217 } 2218 memset(tmp + blob->active.cluster_array_size, 0, 2219 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2220 blob->active.clusters = tmp; 2221 blob->active.cluster_array_size = sz; 2222 2223 /* Expand the extents table, only if enough clusters were added */ 2224 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2225 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2226 if (new_num_ep > 0 && ep_tmp == NULL) { 2227 rc = -ENOMEM; 2228 goto out; 2229 } 2230 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2231 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2232 blob->active.extent_pages = ep_tmp; 2233 blob->active.extent_pages_array_size = new_num_ep; 2234 } 2235 } 2236 2237 blob->state = SPDK_BLOB_STATE_DIRTY; 2238 2239 if (spdk_blob_is_thin_provisioned(blob) == false) { 2240 cluster = 0; 2241 lfmd = 0; 2242 for (i = num_clusters; i < sz; i++) { 2243 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2244 /* Do not increment lfmd here. lfmd will get updated 2245 * to the md_page allocated (if any) when a new extent 2246 * page is needed. Just pass that value again, 2247 * bs_allocate_cluster will just start at that index 2248 * to find the next free md_page when needed. 
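* In other words, lfmd is only a search hint: each
 * spdk_bit_array_find_first_clear() scan resumes from it, so a stale value
 * costs some extra scanning but can never yield a wrong or duplicate
 * md page.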
2249 */ 2250 } 2251 } 2252 2253 blob->active.num_clusters = sz; 2254 blob->active.num_extent_pages = new_num_ep; 2255 2256 rc = 0; 2257 out: 2258 if (spdk_spin_held(&bs->used_lock)) { 2259 spdk_spin_unlock(&bs->used_lock); 2260 } 2261 2262 return rc; 2263 } 2264 2265 static void 2266 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2267 { 2268 spdk_bs_sequence_t *seq = ctx->seq; 2269 struct spdk_blob *blob = ctx->blob; 2270 struct spdk_blob_store *bs = blob->bs; 2271 uint64_t i; 2272 uint32_t page_num; 2273 void *tmp; 2274 int rc; 2275 2276 /* Generate the new metadata */ 2277 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2278 if (rc < 0) { 2279 blob_persist_complete(seq, ctx, rc); 2280 return; 2281 } 2282 2283 assert(blob->active.num_pages >= 1); 2284 2285 /* Resize the cache of page indices */ 2286 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2287 if (!tmp) { 2288 blob_persist_complete(seq, ctx, -ENOMEM); 2289 return; 2290 } 2291 blob->active.pages = tmp; 2292 2293 /* Assign this metadata to pages. This requires two passes - one to verify that there are 2294 * enough pages and a second to actually claim them. The used_lock is held across 2295 * both passes to ensure things don't change in the middle. 2296 */ 2297 spdk_spin_lock(&bs->used_lock); 2298 page_num = 0; 2299 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 2300 for (i = 1; i < blob->active.num_pages; i++) { 2301 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2302 if (page_num == UINT32_MAX) { 2303 spdk_spin_unlock(&bs->used_lock); 2304 blob_persist_complete(seq, ctx, -ENOMEM); 2305 return; 2306 } 2307 page_num++; 2308 } 2309 2310 page_num = 0; 2311 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2312 for (i = 1; i < blob->active.num_pages; i++) { 2313 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2314 ctx->pages[i - 1].next = page_num; 2315 /* Now that previous metadata page is complete, calculate the crc for it. */ 2316 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2317 blob->active.pages[i] = page_num; 2318 bs_claim_md_page(bs, page_num); 2319 SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num, 2320 blob->id); 2321 page_num++; 2322 } 2323 spdk_spin_unlock(&bs->used_lock); 2324 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2325 /* Start writing the metadata from last page to first */ 2326 blob->state = SPDK_BLOB_STATE_CLEAN; 2327 blob_persist_write_page_chain(seq, ctx); 2328 } 2329 2330 static void 2331 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2332 { 2333 struct spdk_blob_persist_ctx *ctx = cb_arg; 2334 struct spdk_blob *blob = ctx->blob; 2335 size_t i; 2336 uint32_t extent_page_id; 2337 uint32_t page_count = 0; 2338 int rc; 2339 2340 if (ctx->extent_page != NULL) { 2341 spdk_free(ctx->extent_page); 2342 ctx->extent_page = NULL; 2343 } 2344 2345 if (bserrno != 0) { 2346 blob_persist_complete(seq, ctx, bserrno); 2347 return; 2348 } 2349 2350 /* Only write out Extent Pages when blob was resized. 
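* The pages are written one at a time: each bs_sequence_write_dev() below
 * names this same function as its completion callback with
 * ctx->next_extent_page already advanced, and once no allocated extent
 * page remains, the loop falls through to blob_persist_generate_new_md().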
*/ 2351 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2352 extent_page_id = blob->active.extent_pages[i]; 2353 if (extent_page_id == 0) { 2354 /* No Extent Page to persist */ 2355 assert(spdk_blob_is_thin_provisioned(blob)); 2356 continue; 2357 } 2358 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2359 ctx->next_extent_page = i + 1; 2360 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2361 if (rc < 0) { 2362 blob_persist_complete(seq, ctx, rc); 2363 return; 2364 } 2365 2366 blob->state = SPDK_BLOB_STATE_DIRTY; 2367 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2368 2369 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2370 2371 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2372 bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 2373 blob_persist_write_extent_pages, ctx); 2374 return; 2375 } 2376 2377 blob_persist_generate_new_md(ctx); 2378 } 2379 2380 static void 2381 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2382 { 2383 struct spdk_blob_persist_ctx *ctx = cb_arg; 2384 struct spdk_blob *blob = ctx->blob; 2385 2386 if (bserrno != 0) { 2387 blob_persist_complete(seq, ctx, bserrno); 2388 return; 2389 } 2390 2391 if (blob->active.num_pages == 0) { 2392 /* This is the signal that the blob should be deleted. 2393 * Immediately jump to the clean up routine. */ 2394 assert(blob->clean.num_pages > 0); 2395 blob->state = SPDK_BLOB_STATE_CLEAN; 2396 blob_persist_zero_pages(seq, ctx, 0); 2397 return; 2398 2399 } 2400 2401 if (blob->clean.num_clusters < blob->active.num_clusters) { 2402 /* Blob was resized up */ 2403 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2404 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2405 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2406 /* Blob was resized down */ 2407 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2408 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2409 } else { 2410 /* No change in size occurred */ 2411 blob_persist_generate_new_md(ctx); 2412 return; 2413 } 2414 2415 blob_persist_write_extent_pages(seq, ctx, 0); 2416 } 2417 2418 struct spdk_bs_mark_dirty { 2419 struct spdk_blob_store *bs; 2420 struct spdk_bs_super_block *super; 2421 spdk_bs_sequence_cpl cb_fn; 2422 void *cb_arg; 2423 }; 2424 2425 static void 2426 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2427 { 2428 struct spdk_bs_mark_dirty *ctx = cb_arg; 2429 2430 if (bserrno == 0) { 2431 ctx->bs->clean = 0; 2432 } 2433 2434 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 2435 2436 spdk_free(ctx->super); 2437 free(ctx); 2438 } 2439 2440 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2441 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2442 2443 2444 static void 2445 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2446 { 2447 struct spdk_bs_mark_dirty *ctx = cb_arg; 2448 int rc; 2449 2450 if (bserrno != 0) { 2451 bs_mark_dirty_write_cpl(seq, ctx, bserrno); 2452 return; 2453 } 2454 2455 rc = bs_super_validate(ctx->super, ctx->bs); 2456 if (rc != 0) { 2457 bs_mark_dirty_write_cpl(seq, ctx, rc); 2458 return; 2459 } 2460 2461 ctx->super->clean = 0; 2462 if (ctx->super->size == 0) { 2463 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 2464 } 
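/* The ordering here is the crash-safety contract: super->clean is cleared
 * and written to disk before any blob metadata is modified, so a crash
 * mid-persist is detected on the next load and handled by the dirty
 * shutdown recovery path. The in-memory bs->clean flag is only cleared
 * once this write completes, in bs_mark_dirty_write_cpl().
 */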
2465 2466 bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx); 2467 } 2468 2469 static void 2470 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2471 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2472 { 2473 struct spdk_bs_mark_dirty *ctx; 2474 2475 /* Blobstore is already marked dirty */ 2476 if (bs->clean == 0) { 2477 cb_fn(seq, cb_arg, 0); 2478 return; 2479 } 2480 2481 ctx = calloc(1, sizeof(*ctx)); 2482 if (!ctx) { 2483 cb_fn(seq, cb_arg, -ENOMEM); 2484 return; 2485 } 2486 ctx->bs = bs; 2487 ctx->cb_fn = cb_fn; 2488 ctx->cb_arg = cb_arg; 2489 2490 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2491 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2492 if (!ctx->super) { 2493 free(ctx); 2494 cb_fn(seq, cb_arg, -ENOMEM); 2495 return; 2496 } 2497 2498 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 2499 bs_byte_to_lba(bs, sizeof(*ctx->super)), 2500 bs_mark_dirty_write, ctx); 2501 } 2502 2503 /* Write a blob to disk */ 2504 static void 2505 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2506 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2507 { 2508 struct spdk_blob_persist_ctx *ctx; 2509 2510 blob_verify_md_op(blob); 2511 2512 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2513 cb_fn(seq, cb_arg, 0); 2514 return; 2515 } 2516 2517 ctx = calloc(1, sizeof(*ctx)); 2518 if (!ctx) { 2519 cb_fn(seq, cb_arg, -ENOMEM); 2520 return; 2521 } 2522 ctx->blob = blob; 2523 ctx->seq = seq; 2524 ctx->cb_fn = cb_fn; 2525 ctx->cb_arg = cb_arg; 2526 2527 /* Multiple blob persists can affect one another, via blob->state or 2528 * blob mutable data changes. To prevent it, queue up the persists. */ 2529 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2530 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2531 return; 2532 } 2533 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2534 2535 bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx); 2536 } 2537 2538 struct spdk_blob_copy_cluster_ctx { 2539 struct spdk_blob *blob; 2540 uint8_t *buf; 2541 uint64_t page; 2542 uint64_t new_cluster; 2543 uint32_t new_extent_page; 2544 spdk_bs_sequence_t *seq; 2545 struct spdk_blob_md_page *new_cluster_page; 2546 }; 2547 2548 static void 2549 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2550 { 2551 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2552 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2553 TAILQ_HEAD(, spdk_bs_request_set) requests; 2554 spdk_bs_user_op_t *op; 2555 2556 TAILQ_INIT(&requests); 2557 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2558 2559 while (!TAILQ_EMPTY(&requests)) { 2560 op = TAILQ_FIRST(&requests); 2561 TAILQ_REMOVE(&requests, op, link); 2562 if (bserrno == 0) { 2563 bs_user_op_execute(op); 2564 } else { 2565 bs_user_op_abort(op, bserrno); 2566 } 2567 } 2568 2569 spdk_free(ctx->buf); 2570 free(ctx); 2571 } 2572 2573 static void 2574 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2575 { 2576 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2577 2578 if (bserrno) { 2579 if (bserrno == -EEXIST) { 2580 /* The metadata insert failed because another thread 2581 * allocated the cluster first. Free our cluster 2582 * but continue without error. 
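* This is the benign copy-on-write race: two threads each allocated a
 * cluster for the same logical position, the loser squashes -EEXIST to
 * success, and its unused cluster (and extent page, if one was claimed)
 * is handed back below under used_lock.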
*/ 2583 bserrno = 0; 2584 } 2585 spdk_spin_lock(&ctx->blob->bs->used_lock); 2586 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2587 if (ctx->new_extent_page != 0) { 2588 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2589 } 2590 spdk_spin_unlock(&ctx->blob->bs->used_lock); 2591 } 2592 2593 bs_sequence_finish(ctx->seq, bserrno); 2594 } 2595 2596 static void 2597 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2598 { 2599 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2600 uint32_t cluster_number; 2601 2602 if (bserrno) { 2603 /* The write failed, so jump to the final completion handler */ 2604 bs_sequence_finish(seq, bserrno); 2605 return; 2606 } 2607 2608 cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page); 2609 2610 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2611 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2612 } 2613 2614 static void 2615 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2616 { 2617 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2618 2619 if (bserrno != 0) { 2620 /* The read failed, so jump to the final completion handler */ 2621 bs_sequence_finish(seq, bserrno); 2622 return; 2623 } 2624 2625 /* Write whole cluster */ 2626 bs_sequence_write_dev(seq, ctx->buf, 2627 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2628 bs_cluster_to_lba(ctx->blob->bs, 1), 2629 blob_write_copy_cpl, ctx); 2630 } 2631 2632 static bool 2633 blob_can_copy(struct spdk_blob *blob, uint32_t cluster_start_page, uint64_t *base_lba) 2634 { 2635 uint64_t lba = bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page); 2636 2637 return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) && 2638 blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba); 2639 } 2640 2641 static void 2642 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba) 2643 { 2644 struct spdk_blob *blob = ctx->blob; 2645 uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz); 2646 2647 bs_sequence_copy_dev(ctx->seq, 2648 bs_cluster_to_lba(blob->bs, ctx->new_cluster), 2649 src_lba, 2650 lba_count, 2651 blob_write_copy_cpl, ctx); 2652 } 2653 2654 static void 2655 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2656 struct spdk_io_channel *_ch, 2657 uint64_t io_unit, spdk_bs_user_op_t *op) 2658 { 2659 struct spdk_bs_cpl cpl; 2660 struct spdk_bs_channel *ch; 2661 struct spdk_blob_copy_cluster_ctx *ctx; 2662 uint32_t cluster_start_page; 2663 uint32_t cluster_number; 2664 bool is_zeroes; 2665 bool can_copy; 2666 uint64_t copy_src_lba; 2667 int rc; 2668 2669 ch = spdk_io_channel_get_ctx(_ch); 2670 2671 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2672 /* There are already operations pending. Queue this user op 2673 * and return because it will be re-executed when the outstanding 2674 * cluster allocation completes. */ 2675 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2676 return; 2677 } 2678 2679 /* Round the io_unit offset down to the first page in the cluster */ 2680 cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit); 2681 2682 /* Calculate which index in the metadata cluster array the corresponding 2683 * cluster is supposed to be at. 
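* For example, with the default 1 MiB cluster and 4 KiB pages (256 pages
 * per cluster; assumed geometry), an io_unit on page 600 rounds down to
 * cluster_start_page 512 and maps to cluster_number 600 / 256 = 2.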
*/ 2684 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2685 2686 ctx = calloc(1, sizeof(*ctx)); 2687 if (!ctx) { 2688 bs_user_op_abort(op, -ENOMEM); 2689 return; 2690 } 2691 2692 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2693 2694 ctx->blob = blob; 2695 ctx->page = cluster_start_page; 2696 ctx->new_cluster_page = ch->new_cluster_page; 2697 memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE); 2698 can_copy = blob_can_copy(blob, cluster_start_page, &copy_src_lba); 2699 2700 is_zeroes = blob->back_bs_dev->is_zeroes(blob->back_bs_dev, 2701 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2702 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz)); 2703 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) { 2704 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2705 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2706 if (!ctx->buf) { 2707 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2708 blob->bs->cluster_sz); 2709 free(ctx); 2710 bs_user_op_abort(op, -ENOMEM); 2711 return; 2712 } 2713 } 2714 2715 spdk_spin_lock(&blob->bs->used_lock); 2716 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2717 false); 2718 spdk_spin_unlock(&blob->bs->used_lock); 2719 if (rc != 0) { 2720 spdk_free(ctx->buf); 2721 free(ctx); 2722 bs_user_op_abort(op, rc); 2723 return; 2724 } 2725 2726 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2727 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2728 cpl.u.blob_basic.cb_arg = ctx; 2729 2730 ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob); 2731 if (!ctx->seq) { 2732 spdk_spin_lock(&blob->bs->used_lock); 2733 bs_release_cluster(blob->bs, ctx->new_cluster); 2734 spdk_spin_unlock(&blob->bs->used_lock); 2735 spdk_free(ctx->buf); 2736 free(ctx); 2737 bs_user_op_abort(op, -ENOMEM); 2738 return; 2739 } 2740 2741 /* Queue the user op to block other incoming operations */ 2742 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2743 2744 if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) { 2745 if (can_copy) { 2746 blob_copy(ctx, op, copy_src_lba); 2747 } else { 2748 /* Read cluster from backing device */ 2749 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2750 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2751 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2752 blob_write_copy, ctx); 2753 } 2754 2755 } else { 2756 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2757 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2758 } 2759 } 2760 2761 static inline bool 2762 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2763 uint64_t *lba, uint64_t *lba_count) 2764 { 2765 *lba_count = length; 2766 2767 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2768 assert(blob->back_bs_dev != NULL); 2769 *lba = bs_io_unit_to_back_dev_lba(blob, io_unit); 2770 *lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count); 2771 return false; 2772 } else { 2773 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2774 return true; 2775 } 2776 } 2777 2778 struct op_split_ctx { 2779 struct spdk_blob *blob; 2780 struct spdk_io_channel *channel; 2781 uint64_t io_unit_offset; 2782 uint64_t io_units_remaining; 2783 void *curr_payload; 2784 enum spdk_blob_op_type op_type; 2785 spdk_bs_sequence_t *seq; 2786 bool in_submit_ctx; 2787 bool completed_in_submit_ctx; 2788 bool done; 2789 }; 2790 2791 static void 2792
blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2793 { 2794 struct op_split_ctx *ctx = cb_arg; 2795 struct spdk_blob *blob = ctx->blob; 2796 struct spdk_io_channel *ch = ctx->channel; 2797 enum spdk_blob_op_type op_type = ctx->op_type; 2798 uint8_t *buf; 2799 uint64_t offset; 2800 uint64_t length; 2801 uint64_t op_length; 2802 2803 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2804 bs_sequence_finish(ctx->seq, bserrno); 2805 if (ctx->in_submit_ctx) { 2806 /* Defer freeing of the ctx object, since it will be 2807 * accessed when this unwinds back to the submission 2808 * context. 2809 */ 2810 ctx->done = true; 2811 } else { 2812 free(ctx); 2813 } 2814 return; 2815 } 2816 2817 if (ctx->in_submit_ctx) { 2818 /* If this split operation completed in the context 2819 * of its submission, mark the flag and return immediately 2820 * to avoid recursion. 2821 */ 2822 ctx->completed_in_submit_ctx = true; 2823 return; 2824 } 2825 2826 while (true) { 2827 ctx->completed_in_submit_ctx = false; 2828 2829 offset = ctx->io_unit_offset; 2830 length = ctx->io_units_remaining; 2831 buf = ctx->curr_payload; 2832 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob, 2833 offset)); 2834 2835 /* Update length and payload for next operation */ 2836 ctx->io_units_remaining -= op_length; 2837 ctx->io_unit_offset += op_length; 2838 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 2839 ctx->curr_payload += op_length * blob->bs->io_unit_size; 2840 } 2841 2842 assert(!ctx->in_submit_ctx); 2843 ctx->in_submit_ctx = true; 2844 2845 switch (op_type) { 2846 case SPDK_BLOB_READ: 2847 spdk_blob_io_read(blob, ch, buf, offset, op_length, 2848 blob_request_submit_op_split_next, ctx); 2849 break; 2850 case SPDK_BLOB_WRITE: 2851 spdk_blob_io_write(blob, ch, buf, offset, op_length, 2852 blob_request_submit_op_split_next, ctx); 2853 break; 2854 case SPDK_BLOB_UNMAP: 2855 spdk_blob_io_unmap(blob, ch, offset, op_length, 2856 blob_request_submit_op_split_next, ctx); 2857 break; 2858 case SPDK_BLOB_WRITE_ZEROES: 2859 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 2860 blob_request_submit_op_split_next, ctx); 2861 break; 2862 case SPDK_BLOB_READV: 2863 case SPDK_BLOB_WRITEV: 2864 SPDK_ERRLOG("readv/write not valid\n"); 2865 bs_sequence_finish(ctx->seq, -EINVAL); 2866 free(ctx); 2867 return; 2868 } 2869 2870 #ifndef __clang_analyzer__ 2871 /* scan-build reports a false positive around accessing the ctx here. It 2872 * forms a path that recursively calls this function, but then says 2873 * "assuming ctx->in_submit_ctx is false", when that isn't possible. 2874 * This path does free(ctx), returns to here, and reports a use-after-free 2875 * bug. Wrapping this bit of code so that scan-build doesn't see it 2876 * works around the scan-build bug. 2877 */ 2878 assert(ctx->in_submit_ctx); 2879 ctx->in_submit_ctx = false; 2880 2881 /* If the operation completed immediately, loop back and submit the 2882 * next operation. Otherwise we can return and the next split 2883 * operation will get submitted when this current operation is 2884 * later completed asynchronously.
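* The in_submit_ctx / completed_in_submit_ctx pair is effectively a
 * trampoline: splits that complete synchronously unwind back to the while
 * loop above instead of recursing, so a long run of immediately-completing
 * sub-operations cannot grow the stack.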
2885 */ 2886 if (ctx->completed_in_submit_ctx) { 2887 continue; 2888 } else if (ctx->done) { 2889 free(ctx); 2890 } 2891 #endif 2892 break; 2893 } 2894 } 2895 2896 static void 2897 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 2898 void *payload, uint64_t offset, uint64_t length, 2899 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2900 { 2901 struct op_split_ctx *ctx; 2902 spdk_bs_sequence_t *seq; 2903 struct spdk_bs_cpl cpl; 2904 2905 assert(blob != NULL); 2906 2907 ctx = calloc(1, sizeof(struct op_split_ctx)); 2908 if (ctx == NULL) { 2909 cb_fn(cb_arg, -ENOMEM); 2910 return; 2911 } 2912 2913 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2914 cpl.u.blob_basic.cb_fn = cb_fn; 2915 cpl.u.blob_basic.cb_arg = cb_arg; 2916 2917 seq = bs_sequence_start_blob(ch, &cpl, blob); 2918 if (!seq) { 2919 free(ctx); 2920 cb_fn(cb_arg, -ENOMEM); 2921 return; 2922 } 2923 2924 ctx->blob = blob; 2925 ctx->channel = ch; 2926 ctx->curr_payload = payload; 2927 ctx->io_unit_offset = offset; 2928 ctx->io_units_remaining = length; 2929 ctx->op_type = op_type; 2930 ctx->seq = seq; 2931 2932 blob_request_submit_op_split_next(ctx, 0); 2933 } 2934 2935 static void 2936 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 2937 void *payload, uint64_t offset, uint64_t length, 2938 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2939 { 2940 struct spdk_bs_cpl cpl; 2941 uint64_t lba; 2942 uint64_t lba_count; 2943 bool is_allocated; 2944 2945 assert(blob != NULL); 2946 2947 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2948 cpl.u.blob_basic.cb_fn = cb_fn; 2949 cpl.u.blob_basic.cb_arg = cb_arg; 2950 2951 if (blob->frozen_refcnt) { 2952 /* This blob I/O is frozen */ 2953 spdk_bs_user_op_t *op; 2954 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 2955 2956 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 2957 if (!op) { 2958 cb_fn(cb_arg, -ENOMEM); 2959 return; 2960 } 2961 2962 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2963 2964 return; 2965 } 2966 2967 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2968 2969 switch (op_type) { 2970 case SPDK_BLOB_READ: { 2971 spdk_bs_batch_t *batch; 2972 2973 batch = bs_batch_open(_ch, &cpl, blob); 2974 if (!batch) { 2975 cb_fn(cb_arg, -ENOMEM); 2976 return; 2977 } 2978 2979 if (is_allocated) { 2980 /* Read from the blob */ 2981 bs_batch_read_dev(batch, payload, lba, lba_count); 2982 } else { 2983 /* Read from the backing block device */ 2984 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 2985 } 2986 2987 bs_batch_close(batch); 2988 break; 2989 } 2990 case SPDK_BLOB_WRITE: 2991 case SPDK_BLOB_WRITE_ZEROES: { 2992 if (is_allocated) { 2993 /* Write to the blob */ 2994 spdk_bs_batch_t *batch; 2995 2996 if (lba_count == 0) { 2997 cb_fn(cb_arg, 0); 2998 return; 2999 } 3000 3001 batch = bs_batch_open(_ch, &cpl, blob); 3002 if (!batch) { 3003 cb_fn(cb_arg, -ENOMEM); 3004 return; 3005 } 3006 3007 if (op_type == SPDK_BLOB_WRITE) { 3008 bs_batch_write_dev(batch, payload, lba, lba_count); 3009 } else { 3010 bs_batch_write_zeroes_dev(batch, lba, lba_count); 3011 } 3012 3013 bs_batch_close(batch); 3014 } else { 3015 /* Queue this operation and allocate the cluster */ 3016 spdk_bs_user_op_t *op; 3017 3018 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3019 if (!op) { 3020 cb_fn(cb_arg, -ENOMEM); 3021 return; 3022 } 3023 3024 
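/* Writes that target an unallocated cluster take the copy-on-write path:
 * the op is queued on the channel and re-executed by
 * blob_allocate_and_copy_cluster_cpl() once the new cluster has been
 * allocated and, for clones, populated from the backing device. */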
bs_allocate_and_copy_cluster(blob, _ch, offset, op); 3025 } 3026 break; 3027 } 3028 case SPDK_BLOB_UNMAP: { 3029 spdk_bs_batch_t *batch; 3030 3031 batch = bs_batch_open(_ch, &cpl, blob); 3032 if (!batch) { 3033 cb_fn(cb_arg, -ENOMEM); 3034 return; 3035 } 3036 3037 if (is_allocated) { 3038 bs_batch_unmap_dev(batch, lba, lba_count); 3039 } 3040 3041 bs_batch_close(batch); 3042 break; 3043 } 3044 case SPDK_BLOB_READV: 3045 case SPDK_BLOB_WRITEV: 3046 SPDK_ERRLOG("readv/write not valid\n"); 3047 cb_fn(cb_arg, -EINVAL); 3048 break; 3049 } 3050 } 3051 3052 static void 3053 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3054 void *payload, uint64_t offset, uint64_t length, 3055 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 3056 { 3057 assert(blob != NULL); 3058 3059 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 3060 cb_fn(cb_arg, -EPERM); 3061 return; 3062 } 3063 3064 if (length == 0) { 3065 cb_fn(cb_arg, 0); 3066 return; 3067 } 3068 3069 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3070 cb_fn(cb_arg, -EINVAL); 3071 return; 3072 } 3073 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) { 3074 blob_request_submit_op_single(_channel, blob, payload, offset, length, 3075 cb_fn, cb_arg, op_type); 3076 } else { 3077 blob_request_submit_op_split(_channel, blob, payload, offset, length, 3078 cb_fn, cb_arg, op_type); 3079 } 3080 } 3081 3082 struct rw_iov_ctx { 3083 struct spdk_blob *blob; 3084 struct spdk_io_channel *channel; 3085 spdk_blob_op_complete cb_fn; 3086 void *cb_arg; 3087 bool read; 3088 int iovcnt; 3089 struct iovec *orig_iov; 3090 uint64_t io_unit_offset; 3091 uint64_t io_units_remaining; 3092 uint64_t io_units_done; 3093 struct spdk_blob_ext_io_opts *ext_io_opts; 3094 struct iovec iov[0]; 3095 }; 3096 3097 static void 3098 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3099 { 3100 assert(cb_arg == NULL); 3101 bs_sequence_finish(seq, bserrno); 3102 } 3103 3104 static void 3105 rw_iov_split_next(void *cb_arg, int bserrno) 3106 { 3107 struct rw_iov_ctx *ctx = cb_arg; 3108 struct spdk_blob *blob = ctx->blob; 3109 struct iovec *iov, *orig_iov; 3110 int iovcnt; 3111 size_t orig_iovoff; 3112 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 3113 uint64_t byte_count; 3114 3115 if (bserrno != 0 || ctx->io_units_remaining == 0) { 3116 ctx->cb_fn(ctx->cb_arg, bserrno); 3117 free(ctx); 3118 return; 3119 } 3120 3121 io_unit_offset = ctx->io_unit_offset; 3122 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 3123 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 3124 /* 3125 * Get index and offset into the original iov array for our current position in the I/O sequence. 3126 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 3127 * point to the current position in the I/O sequence. 3128 */ 3129 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 3130 orig_iov = &ctx->orig_iov[0]; 3131 orig_iovoff = 0; 3132 while (byte_count > 0) { 3133 if (byte_count >= orig_iov->iov_len) { 3134 byte_count -= orig_iov->iov_len; 3135 orig_iov++; 3136 } else { 3137 orig_iovoff = byte_count; 3138 byte_count = 0; 3139 } 3140 } 3141 3142 /* 3143 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 3144 * bytes of this next I/O remain to be accounted for in the new iov array. 
3145 */ 3146 byte_count = io_units_count * blob->bs->io_unit_size; 3147 iov = &ctx->iov[0]; 3148 iovcnt = 0; 3149 while (byte_count > 0) { 3150 assert(iovcnt < ctx->iovcnt); 3151 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 3152 iov->iov_base = orig_iov->iov_base + orig_iovoff; 3153 byte_count -= iov->iov_len; 3154 orig_iovoff = 0; 3155 orig_iov++; 3156 iov++; 3157 iovcnt++; 3158 } 3159 3160 ctx->io_unit_offset += io_units_count; 3161 ctx->io_units_remaining -= io_units_count; 3162 ctx->io_units_done += io_units_count; 3163 iov = &ctx->iov[0]; 3164 3165 if (ctx->read) { 3166 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3167 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3168 } else { 3169 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3170 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3171 } 3172 } 3173 3174 static void 3175 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3176 struct iovec *iov, int iovcnt, 3177 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 3178 struct spdk_blob_ext_io_opts *ext_io_opts) 3179 { 3180 struct spdk_bs_cpl cpl; 3181 3182 assert(blob != NULL); 3183 3184 if (!read && blob->data_ro) { 3185 cb_fn(cb_arg, -EPERM); 3186 return; 3187 } 3188 3189 if (length == 0) { 3190 cb_fn(cb_arg, 0); 3191 return; 3192 } 3193 3194 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3195 cb_fn(cb_arg, -EINVAL); 3196 return; 3197 } 3198 3199 /* 3200 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 3201 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 3202 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 3203 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 3204 * to allocate a separate iov array and split the I/O such that none of the resulting 3205 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 3206 * but since this case happens very infrequently, any performance impact will be negligible. 3207 * 3208 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 3209 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 3210 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 3211 * when the batch was completed, to allow for freeing the memory for the iov arrays. 3212 */ 3213 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 3214 uint64_t lba_count; 3215 uint64_t lba; 3216 bool is_allocated; 3217 3218 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3219 cpl.u.blob_basic.cb_fn = cb_fn; 3220 cpl.u.blob_basic.cb_arg = cb_arg; 3221 3222 if (blob->frozen_refcnt) { 3223 /* This blob I/O is frozen */ 3224 enum spdk_blob_op_type op_type; 3225 spdk_bs_user_op_t *op; 3226 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 3227 3228 op_type = read ? 
SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 3229 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 3230 if (!op) { 3231 cb_fn(cb_arg, -ENOMEM); 3232 return; 3233 } 3234 3235 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3236 3237 return; 3238 } 3239 3240 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3241 3242 if (read) { 3243 spdk_bs_sequence_t *seq; 3244 3245 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3246 if (!seq) { 3247 cb_fn(cb_arg, -ENOMEM); 3248 return; 3249 } 3250 3251 seq->ext_io_opts = ext_io_opts; 3252 3253 if (is_allocated) { 3254 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3255 } else { 3256 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3257 rw_iov_done, NULL); 3258 } 3259 } else { 3260 if (is_allocated) { 3261 spdk_bs_sequence_t *seq; 3262 3263 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3264 if (!seq) { 3265 cb_fn(cb_arg, -ENOMEM); 3266 return; 3267 } 3268 3269 seq->ext_io_opts = ext_io_opts; 3270 3271 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3272 } else { 3273 /* Queue this operation and allocate the cluster */ 3274 spdk_bs_user_op_t *op; 3275 3276 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3277 length); 3278 if (!op) { 3279 cb_fn(cb_arg, -ENOMEM); 3280 return; 3281 } 3282 3283 op->ext_io_opts = ext_io_opts; 3284 3285 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3286 } 3287 } 3288 } else { 3289 struct rw_iov_ctx *ctx; 3290 3291 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3292 if (ctx == NULL) { 3293 cb_fn(cb_arg, -ENOMEM); 3294 return; 3295 } 3296 3297 ctx->blob = blob; 3298 ctx->channel = _channel; 3299 ctx->cb_fn = cb_fn; 3300 ctx->cb_arg = cb_arg; 3301 ctx->read = read; 3302 ctx->orig_iov = iov; 3303 ctx->iovcnt = iovcnt; 3304 ctx->io_unit_offset = offset; 3305 ctx->io_units_remaining = length; 3306 ctx->io_units_done = 0; 3307 ctx->ext_io_opts = ext_io_opts; 3308 3309 rw_iov_split_next(ctx, 0); 3310 } 3311 } 3312 3313 static struct spdk_blob * 3314 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3315 { 3316 struct spdk_blob find; 3317 3318 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3319 return NULL; 3320 } 3321 3322 find.id = blobid; 3323 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3324 } 3325 3326 static void 3327 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3328 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3329 { 3330 assert(blob != NULL); 3331 *snapshot_entry = NULL; 3332 *clone_entry = NULL; 3333 3334 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3335 return; 3336 } 3337 3338 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3339 if ((*snapshot_entry)->id == blob->parent_id) { 3340 break; 3341 } 3342 } 3343 3344 if (*snapshot_entry != NULL) { 3345 TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) { 3346 if ((*clone_entry)->id == blob->id) { 3347 break; 3348 } 3349 } 3350 3351 assert(*clone_entry != NULL); 3352 } 3353 } 3354 3355 static int 3356 bs_channel_create(void *io_device, void *ctx_buf) 3357 { 3358 struct spdk_blob_store *bs = io_device; 3359 struct spdk_bs_channel *channel = ctx_buf; 3360 struct spdk_bs_dev *dev; 3361 uint32_t max_ops = bs->max_channel_ops; 3362 uint32_t i; 3363 3364 dev = bs->dev; 3365 3366 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 
3367 if (!channel->req_mem) { 3368 return -1; 3369 } 3370 3371 TAILQ_INIT(&channel->reqs); 3372 3373 for (i = 0; i < max_ops; i++) { 3374 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3375 } 3376 3377 channel->bs = bs; 3378 channel->dev = dev; 3379 channel->dev_channel = dev->create_channel(dev); 3380 3381 if (!channel->dev_channel) { 3382 SPDK_ERRLOG("Failed to create device channel.\n"); 3383 free(channel->req_mem); 3384 return -1; 3385 } 3386 3387 channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, 3388 SPDK_MALLOC_DMA); 3389 if (!channel->new_cluster_page) { 3390 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3391 free(channel->req_mem); 3392 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3393 return -1; 3394 } 3395 3396 TAILQ_INIT(&channel->need_cluster_alloc); 3397 TAILQ_INIT(&channel->queued_io); 3398 RB_INIT(&channel->esnap_channels); 3399 3400 return 0; 3401 } 3402 3403 static void 3404 bs_channel_destroy(void *io_device, void *ctx_buf) 3405 { 3406 struct spdk_bs_channel *channel = ctx_buf; 3407 spdk_bs_user_op_t *op; 3408 3409 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3410 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3411 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3412 bs_user_op_abort(op, -EIO); 3413 } 3414 3415 while (!TAILQ_EMPTY(&channel->queued_io)) { 3416 op = TAILQ_FIRST(&channel->queued_io); 3417 TAILQ_REMOVE(&channel->queued_io, op, link); 3418 bs_user_op_abort(op, -EIO); 3419 } 3420 3421 blob_esnap_destroy_bs_channel(channel); 3422 3423 free(channel->req_mem); 3424 spdk_free(channel->new_cluster_page); 3425 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3426 } 3427 3428 static void 3429 bs_dev_destroy(void *io_device) 3430 { 3431 struct spdk_blob_store *bs = io_device; 3432 struct spdk_blob *blob, *blob_tmp; 3433 3434 bs->dev->destroy(bs->dev); 3435 3436 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3437 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3438 spdk_bit_array_clear(bs->open_blobids, blob->id); 3439 blob_free(blob); 3440 } 3441 3442 spdk_spin_destroy(&bs->used_lock); 3443 3444 spdk_bit_array_free(&bs->open_blobids); 3445 spdk_bit_array_free(&bs->used_blobids); 3446 spdk_bit_array_free(&bs->used_md_pages); 3447 spdk_bit_pool_free(&bs->used_clusters); 3448 /* 3449 * If this function is called for any reason except a successful unload, 3450 * the unload_cpl type will be NONE and this will be a nop. 
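* (bs_free() always funnels through this destroy callback via
 * spdk_io_device_unregister(), so failure and teardown paths simply have
 * no completion to deliver here.)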
3451 */ 3452 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3453 3454 free(bs); 3455 } 3456 3457 static int 3458 bs_blob_list_add(struct spdk_blob *blob) 3459 { 3460 spdk_blob_id snapshot_id; 3461 struct spdk_blob_list *snapshot_entry = NULL; 3462 struct spdk_blob_list *clone_entry = NULL; 3463 3464 assert(blob != NULL); 3465 3466 snapshot_id = blob->parent_id; 3467 if (snapshot_id == SPDK_BLOBID_INVALID || 3468 snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 3469 return 0; 3470 } 3471 3472 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3473 if (snapshot_entry == NULL) { 3474 /* Snapshot not found */ 3475 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3476 if (snapshot_entry == NULL) { 3477 return -ENOMEM; 3478 } 3479 snapshot_entry->id = snapshot_id; 3480 TAILQ_INIT(&snapshot_entry->clones); 3481 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3482 } else { 3483 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3484 if (clone_entry->id == blob->id) { 3485 break; 3486 } 3487 } 3488 } 3489 3490 if (clone_entry == NULL) { 3491 /* Clone not found */ 3492 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3493 if (clone_entry == NULL) { 3494 return -ENOMEM; 3495 } 3496 clone_entry->id = blob->id; 3497 TAILQ_INIT(&clone_entry->clones); 3498 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3499 snapshot_entry->clone_count++; 3500 } 3501 3502 return 0; 3503 } 3504 3505 static void 3506 bs_blob_list_remove(struct spdk_blob *blob) 3507 { 3508 struct spdk_blob_list *snapshot_entry = NULL; 3509 struct spdk_blob_list *clone_entry = NULL; 3510 3511 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3512 3513 if (snapshot_entry == NULL) { 3514 return; 3515 } 3516 3517 blob->parent_id = SPDK_BLOBID_INVALID; 3518 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3519 free(clone_entry); 3520 3521 snapshot_entry->clone_count--; 3522 } 3523 3524 static int 3525 bs_blob_list_free(struct spdk_blob_store *bs) 3526 { 3527 struct spdk_blob_list *snapshot_entry; 3528 struct spdk_blob_list *snapshot_entry_tmp; 3529 struct spdk_blob_list *clone_entry; 3530 struct spdk_blob_list *clone_entry_tmp; 3531 3532 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3533 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3534 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3535 free(clone_entry); 3536 } 3537 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3538 free(snapshot_entry); 3539 } 3540 3541 return 0; 3542 } 3543 3544 static void 3545 bs_free(struct spdk_blob_store *bs) 3546 { 3547 bs_blob_list_free(bs); 3548 3549 bs_unregister_md_thread(bs); 3550 spdk_io_device_unregister(bs, bs_dev_destroy); 3551 } 3552 3553 void 3554 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3555 { 3556 3557 if (!opts) { 3558 SPDK_ERRLOG("opts should not be NULL\n"); 3559 return; 3560 } 3561 3562 if (!opts_size) { 3563 SPDK_ERRLOG("opts_size should not be zero value\n"); 3564 return; 3565 } 3566 3567 memset(opts, 0, opts_size); 3568 opts->opts_size = opts_size; 3569 3570 #define FIELD_OK(field) \ 3571 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3572 3573 #define SET_FIELD(field, value) \ 3574 if (FIELD_OK(field)) { \ 3575 opts->field = value; \ 3576 } \ 3577 3578 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3579 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3580 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3581 
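/* FIELD_OK()/SET_FIELD() are what keep this initializer safe across ABI
 * versions: a default is stored only if the caller's opts_size covers that
 * member. As an illustration, the expansion for one field is roughly:
 *
 *	if (offsetof(struct spdk_bs_opts, cluster_sz) +
 *	    sizeof(opts->cluster_sz) <= opts_size) {
 *		opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
 *	}
 *
 * so a caller compiled against an older, shorter struct spdk_bs_opts gets
 * exactly the defaults it knows about and nothing beyond them.
 */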
SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3582 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3583 3584 if (FIELD_OK(bstype)) { 3585 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3586 } 3587 3588 SET_FIELD(iter_cb_fn, NULL); 3589 SET_FIELD(iter_cb_arg, NULL); 3590 SET_FIELD(force_recover, false); 3591 SET_FIELD(esnap_bs_dev_create, NULL); 3592 SET_FIELD(esnap_ctx, NULL); 3593 3594 #undef FIELD_OK 3595 #undef SET_FIELD 3596 } 3597 3598 static int 3599 bs_opts_verify(struct spdk_bs_opts *opts) 3600 { 3601 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3602 opts->max_channel_ops == 0) { 3603 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3604 return -1; 3605 } 3606 3607 return 0; 3608 } 3609 3610 /* START spdk_bs_load */ 3611 3612 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3613 3614 struct spdk_bs_load_ctx { 3615 struct spdk_blob_store *bs; 3616 struct spdk_bs_super_block *super; 3617 3618 struct spdk_bs_md_mask *mask; 3619 bool in_page_chain; 3620 uint32_t page_index; 3621 uint32_t cur_page; 3622 struct spdk_blob_md_page *page; 3623 3624 uint64_t num_extent_pages; 3625 uint32_t *extent_page_num; 3626 struct spdk_blob_md_page *extent_pages; 3627 struct spdk_bit_array *used_clusters; 3628 3629 spdk_bs_sequence_t *seq; 3630 spdk_blob_op_with_handle_complete iter_cb_fn; 3631 void *iter_cb_arg; 3632 struct spdk_blob *blob; 3633 spdk_blob_id blobid; 3634 3635 bool force_recover; 3636 3637 /* These fields are used in the spdk_bs_dump path. */ 3638 bool dumping; 3639 FILE *fp; 3640 spdk_bs_dump_print_xattr print_xattr_fn; 3641 char xattr_name[4096]; 3642 }; 3643 3644 static int 3645 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3646 struct spdk_bs_load_ctx **_ctx) 3647 { 3648 struct spdk_blob_store *bs; 3649 struct spdk_bs_load_ctx *ctx; 3650 uint64_t dev_size; 3651 int rc; 3652 3653 dev_size = dev->blocklen * dev->blockcnt; 3654 if (dev_size < opts->cluster_sz) { 3655 /* Device size cannot be smaller than cluster size of blobstore */ 3656 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3657 dev_size, opts->cluster_sz); 3658 return -ENOSPC; 3659 } 3660 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 3661 /* Cluster size cannot be smaller than page size */ 3662 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3663 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3664 return -EINVAL; 3665 } 3666 bs = calloc(1, sizeof(struct spdk_blob_store)); 3667 if (!bs) { 3668 return -ENOMEM; 3669 } 3670 3671 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3672 if (!ctx) { 3673 free(bs); 3674 return -ENOMEM; 3675 } 3676 3677 ctx->bs = bs; 3678 ctx->iter_cb_fn = opts->iter_cb_fn; 3679 ctx->iter_cb_arg = opts->iter_cb_arg; 3680 ctx->force_recover = opts->force_recover; 3681 3682 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3683 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3684 if (!ctx->super) { 3685 free(ctx); 3686 free(bs); 3687 return -ENOMEM; 3688 } 3689 3690 RB_INIT(&bs->open_blobs); 3691 TAILQ_INIT(&bs->snapshots); 3692 bs->dev = dev; 3693 bs->md_thread = spdk_get_thread(); 3694 assert(bs->md_thread != NULL); 3695 3696 /* 3697 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3698 * even multiple of the cluster size. 
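* Worked example (assumed device geometry): with blocklen = 4096,
 * blockcnt = 2621440 (a 10 GiB device) and the default 1 MiB cluster,
 * total_clusters = 2621440 / (1048576 / 4096) = 10240; any partial
 * trailing cluster is silently dropped by the integer division.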
3699 */ 3700 bs->cluster_sz = opts->cluster_sz; 3701 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3702 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3703 if (!ctx->used_clusters) { 3704 spdk_free(ctx->super); 3705 free(ctx); 3706 free(bs); 3707 return -ENOMEM; 3708 } 3709 3710 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3711 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3712 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3713 } 3714 bs->num_free_clusters = bs->total_clusters; 3715 bs->io_unit_size = dev->blocklen; 3716 3717 bs->max_channel_ops = opts->max_channel_ops; 3718 bs->super_blob = SPDK_BLOBID_INVALID; 3719 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3720 bs->esnap_bs_dev_create = opts->esnap_bs_dev_create; 3721 bs->esnap_ctx = opts->esnap_ctx; 3722 3723 /* The metadata is assumed to be at least 1 page */ 3724 bs->used_md_pages = spdk_bit_array_create(1); 3725 bs->used_blobids = spdk_bit_array_create(0); 3726 bs->open_blobids = spdk_bit_array_create(0); 3727 3728 spdk_spin_init(&bs->used_lock); 3729 3730 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3731 sizeof(struct spdk_bs_channel), "blobstore"); 3732 rc = bs_register_md_thread(bs); 3733 if (rc == -1) { 3734 spdk_io_device_unregister(bs, NULL); 3735 spdk_spin_destroy(&bs->used_lock); 3736 spdk_bit_array_free(&bs->open_blobids); 3737 spdk_bit_array_free(&bs->used_blobids); 3738 spdk_bit_array_free(&bs->used_md_pages); 3739 spdk_bit_array_free(&ctx->used_clusters); 3740 spdk_free(ctx->super); 3741 free(ctx); 3742 free(bs); 3743 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3744 return -ENOMEM; 3745 } 3746 3747 *_ctx = ctx; 3748 *_bs = bs; 3749 return 0; 3750 } 3751 3752 static void 3753 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3754 { 3755 assert(bserrno != 0); 3756 3757 spdk_free(ctx->super); 3758 bs_sequence_finish(ctx->seq, bserrno); 3759 bs_free(ctx->bs); 3760 spdk_bit_array_free(&ctx->used_clusters); 3761 free(ctx); 3762 } 3763 3764 static void 3765 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3766 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3767 { 3768 /* Update the values in the super block */ 3769 super->super_blob = bs->super_blob; 3770 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 3771 super->crc = blob_md_page_calc_crc(super); 3772 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 3773 bs_byte_to_lba(bs, sizeof(*super)), 3774 cb_fn, cb_arg); 3775 } 3776 3777 static void 3778 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3779 { 3780 struct spdk_bs_load_ctx *ctx = arg; 3781 uint64_t mask_size, lba, lba_count; 3782 3783 /* Write out the used clusters mask */ 3784 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3785 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3786 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3787 if (!ctx->mask) { 3788 bs_load_ctx_fail(ctx, -ENOMEM); 3789 return; 3790 } 3791 3792 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 3793 ctx->mask->length = ctx->bs->total_clusters; 3794 /* We could get here through the normal unload path, or through dirty 3795 * shutdown recovery. For the normal unload path, we use the mask from 3796 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 3797 * only the bit array from the load ctx. 
3798 */ 3799 if (ctx->bs->used_clusters) { 3800 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 3801 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 3802 } else { 3803 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 3804 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 3805 } 3806 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3807 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3808 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3809 } 3810 3811 static void 3812 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3813 { 3814 struct spdk_bs_load_ctx *ctx = arg; 3815 uint64_t mask_size, lba, lba_count; 3816 3817 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3818 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3819 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3820 if (!ctx->mask) { 3821 bs_load_ctx_fail(ctx, -ENOMEM); 3822 return; 3823 } 3824 3825 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 3826 ctx->mask->length = ctx->super->md_len; 3827 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 3828 3829 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 3830 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3831 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3832 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3833 } 3834 3835 static void 3836 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3837 { 3838 struct spdk_bs_load_ctx *ctx = arg; 3839 uint64_t mask_size, lba, lba_count; 3840 3841 if (ctx->super->used_blobid_mask_len == 0) { 3842 /* 3843 * This is a pre-v3 on-disk format where the blobid mask does not get 3844 * written to disk. 
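* Skipping the write is safe because a load of such a blobstore does not
 * consult this mask; the used-blobid set is instead reconstructed from the
 * metadata pages themselves during load/recovery.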
3845 */ 3846 cb_fn(seq, arg, 0); 3847 return; 3848 } 3849 3850 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3851 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3852 SPDK_MALLOC_DMA); 3853 if (!ctx->mask) { 3854 bs_load_ctx_fail(ctx, -ENOMEM); 3855 return; 3856 } 3857 3858 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 3859 ctx->mask->length = ctx->super->md_len; 3860 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 3861 3862 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 3863 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3864 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3865 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3866 } 3867 3868 static void 3869 blob_set_thin_provision(struct spdk_blob *blob) 3870 { 3871 blob_verify_md_op(blob); 3872 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 3873 blob->state = SPDK_BLOB_STATE_DIRTY; 3874 } 3875 3876 static void 3877 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 3878 { 3879 blob_verify_md_op(blob); 3880 blob->clear_method = clear_method; 3881 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 3882 blob->state = SPDK_BLOB_STATE_DIRTY; 3883 } 3884 3885 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 3886 3887 static void 3888 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 3889 { 3890 struct spdk_bs_load_ctx *ctx = cb_arg; 3891 spdk_blob_id id; 3892 int64_t page_num; 3893 3894 /* Iterate to next blob (we can't use spdk_bs_iter_next function as our 3895 * last blob has been removed */ 3896 page_num = bs_blobid_to_page(ctx->blobid); 3897 page_num++; 3898 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 3899 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 3900 bs_load_iter(ctx, NULL, -ENOENT); 3901 return; 3902 } 3903 3904 id = bs_page_to_blobid(page_num); 3905 3906 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 3907 } 3908 3909 static void 3910 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 3911 { 3912 struct spdk_bs_load_ctx *ctx = cb_arg; 3913 3914 if (bserrno != 0) { 3915 SPDK_ERRLOG("Failed to close corrupted blob\n"); 3916 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3917 return; 3918 } 3919 3920 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 3921 } 3922 3923 static void 3924 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 3925 { 3926 struct spdk_bs_load_ctx *ctx = cb_arg; 3927 uint64_t i; 3928 3929 if (bserrno != 0) { 3930 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 3931 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3932 return; 3933 } 3934 3935 /* Snapshot and clone have the same copy of cluster map and extent pages 3936 * at this point. Let's clear both for snapshot now, 3937 * so that it won't be cleared for clone later when we remove snapshot. 
3938 * Also set thin provision to pass data corruption check */ 3939 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 3940 ctx->blob->active.clusters[i] = 0; 3941 } 3942 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 3943 ctx->blob->active.extent_pages[i] = 0; 3944 } 3945 3946 ctx->blob->md_ro = false; 3947 3948 blob_set_thin_provision(ctx->blob); 3949 3950 ctx->blobid = ctx->blob->id; 3951 3952 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 3953 } 3954 3955 static void 3956 bs_update_corrupted_blob(void *cb_arg, int bserrno) 3957 { 3958 struct spdk_bs_load_ctx *ctx = cb_arg; 3959 3960 if (bserrno != 0) { 3961 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 3962 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3963 return; 3964 } 3965 3966 ctx->blob->md_ro = false; 3967 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 3968 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 3969 spdk_blob_set_read_only(ctx->blob); 3970 3971 if (ctx->iter_cb_fn) { 3972 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 3973 } 3974 bs_blob_list_add(ctx->blob); 3975 3976 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3977 } 3978 3979 static void 3980 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 3981 { 3982 struct spdk_bs_load_ctx *ctx = cb_arg; 3983 3984 if (bserrno != 0) { 3985 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 3986 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3987 return; 3988 } 3989 3990 if (blob->parent_id == ctx->blob->id) { 3991 /* Power failure occurred before updating clone (snapshot delete case) 3992 * or after updating clone (creating snapshot case) - keep snapshot */ 3993 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 3994 } else { 3995 /* Power failure occurred after updating clone (snapshot delete case) 3996 * or before updating clone (creating snapshot case) - remove snapshot */ 3997 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 3998 } 3999 } 4000 4001 static void 4002 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 4003 { 4004 struct spdk_bs_load_ctx *ctx = arg; 4005 const void *value; 4006 size_t len; 4007 int rc = 0; 4008 4009 if (bserrno == 0) { 4010 /* Examine blob if it is corrupted after power failure. Fix 4011 * the ones that can be fixed and remove any other corrupted 4012 * ones. If it is not corrupted just process it */ 4013 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 4014 if (rc != 0) { 4015 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 4016 if (rc != 0) { 4017 /* Not corrupted - process it and continue with iterating through blobs */ 4018 if (ctx->iter_cb_fn) { 4019 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 4020 } 4021 bs_blob_list_add(blob); 4022 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 4023 return; 4024 } 4025 4026 } 4027 4028 assert(len == sizeof(spdk_blob_id)); 4029 4030 ctx->blob = blob; 4031 4032 /* Open clone to check if we are able to fix this blob or should we remove it */ 4033 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 4034 return; 4035 } else if (bserrno == -ENOENT) { 4036 bserrno = 0; 4037 } else { 4038 /* 4039 * This case needs to be looked at further. Same problem 4040 * exists with applications that rely on explicit blob 4041 * iteration. We should just skip the blob that failed 4042 * to load and continue on to the next one. 
4043 */ 4044 SPDK_ERRLOG("Error in iterating blobs\n"); 4045 } 4046 4047 ctx->iter_cb_fn = NULL; 4048 4049 spdk_free(ctx->super); 4050 spdk_free(ctx->mask); 4051 bs_sequence_finish(ctx->seq, bserrno); 4052 free(ctx); 4053 } 4054 4055 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 4056 4057 static void 4058 bs_load_complete(struct spdk_bs_load_ctx *ctx) 4059 { 4060 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters); 4061 if (ctx->dumping) { 4062 bs_dump_read_md_page(ctx->seq, ctx); 4063 return; 4064 } 4065 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx); 4066 } 4067 4068 static void 4069 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4070 { 4071 struct spdk_bs_load_ctx *ctx = cb_arg; 4072 int rc; 4073 4074 /* The type must be correct */ 4075 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 4076 4077 /* The length of the mask (in bits) must not be greater than 4078 * the length of the buffer (converted to bits) */ 4079 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 4080 4081 /* The length of the mask must be exactly equal to the size 4082 * (in pages) of the metadata region */ 4083 assert(ctx->mask->length == ctx->super->md_len); 4084 4085 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length); 4086 if (rc < 0) { 4087 spdk_free(ctx->mask); 4088 bs_load_ctx_fail(ctx, rc); 4089 return; 4090 } 4091 4092 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask); 4093 bs_load_complete(ctx); 4094 } 4095 4096 static void 4097 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4098 { 4099 struct spdk_bs_load_ctx *ctx = cb_arg; 4100 uint64_t lba, lba_count, mask_size; 4101 int rc; 4102 4103 if (bserrno != 0) { 4104 bs_load_ctx_fail(ctx, bserrno); 4105 return; 4106 } 4107 4108 /* The type must be correct */ 4109 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 4110 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4111 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 4112 struct spdk_blob_md_page) * 8)); 4113 /* 4114 * The length of the mask must be equal to or larger than the total number of clusters. It may be 4115 * larger than the total number of clusters due to a failure spdk_bs_grow. 
4116 */ 4117 assert(ctx->mask->length >= ctx->bs->total_clusters); 4118 if (ctx->mask->length > ctx->bs->total_clusters) { 4119 SPDK_WARNLOG("Shrink the used_custers mask length to total_clusters"); 4120 ctx->mask->length = ctx->bs->total_clusters; 4121 } 4122 4123 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length); 4124 if (rc < 0) { 4125 spdk_free(ctx->mask); 4126 bs_load_ctx_fail(ctx, rc); 4127 return; 4128 } 4129 4130 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask); 4131 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters); 4132 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 4133 4134 spdk_free(ctx->mask); 4135 4136 /* Read the used blobids mask */ 4137 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 4138 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 4139 SPDK_MALLOC_DMA); 4140 if (!ctx->mask) { 4141 bs_load_ctx_fail(ctx, -ENOMEM); 4142 return; 4143 } 4144 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4145 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4146 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4147 bs_load_used_blobids_cpl, ctx); 4148 } 4149 4150 static void 4151 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4152 { 4153 struct spdk_bs_load_ctx *ctx = cb_arg; 4154 uint64_t lba, lba_count, mask_size; 4155 int rc; 4156 4157 if (bserrno != 0) { 4158 bs_load_ctx_fail(ctx, bserrno); 4159 return; 4160 } 4161 4162 /* The type must be correct */ 4163 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 4164 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4165 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 4166 8)); 4167 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 4168 if (ctx->mask->length != ctx->super->md_len) { 4169 SPDK_ERRLOG("mismatched md_len in used_pages mask: " 4170 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n", 4171 ctx->mask->length, ctx->super->md_len); 4172 assert(false); 4173 } 4174 4175 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length); 4176 if (rc < 0) { 4177 spdk_free(ctx->mask); 4178 bs_load_ctx_fail(ctx, rc); 4179 return; 4180 } 4181 4182 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4183 spdk_free(ctx->mask); 4184 4185 /* Read the used clusters mask */ 4186 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 4187 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 4188 SPDK_MALLOC_DMA); 4189 if (!ctx->mask) { 4190 bs_load_ctx_fail(ctx, -ENOMEM); 4191 return; 4192 } 4193 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4194 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4195 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4196 bs_load_used_clusters_cpl, ctx); 4197 } 4198 4199 static void 4200 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx) 4201 { 4202 uint64_t lba, lba_count, mask_size; 4203 4204 /* Read the used pages mask */ 4205 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 4206 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4207 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4208 if (!ctx->mask) { 4209 bs_load_ctx_fail(ctx, -ENOMEM); 4210 return; 4211 } 4212 4213 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4214 lba_count = 
bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4215 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 4216 bs_load_used_pages_cpl, ctx); 4217 } 4218 4219 static int 4220 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 4221 { 4222 struct spdk_blob_store *bs = ctx->bs; 4223 struct spdk_blob_md_descriptor *desc; 4224 size_t cur_desc = 0; 4225 4226 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4227 while (cur_desc < sizeof(page->descriptors)) { 4228 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4229 if (desc->length == 0) { 4230 /* If padding and length are 0, this terminates the page */ 4231 break; 4232 } 4233 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4234 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4235 unsigned int i, j; 4236 unsigned int cluster_count = 0; 4237 uint32_t cluster_idx; 4238 4239 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4240 4241 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4242 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 4243 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 4244 /* 4245 * cluster_idx = 0 means an unallocated cluster - don't mark that 4246 * in the used cluster map. 4247 */ 4248 if (cluster_idx != 0) { 4249 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 4250 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 4251 if (bs->num_free_clusters == 0) { 4252 return -ENOSPC; 4253 } 4254 bs->num_free_clusters--; 4255 } 4256 cluster_count++; 4257 } 4258 } 4259 if (cluster_count == 0) { 4260 return -EINVAL; 4261 } 4262 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4263 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4264 uint32_t i; 4265 uint32_t cluster_count = 0; 4266 uint32_t cluster_idx; 4267 size_t cluster_idx_length; 4268 4269 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4270 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4271 4272 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4273 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4274 return -EINVAL; 4275 } 4276 4277 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4278 cluster_idx = desc_extent->cluster_idx[i]; 4279 /* 4280 * cluster_idx = 0 means an unallocated cluster - don't mark that 4281 * in the used cluster map. 
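 *
 * Worked example (hypothetical values): an extent page carrying
 * cluster_idx[] = { 17, 0, 42 } marks disk clusters 17 and 42 as used,
 * skips the hole at index 1, and still advances cluster_count for all
 * three entries.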
4282 */ 4283 if (cluster_idx != 0) { 4284 if (cluster_idx < desc_extent->start_cluster_idx && 4285 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4286 return -EINVAL; 4287 } 4288 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4289 if (bs->num_free_clusters == 0) { 4290 return -ENOSPC; 4291 } 4292 bs->num_free_clusters--; 4293 } 4294 cluster_count++; 4295 } 4296 4297 if (cluster_count == 0) { 4298 return -EINVAL; 4299 } 4300 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4301 /* Skip this item */ 4302 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4303 /* Skip this item */ 4304 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4305 /* Skip this item */ 4306 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4307 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4308 uint32_t num_extent_pages = ctx->num_extent_pages; 4309 uint32_t i; 4310 size_t extent_pages_length; 4311 void *tmp; 4312 4313 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4314 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4315 4316 if (desc_extent_table->length == 0 || 4317 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4318 return -EINVAL; 4319 } 4320 4321 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4322 if (desc_extent_table->extent_page[i].page_idx != 0) { 4323 if (desc_extent_table->extent_page[i].num_pages != 1) { 4324 return -EINVAL; 4325 } 4326 num_extent_pages += 1; 4327 } 4328 } 4329 4330 if (num_extent_pages > 0) { 4331 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4332 if (tmp == NULL) { 4333 return -ENOMEM; 4334 } 4335 ctx->extent_page_num = tmp; 4336 4337 /* Extent table entries contain md page numbers for extent pages. 4338 * Zeroes represent unallocated extent pages, those are run-length-encoded. 4339 */ 4340 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4341 if (desc_extent_table->extent_page[i].page_idx != 0) { 4342 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4343 ctx->num_extent_pages += 1; 4344 } 4345 } 4346 } 4347 } else { 4348 /* Error */ 4349 return -EINVAL; 4350 } 4351 /* Advance to the next descriptor */ 4352 cur_desc += sizeof(*desc) + desc->length; 4353 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4354 break; 4355 } 4356 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4357 } 4358 return 0; 4359 } 4360 4361 static bool 4362 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4363 { 4364 uint32_t crc; 4365 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4366 size_t desc_len; 4367 4368 crc = blob_md_page_calc_crc(page); 4369 if (crc != page->crc) { 4370 return false; 4371 } 4372 4373 /* Extent page should always be of sequence num 0. */ 4374 if (page->sequence_num != 0) { 4375 return false; 4376 } 4377 4378 /* Descriptor type must be EXTENT_PAGE. */ 4379 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4380 return false; 4381 } 4382 4383 /* Descriptor length cannot exceed the page. */ 4384 desc_len = sizeof(*desc) + desc->length; 4385 if (desc_len > sizeof(page->descriptors)) { 4386 return false; 4387 } 4388 4389 /* It has to be the only descriptor in the page. 
 */
	if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
		if (desc->length != 0) {
			return false;
		}
	}

	return true;
}

static bool
bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
{
	uint32_t crc;
	struct spdk_blob_md_page *page = ctx->page;

	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		return false;
	}

	/* First page of a sequence should match the blobid. */
	if (page->sequence_num == 0 &&
	    bs_page_to_blobid(ctx->cur_page) != page->id) {
		return false;
	}
	assert(bs_load_cur_extent_page_valid(page) == false);

	return true;
}

static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);

static void
bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	bs_load_complete(ctx);
}

static void
bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl);
}

static void
bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl);
}

static void
bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
{
	bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl);
}

static void
bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
{
	uint64_t num_md_clusters;
	uint64_t i;

	ctx->in_page_chain = false;

	do {
		ctx->page_index++;
	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);

	if (ctx->page_index < ctx->super->md_len) {
		ctx->cur_page = ctx->page_index;
		bs_load_replay_cur_md_page(ctx);
	} else {
		/* Claim all of the clusters used by the metadata */
		num_md_clusters = spdk_divide_round_up(
					  ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster);
		for (i = 0; i < num_md_clusters; i++) {
			spdk_bit_array_set(ctx->used_clusters, i);
		}
		ctx->bs->num_free_clusters -= num_md_clusters;
		spdk_free(ctx->page);
		bs_load_write_used_md(ctx);
	}
}

static void
bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint32_t page_num;
	uint64_t i;

	if (bserrno != 0) {
		spdk_free(ctx->extent_pages);
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	for (i = 0; i < ctx->num_extent_pages; i++) {
		/* Extent pages are only read when referenced from within an md chain.
		 * The metadata is not intact if such a page is not a valid extent page.
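		 * A failed check here (or a parse failure just below) aborts the whole
		 * load with -EILSEQ rather than skipping the page, since a bogus extent
		 * page means the recovered cluster map cannot be trusted.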
*/ 4518 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4519 spdk_free(ctx->extent_pages); 4520 bs_load_ctx_fail(ctx, -EILSEQ); 4521 return; 4522 } 4523 4524 page_num = ctx->extent_page_num[i]; 4525 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4526 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4527 spdk_free(ctx->extent_pages); 4528 bs_load_ctx_fail(ctx, -EILSEQ); 4529 return; 4530 } 4531 } 4532 4533 spdk_free(ctx->extent_pages); 4534 free(ctx->extent_page_num); 4535 ctx->extent_page_num = NULL; 4536 ctx->num_extent_pages = 0; 4537 4538 bs_load_replay_md_chain_cpl(ctx); 4539 } 4540 4541 static void 4542 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4543 { 4544 spdk_bs_batch_t *batch; 4545 uint32_t page; 4546 uint64_t lba; 4547 uint64_t i; 4548 4549 ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0, 4550 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4551 if (!ctx->extent_pages) { 4552 bs_load_ctx_fail(ctx, -ENOMEM); 4553 return; 4554 } 4555 4556 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4557 4558 for (i = 0; i < ctx->num_extent_pages; i++) { 4559 page = ctx->extent_page_num[i]; 4560 assert(page < ctx->super->md_len); 4561 lba = bs_md_page_to_lba(ctx->bs, page); 4562 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4563 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE)); 4564 } 4565 4566 bs_batch_close(batch); 4567 } 4568 4569 static void 4570 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4571 { 4572 struct spdk_bs_load_ctx *ctx = cb_arg; 4573 uint32_t page_num; 4574 struct spdk_blob_md_page *page; 4575 4576 if (bserrno != 0) { 4577 bs_load_ctx_fail(ctx, bserrno); 4578 return; 4579 } 4580 4581 page_num = ctx->cur_page; 4582 page = ctx->page; 4583 if (bs_load_cur_md_page_valid(ctx) == true) { 4584 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4585 spdk_spin_lock(&ctx->bs->used_lock); 4586 bs_claim_md_page(ctx->bs, page_num); 4587 spdk_spin_unlock(&ctx->bs->used_lock); 4588 if (page->sequence_num == 0) { 4589 SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num); 4590 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4591 } 4592 if (bs_load_replay_md_parse_page(ctx, page)) { 4593 bs_load_ctx_fail(ctx, -EILSEQ); 4594 return; 4595 } 4596 if (page->next != SPDK_INVALID_MD_PAGE) { 4597 ctx->in_page_chain = true; 4598 ctx->cur_page = page->next; 4599 bs_load_replay_cur_md_page(ctx); 4600 return; 4601 } 4602 if (ctx->num_extent_pages != 0) { 4603 bs_load_replay_extent_pages(ctx); 4604 return; 4605 } 4606 } 4607 } 4608 bs_load_replay_md_chain_cpl(ctx); 4609 } 4610 4611 static void 4612 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4613 { 4614 uint64_t lba; 4615 4616 assert(ctx->cur_page < ctx->super->md_len); 4617 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4618 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4619 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4620 bs_load_replay_md_cpl, ctx); 4621 } 4622 4623 static void 4624 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4625 { 4626 ctx->page_index = 0; 4627 ctx->cur_page = 0; 4628 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4629 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4630 if (!ctx->page) { 4631 bs_load_ctx_fail(ctx, -ENOMEM); 4632 return; 4633 } 4634 bs_load_replay_cur_md_page(ctx); 4635 } 4636 4637 static void 4638 bs_recover(struct spdk_bs_load_ctx *ctx) 4639 { 4640 int rc; 4641 4642 SPDK_NOTICELOG("Performing recovery on blobstore\n"); 4643 rc = 
spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4644 if (rc < 0) { 4645 bs_load_ctx_fail(ctx, -ENOMEM); 4646 return; 4647 } 4648 4649 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4650 if (rc < 0) { 4651 bs_load_ctx_fail(ctx, -ENOMEM); 4652 return; 4653 } 4654 4655 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4656 if (rc < 0) { 4657 bs_load_ctx_fail(ctx, -ENOMEM); 4658 return; 4659 } 4660 4661 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4662 if (rc < 0) { 4663 bs_load_ctx_fail(ctx, -ENOMEM); 4664 return; 4665 } 4666 4667 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4668 bs_load_replay_md(ctx); 4669 } 4670 4671 static int 4672 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4673 { 4674 int rc; 4675 4676 if (ctx->super->size == 0) { 4677 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4678 } 4679 4680 if (ctx->super->io_unit_size == 0) { 4681 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4682 } 4683 4684 ctx->bs->clean = 1; 4685 ctx->bs->cluster_sz = ctx->super->cluster_size; 4686 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4687 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 4688 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) { 4689 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster); 4690 } 4691 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4692 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4693 if (rc < 0) { 4694 return -ENOMEM; 4695 } 4696 ctx->bs->md_start = ctx->super->md_start; 4697 ctx->bs->md_len = ctx->super->md_len; 4698 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4699 if (rc < 0) { 4700 return -ENOMEM; 4701 } 4702 4703 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4704 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4705 ctx->bs->super_blob = ctx->super->super_blob; 4706 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4707 4708 return 0; 4709 } 4710 4711 static void 4712 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4713 { 4714 struct spdk_bs_load_ctx *ctx = cb_arg; 4715 int rc; 4716 4717 rc = bs_super_validate(ctx->super, ctx->bs); 4718 if (rc != 0) { 4719 bs_load_ctx_fail(ctx, rc); 4720 return; 4721 } 4722 4723 rc = bs_parse_super(ctx); 4724 if (rc < 0) { 4725 bs_load_ctx_fail(ctx, rc); 4726 return; 4727 } 4728 4729 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4730 bs_recover(ctx); 4731 } else { 4732 bs_load_read_used_pages(ctx); 4733 } 4734 } 4735 4736 static inline int 4737 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4738 { 4739 4740 if (!src->opts_size) { 4741 SPDK_ERRLOG("opts_size should not be zero value\n"); 4742 return -1; 4743 } 4744 4745 #define FIELD_OK(field) \ 4746 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4747 4748 #define SET_FIELD(field) \ 4749 if (FIELD_OK(field)) { \ 4750 dst->field = src->field; \ 4751 } \ 4752 4753 SET_FIELD(cluster_sz); 4754 SET_FIELD(num_md_pages); 4755 SET_FIELD(max_md_ops); 4756 SET_FIELD(max_channel_ops); 4757 SET_FIELD(clear_method); 4758 4759 if (FIELD_OK(bstype)) { 4760 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 4761 } 4762 SET_FIELD(iter_cb_fn); 4763 SET_FIELD(iter_cb_arg); 4764 SET_FIELD(force_recover); 4765 SET_FIELD(esnap_bs_dev_create); 4766 
SET_FIELD(esnap_ctx); 4767 4768 dst->opts_size = src->opts_size; 4769 4770 /* You should not remove this statement, but need to update the assert statement 4771 * if you add a new field, and also add a corresponding SET_FIELD statement */ 4772 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size"); 4773 4774 #undef FIELD_OK 4775 #undef SET_FIELD 4776 4777 return 0; 4778 } 4779 4780 void 4781 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4782 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4783 { 4784 struct spdk_blob_store *bs; 4785 struct spdk_bs_cpl cpl; 4786 struct spdk_bs_load_ctx *ctx; 4787 struct spdk_bs_opts opts = {}; 4788 int err; 4789 4790 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 4791 4792 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 4793 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 4794 dev->destroy(dev); 4795 cb_fn(cb_arg, NULL, -EINVAL); 4796 return; 4797 } 4798 4799 spdk_bs_opts_init(&opts, sizeof(opts)); 4800 if (o) { 4801 if (bs_opts_copy(o, &opts)) { 4802 return; 4803 } 4804 } 4805 4806 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 4807 dev->destroy(dev); 4808 cb_fn(cb_arg, NULL, -EINVAL); 4809 return; 4810 } 4811 4812 err = bs_alloc(dev, &opts, &bs, &ctx); 4813 if (err) { 4814 dev->destroy(dev); 4815 cb_fn(cb_arg, NULL, err); 4816 return; 4817 } 4818 4819 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 4820 cpl.u.bs_handle.cb_fn = cb_fn; 4821 cpl.u.bs_handle.cb_arg = cb_arg; 4822 cpl.u.bs_handle.bs = bs; 4823 4824 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 4825 if (!ctx->seq) { 4826 spdk_free(ctx->super); 4827 free(ctx); 4828 bs_free(bs); 4829 cb_fn(cb_arg, NULL, -ENOMEM); 4830 return; 4831 } 4832 4833 /* Read the super block */ 4834 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 4835 bs_byte_to_lba(bs, sizeof(*ctx->super)), 4836 bs_load_super_cpl, ctx); 4837 } 4838 4839 /* END spdk_bs_load */ 4840 4841 /* START spdk_bs_dump */ 4842 4843 static void 4844 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 4845 { 4846 spdk_free(ctx->super); 4847 4848 /* 4849 * We need to defer calling bs_call_cpl() until after 4850 * dev destruction, so tuck these away for later use. 
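 *
 * With seq->cpl.type set to SPDK_BS_CPL_TYPE_NONE, bs_sequence_finish()
 * below tears the sequence down without firing the completion; the copy
 * stashed in bs->unload_cpl (together with bs->unload_err) is what
 * eventually completes the original request, once bs_free() has destroyed
 * the device.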
4851 */ 4852 ctx->bs->unload_err = bserrno; 4853 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 4854 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 4855 4856 bs_sequence_finish(seq, 0); 4857 bs_free(ctx->bs); 4858 free(ctx); 4859 } 4860 4861 static void 4862 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4863 { 4864 struct spdk_blob_md_descriptor_xattr *desc_xattr; 4865 uint32_t i; 4866 const char *type; 4867 4868 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 4869 4870 if (desc_xattr->length != 4871 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 4872 desc_xattr->name_length + desc_xattr->value_length) { 4873 } 4874 4875 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 4876 ctx->xattr_name[desc_xattr->name_length] = '\0'; 4877 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4878 type = "XATTR"; 4879 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4880 type = "XATTR_INTERNAL"; 4881 } else { 4882 assert(false); 4883 type = "XATTR_?"; 4884 } 4885 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name); 4886 fprintf(ctx->fp, " value = \""); 4887 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 4888 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 4889 desc_xattr->value_length); 4890 fprintf(ctx->fp, "\"\n"); 4891 for (i = 0; i < desc_xattr->value_length; i++) { 4892 if (i % 16 == 0) { 4893 fprintf(ctx->fp, " "); 4894 } 4895 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 4896 if ((i + 1) % 16 == 0) { 4897 fprintf(ctx->fp, "\n"); 4898 } 4899 } 4900 if (i % 16 != 0) { 4901 fprintf(ctx->fp, "\n"); 4902 } 4903 } 4904 4905 struct type_flag_desc { 4906 uint64_t mask; 4907 uint64_t val; 4908 const char *name; 4909 }; 4910 4911 static void 4912 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags, 4913 struct type_flag_desc *desc, size_t numflags) 4914 { 4915 uint64_t covered = 0; 4916 size_t i; 4917 4918 for (i = 0; i < numflags; i++) { 4919 if ((desc[i].mask & flags) != desc[i].val) { 4920 continue; 4921 } 4922 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name); 4923 if (desc[i].mask != desc[i].val) { 4924 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")", 4925 desc[i].mask, desc[i].val); 4926 } 4927 fprintf(ctx->fp, "\n"); 4928 covered |= desc[i].mask; 4929 } 4930 if ((flags & ~covered) != 0) { 4931 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered); 4932 } 4933 } 4934 4935 static void 4936 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4937 { 4938 struct spdk_blob_md_descriptor_flags *type_desc; 4939 #define ADD_FLAG(f) { f, f, #f } 4940 #define ADD_MASK_VAL(m, v) { m, v, #v } 4941 static struct type_flag_desc invalid[] = { 4942 ADD_FLAG(SPDK_BLOB_THIN_PROV), 4943 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR), 4944 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE), 4945 }; 4946 static struct type_flag_desc data_ro[] = { 4947 ADD_FLAG(SPDK_BLOB_READ_ONLY), 4948 }; 4949 static struct type_flag_desc md_ro[] = { 4950 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT), 4951 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE), 4952 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP), 4953 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES), 4954 }; 4955 #undef ADD_FLAG 4956 #undef ADD_MASK_VAL 4957 4958 type_desc = (struct 
spdk_blob_md_descriptor_flags *)desc; 4959 fprintf(ctx->fp, "Flags:\n"); 4960 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 4961 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 4962 SPDK_COUNTOF(invalid)); 4963 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 4964 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 4965 SPDK_COUNTOF(data_ro)); 4966 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 4967 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 4968 SPDK_COUNTOF(md_ro)); 4969 } 4970 4971 static void 4972 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4973 { 4974 struct spdk_blob_md_descriptor_extent_table *et_desc; 4975 uint64_t num_extent_pages; 4976 uint32_t et_idx; 4977 4978 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 4979 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 4980 sizeof(et_desc->extent_page[0]); 4981 4982 fprintf(ctx->fp, "Extent table:\n"); 4983 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 4984 if (et_desc->extent_page[et_idx].page_idx == 0) { 4985 /* Zeroes represent unallocated extent pages. */ 4986 continue; 4987 } 4988 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 4989 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 4990 et_desc->extent_page[et_idx].num_pages, 4991 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 4992 } 4993 } 4994 4995 static void 4996 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 4997 { 4998 uint32_t page_idx = ctx->cur_page; 4999 struct spdk_blob_md_page *page = ctx->page; 5000 struct spdk_blob_md_descriptor *desc; 5001 size_t cur_desc = 0; 5002 uint32_t crc; 5003 5004 fprintf(ctx->fp, "=========\n"); 5005 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 5006 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 5007 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 5008 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 5009 if (page->next == SPDK_INVALID_MD_PAGE) { 5010 fprintf(ctx->fp, "Next: None\n"); 5011 } else { 5012 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 5013 } 5014 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 5015 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 5016 fprintf(ctx->fp, " md"); 5017 } 5018 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 5019 fprintf(ctx->fp, " blob"); 5020 } 5021 fprintf(ctx->fp, "\n"); 5022 5023 crc = blob_md_page_calc_crc(page); 5024 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 5025 5026 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 5027 while (cur_desc < sizeof(page->descriptors)) { 5028 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 5029 if (desc->length == 0) { 5030 /* If padding and length are 0, this terminates the page */ 5031 break; 5032 } 5033 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 5034 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 5035 unsigned int i; 5036 5037 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 5038 5039 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 5040 if (desc_extent_rle->extents[i].cluster_idx != 0) { 5041 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5042 desc_extent_rle->extents[i].cluster_idx); 5043 } else { 5044 fprintf(ctx->fp, "Unallocated Extent - "); 5045 } 5046 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 5047 fprintf(ctx->fp, "\n"); 5048 } 5049 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 5050 struct spdk_blob_md_descriptor_extent_page *desc_extent; 5051 unsigned int i; 5052 5053 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 5054 5055 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 5056 if (desc_extent->cluster_idx[i] != 0) { 5057 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5058 desc_extent->cluster_idx[i]); 5059 } else { 5060 fprintf(ctx->fp, "Unallocated Extent"); 5061 } 5062 fprintf(ctx->fp, "\n"); 5063 } 5064 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5065 bs_dump_print_xattr(ctx, desc); 5066 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5067 bs_dump_print_xattr(ctx, desc); 5068 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 5069 bs_dump_print_type_flags(ctx, desc); 5070 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 5071 bs_dump_print_extent_table(ctx, desc); 5072 } else { 5073 /* Error */ 5074 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 5075 } 5076 /* Advance to the next descriptor */ 5077 cur_desc += sizeof(*desc) + desc->length; 5078 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 5079 break; 5080 } 5081 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 5082 } 5083 } 5084 5085 static void 5086 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5087 { 5088 struct spdk_bs_load_ctx *ctx = cb_arg; 5089 5090 if (bserrno != 0) { 5091 bs_dump_finish(seq, ctx, bserrno); 5092 return; 5093 } 5094 5095 if (ctx->page->id != 0) { 5096 bs_dump_print_md_page(ctx); 5097 } 5098 5099 ctx->cur_page++; 5100 5101 if (ctx->cur_page < ctx->super->md_len) { 5102 bs_dump_read_md_page(seq, ctx); 5103 } else { 5104 spdk_free(ctx->page); 5105 bs_dump_finish(seq, ctx, 0); 5106 } 5107 } 5108 5109 static void 5110 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 5111 { 5112 struct spdk_bs_load_ctx *ctx = cb_arg; 5113 uint64_t lba; 5114 5115 assert(ctx->cur_page < ctx->super->md_len); 5116 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 5117 bs_sequence_read_dev(seq, ctx->page, lba, 5118 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 5119 bs_dump_read_md_page_cpl, ctx); 5120 } 5121 5122 static void 5123 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5124 { 5125 struct spdk_bs_load_ctx *ctx = cb_arg; 5126 int rc; 5127 5128 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 5129 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5130 sizeof(ctx->super->signature)) != 0) { 5131 fprintf(ctx->fp, "(Mismatch)\n"); 5132 bs_dump_finish(seq, ctx, bserrno); 5133 return; 5134 } else { 5135 fprintf(ctx->fp, "(OK)\n"); 5136 } 5137 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 5138 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 5139 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 5140 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 5141 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 5142 fprintf(ctx->fp, "Super Blob ID: "); 5143 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 5144 fprintf(ctx->fp, "(None)\n"); 5145 } else { 5146 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 5147 } 5148 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 5149 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 5150 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 5151 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 5152 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 5153 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 5154 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 5155 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 5156 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 5157 5158 ctx->cur_page = 0; 5159 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 5160 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5161 if (!ctx->page) { 5162 bs_dump_finish(seq, ctx, -ENOMEM); 5163 return; 5164 } 5165 5166 rc = bs_parse_super(ctx); 5167 if (rc < 0) { 5168 bs_load_ctx_fail(ctx, rc); 5169 return; 5170 } 5171 5172 bs_load_read_used_pages(ctx); 5173 } 5174 5175 void 5176 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 5177 spdk_bs_op_complete cb_fn, void *cb_arg) 5178 { 5179 struct spdk_blob_store *bs; 5180 struct spdk_bs_cpl cpl; 5181 struct spdk_bs_load_ctx *ctx; 5182 struct spdk_bs_opts opts = {}; 5183 int err; 5184 5185 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 5186 5187 spdk_bs_opts_init(&opts, sizeof(opts)); 5188 5189 err = bs_alloc(dev, &opts, &bs, &ctx); 5190 if (err) { 5191 dev->destroy(dev); 5192 cb_fn(cb_arg, err); 5193 return; 5194 } 5195 5196 ctx->dumping = true; 5197 ctx->fp = fp; 5198 ctx->print_xattr_fn = print_xattr_fn; 5199 5200 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5201 cpl.u.bs_basic.cb_fn = cb_fn; 5202 cpl.u.bs_basic.cb_arg = cb_arg; 5203 5204 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5205 if (!ctx->seq) { 5206 spdk_free(ctx->super); 5207 free(ctx); 5208 bs_free(bs); 5209 cb_fn(cb_arg, -ENOMEM); 5210 return; 5211 } 5212 5213 /* Read the super block */ 5214 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5215 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5216 bs_dump_super_cpl, ctx); 5217 } 5218 5219 /* END spdk_bs_dump */ 5220 5221 /* START spdk_bs_init */ 5222 5223 static void 5224 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5225 { 5226 struct spdk_bs_load_ctx *ctx = cb_arg; 5227 5228 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 5229 spdk_free(ctx->super); 5230 free(ctx); 5231 5232 bs_sequence_finish(seq, bserrno); 5233 } 5234 5235 static void 5236 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5237 { 5238 struct spdk_bs_load_ctx *ctx = cb_arg; 5239 5240 /* Write super block */ 5241 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5242 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5243 bs_init_persist_super_cpl, ctx); 5244 } 5245 5246 void 5247 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5248 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5249 { 5250 struct spdk_bs_load_ctx *ctx; 5251 struct spdk_blob_store *bs; 5252 struct spdk_bs_cpl cpl; 5253 spdk_bs_sequence_t *seq; 5254 spdk_bs_batch_t *batch; 5255 uint64_t num_md_lba; 5256 uint64_t num_md_pages; 5257 uint64_t num_md_clusters; 5258 uint64_t max_used_cluster_mask_len; 5259 uint32_t i; 5260 struct spdk_bs_opts opts = {}; 5261 int rc; 5262 uint64_t lba, lba_count; 5263 5264 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5265 5266 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5267 SPDK_ERRLOG("unsupported dev block length of %d\n", 5268 dev->blocklen); 5269 dev->destroy(dev); 5270 cb_fn(cb_arg, NULL, -EINVAL); 5271 return; 5272 } 5273 5274 spdk_bs_opts_init(&opts, sizeof(opts)); 5275 if (o) { 5276 if (bs_opts_copy(o, &opts)) { 5277 return; 5278 } 5279 } 5280 5281 if (bs_opts_verify(&opts) != 0) { 5282 dev->destroy(dev); 5283 cb_fn(cb_arg, NULL, -EINVAL); 5284 return; 5285 } 5286 5287 rc = bs_alloc(dev, &opts, &bs, &ctx); 5288 if (rc) { 5289 dev->destroy(dev); 5290 cb_fn(cb_arg, NULL, rc); 5291 return; 5292 } 5293 5294 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5295 /* By default, allocate 1 page per cluster. 5296 * Technically, this over-allocates metadata 5297 * because more metadata will reduce the number 5298 * of usable clusters. This can be addressed with 5299 * more complex math in the future. 5300 */ 5301 bs->md_len = bs->total_clusters; 5302 } else { 5303 bs->md_len = opts.num_md_pages; 5304 } 5305 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5306 if (rc < 0) { 5307 spdk_free(ctx->super); 5308 free(ctx); 5309 bs_free(bs); 5310 cb_fn(cb_arg, NULL, -ENOMEM); 5311 return; 5312 } 5313 5314 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5315 if (rc < 0) { 5316 spdk_free(ctx->super); 5317 free(ctx); 5318 bs_free(bs); 5319 cb_fn(cb_arg, NULL, -ENOMEM); 5320 return; 5321 } 5322 5323 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5324 if (rc < 0) { 5325 spdk_free(ctx->super); 5326 free(ctx); 5327 bs_free(bs); 5328 cb_fn(cb_arg, NULL, -ENOMEM); 5329 return; 5330 } 5331 5332 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5333 sizeof(ctx->super->signature)); 5334 ctx->super->version = SPDK_BS_VERSION; 5335 ctx->super->length = sizeof(*ctx->super); 5336 ctx->super->super_blob = bs->super_blob; 5337 ctx->super->clean = 0; 5338 ctx->super->cluster_size = bs->cluster_sz; 5339 ctx->super->io_unit_size = bs->io_unit_size; 5340 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5341 5342 /* Calculate how many pages the metadata consumes at the front 5343 * of the disk. 5344 */ 5345 5346 /* The super block uses 1 page */ 5347 num_md_pages = 1; 5348 5349 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5350 * up to the nearest page, plus a header. 
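	 *
	 * Worked example (hypothetical sizes): with bs->md_len = 65536 metadata
	 * pages, the bitmap needs 65536 / 8 = 8192 bytes; adding the small
	 * spdk_bs_md_mask header and rounding up to 4 KiB pages gives a
	 * used_page_mask_len of 3 pages.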
5351 */ 5352 ctx->super->used_page_mask_start = num_md_pages; 5353 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5354 spdk_divide_round_up(bs->md_len, 8), 5355 SPDK_BS_PAGE_SIZE); 5356 num_md_pages += ctx->super->used_page_mask_len; 5357 5358 /* The used_clusters mask requires 1 bit per cluster, rounded 5359 * up to the nearest page, plus a header. 5360 */ 5361 ctx->super->used_cluster_mask_start = num_md_pages; 5362 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5363 spdk_divide_round_up(bs->total_clusters, 8), 5364 SPDK_BS_PAGE_SIZE); 5365 /* The blobstore might be extended, then the used_cluster bitmap will need more space. 5366 * Here we calculate the max clusters we can support according to the 5367 * num_md_pages (bs->md_len). 5368 */ 5369 max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5370 spdk_divide_round_up(bs->md_len, 8), 5371 SPDK_BS_PAGE_SIZE); 5372 max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len, 5373 ctx->super->used_cluster_mask_len); 5374 num_md_pages += max_used_cluster_mask_len; 5375 5376 /* The used_blobids mask requires 1 bit per metadata page, rounded 5377 * up to the nearest page, plus a header. 5378 */ 5379 ctx->super->used_blobid_mask_start = num_md_pages; 5380 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5381 spdk_divide_round_up(bs->md_len, 8), 5382 SPDK_BS_PAGE_SIZE); 5383 num_md_pages += ctx->super->used_blobid_mask_len; 5384 5385 /* The metadata region size was chosen above */ 5386 ctx->super->md_start = bs->md_start = num_md_pages; 5387 ctx->super->md_len = bs->md_len; 5388 num_md_pages += bs->md_len; 5389 5390 num_md_lba = bs_page_to_lba(bs, num_md_pages); 5391 5392 ctx->super->size = dev->blockcnt * dev->blocklen; 5393 5394 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 5395 5396 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 5397 if (num_md_clusters > bs->total_clusters) { 5398 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 5399 "please decrease number of pages reserved for metadata " 5400 "or increase cluster size.\n"); 5401 spdk_free(ctx->super); 5402 spdk_bit_array_free(&ctx->used_clusters); 5403 free(ctx); 5404 bs_free(bs); 5405 cb_fn(cb_arg, NULL, -ENOMEM); 5406 return; 5407 } 5408 /* Claim all of the clusters used by the metadata */ 5409 for (i = 0; i < num_md_clusters; i++) { 5410 spdk_bit_array_set(ctx->used_clusters, i); 5411 } 5412 5413 bs->num_free_clusters -= num_md_clusters; 5414 bs->total_data_clusters = bs->num_free_clusters; 5415 5416 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5417 cpl.u.bs_handle.cb_fn = cb_fn; 5418 cpl.u.bs_handle.cb_arg = cb_arg; 5419 cpl.u.bs_handle.bs = bs; 5420 5421 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5422 if (!seq) { 5423 spdk_free(ctx->super); 5424 free(ctx); 5425 bs_free(bs); 5426 cb_fn(cb_arg, NULL, -ENOMEM); 5427 return; 5428 } 5429 5430 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx); 5431 5432 /* Clear metadata space */ 5433 bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 5434 5435 lba = num_md_lba; 5436 lba_count = ctx->bs->dev->blockcnt - lba; 5437 switch (opts.clear_method) { 5438 case BS_CLEAR_WITH_UNMAP: 5439 /* Trim data clusters */ 5440 bs_batch_unmap_dev(batch, lba, lba_count); 5441 break; 5442 case BS_CLEAR_WITH_WRITE_ZEROES: 5443 /* Write_zeroes to data clusters */ 5444 bs_batch_write_zeroes_dev(batch, lba, lba_count); 5445 break; 5446 case 
BS_CLEAR_WITH_NONE: 5447 default: 5448 break; 5449 } 5450 5451 bs_batch_close(batch); 5452 } 5453 5454 /* END spdk_bs_init */ 5455 5456 /* START spdk_bs_destroy */ 5457 5458 static void 5459 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5460 { 5461 struct spdk_bs_load_ctx *ctx = cb_arg; 5462 struct spdk_blob_store *bs = ctx->bs; 5463 5464 /* 5465 * We need to defer calling bs_call_cpl() until after 5466 * dev destruction, so tuck these away for later use. 5467 */ 5468 bs->unload_err = bserrno; 5469 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5470 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5471 5472 bs_sequence_finish(seq, bserrno); 5473 5474 bs_free(bs); 5475 free(ctx); 5476 } 5477 5478 void 5479 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5480 void *cb_arg) 5481 { 5482 struct spdk_bs_cpl cpl; 5483 spdk_bs_sequence_t *seq; 5484 struct spdk_bs_load_ctx *ctx; 5485 5486 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5487 5488 if (!RB_EMPTY(&bs->open_blobs)) { 5489 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5490 cb_fn(cb_arg, -EBUSY); 5491 return; 5492 } 5493 5494 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5495 cpl.u.bs_basic.cb_fn = cb_fn; 5496 cpl.u.bs_basic.cb_arg = cb_arg; 5497 5498 ctx = calloc(1, sizeof(*ctx)); 5499 if (!ctx) { 5500 cb_fn(cb_arg, -ENOMEM); 5501 return; 5502 } 5503 5504 ctx->bs = bs; 5505 5506 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5507 if (!seq) { 5508 free(ctx); 5509 cb_fn(cb_arg, -ENOMEM); 5510 return; 5511 } 5512 5513 /* Write zeroes to the super block */ 5514 bs_sequence_write_zeroes_dev(seq, 5515 bs_page_to_lba(bs, 0), 5516 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5517 bs_destroy_trim_cpl, ctx); 5518 } 5519 5520 /* END spdk_bs_destroy */ 5521 5522 /* START spdk_bs_unload */ 5523 5524 static void 5525 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5526 { 5527 spdk_bs_sequence_t *seq = ctx->seq; 5528 5529 spdk_free(ctx->super); 5530 5531 /* 5532 * We need to defer calling bs_call_cpl() until after 5533 * dev destruction, so tuck these away for later use. 
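	 * (Same deferred-completion pattern as bs_dump_finish() and
	 * bs_destroy_trim_cpl() above.)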
5534 */ 5535 ctx->bs->unload_err = bserrno; 5536 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5537 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5538 5539 bs_sequence_finish(seq, bserrno); 5540 5541 bs_free(ctx->bs); 5542 free(ctx); 5543 } 5544 5545 static void 5546 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5547 { 5548 struct spdk_bs_load_ctx *ctx = cb_arg; 5549 5550 bs_unload_finish(ctx, bserrno); 5551 } 5552 5553 static void 5554 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5555 { 5556 struct spdk_bs_load_ctx *ctx = cb_arg; 5557 5558 spdk_free(ctx->mask); 5559 5560 if (bserrno != 0) { 5561 bs_unload_finish(ctx, bserrno); 5562 return; 5563 } 5564 5565 ctx->super->clean = 1; 5566 5567 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5568 } 5569 5570 static void 5571 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5572 { 5573 struct spdk_bs_load_ctx *ctx = cb_arg; 5574 5575 spdk_free(ctx->mask); 5576 ctx->mask = NULL; 5577 5578 if (bserrno != 0) { 5579 bs_unload_finish(ctx, bserrno); 5580 return; 5581 } 5582 5583 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5584 } 5585 5586 static void 5587 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5588 { 5589 struct spdk_bs_load_ctx *ctx = cb_arg; 5590 5591 spdk_free(ctx->mask); 5592 ctx->mask = NULL; 5593 5594 if (bserrno != 0) { 5595 bs_unload_finish(ctx, bserrno); 5596 return; 5597 } 5598 5599 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5600 } 5601 5602 static void 5603 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5604 { 5605 struct spdk_bs_load_ctx *ctx = cb_arg; 5606 int rc; 5607 5608 if (bserrno != 0) { 5609 bs_unload_finish(ctx, bserrno); 5610 return; 5611 } 5612 5613 rc = bs_super_validate(ctx->super, ctx->bs); 5614 if (rc != 0) { 5615 bs_unload_finish(ctx, rc); 5616 return; 5617 } 5618 5619 bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5620 } 5621 5622 void 5623 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5624 { 5625 struct spdk_bs_cpl cpl; 5626 struct spdk_bs_load_ctx *ctx; 5627 5628 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5629 5630 /* 5631 * If external snapshot channels are being destroyed while the blobstore is unloaded, the 5632 * unload is deferred until after the channel destruction completes. 
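	 * The completion path for those channels is expected to re-enter
	 * spdk_bs_unload() with the callback stored here, which is why the
	 * deferred branch below asserts that it sees the same cb_fn/cb_arg
	 * pair before clearing them.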
5633 */ 5634 if (bs->esnap_channels_unloading != 0) { 5635 if (bs->esnap_unload_cb_fn != NULL) { 5636 SPDK_ERRLOG("Blobstore unload in progress\n"); 5637 cb_fn(cb_arg, -EBUSY); 5638 return; 5639 } 5640 SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32 5641 " esnap clones are unloading\n", bs->esnap_channels_unloading); 5642 bs->esnap_unload_cb_fn = cb_fn; 5643 bs->esnap_unload_cb_arg = cb_arg; 5644 return; 5645 } 5646 if (bs->esnap_unload_cb_fn != NULL) { 5647 SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n"); 5648 assert(bs->esnap_unload_cb_fn == cb_fn); 5649 assert(bs->esnap_unload_cb_arg == cb_arg); 5650 bs->esnap_unload_cb_fn = NULL; 5651 bs->esnap_unload_cb_arg = NULL; 5652 } 5653 5654 if (!RB_EMPTY(&bs->open_blobs)) { 5655 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5656 cb_fn(cb_arg, -EBUSY); 5657 return; 5658 } 5659 5660 ctx = calloc(1, sizeof(*ctx)); 5661 if (!ctx) { 5662 cb_fn(cb_arg, -ENOMEM); 5663 return; 5664 } 5665 5666 ctx->bs = bs; 5667 5668 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5669 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5670 if (!ctx->super) { 5671 free(ctx); 5672 cb_fn(cb_arg, -ENOMEM); 5673 return; 5674 } 5675 5676 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5677 cpl.u.bs_basic.cb_fn = cb_fn; 5678 cpl.u.bs_basic.cb_arg = cb_arg; 5679 5680 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5681 if (!ctx->seq) { 5682 spdk_free(ctx->super); 5683 free(ctx); 5684 cb_fn(cb_arg, -ENOMEM); 5685 return; 5686 } 5687 5688 /* Read super block */ 5689 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5690 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5691 bs_unload_read_super_cpl, ctx); 5692 } 5693 5694 /* END spdk_bs_unload */ 5695 5696 /* START spdk_bs_set_super */ 5697 5698 struct spdk_bs_set_super_ctx { 5699 struct spdk_blob_store *bs; 5700 struct spdk_bs_super_block *super; 5701 }; 5702 5703 static void 5704 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5705 { 5706 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5707 5708 if (bserrno != 0) { 5709 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5710 } 5711 5712 spdk_free(ctx->super); 5713 5714 bs_sequence_finish(seq, bserrno); 5715 5716 free(ctx); 5717 } 5718 5719 static void 5720 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5721 { 5722 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5723 int rc; 5724 5725 if (bserrno != 0) { 5726 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5727 spdk_free(ctx->super); 5728 bs_sequence_finish(seq, bserrno); 5729 free(ctx); 5730 return; 5731 } 5732 5733 rc = bs_super_validate(ctx->super, ctx->bs); 5734 if (rc != 0) { 5735 SPDK_ERRLOG("Not a valid super block\n"); 5736 spdk_free(ctx->super); 5737 bs_sequence_finish(seq, rc); 5738 free(ctx); 5739 return; 5740 } 5741 5742 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 5743 } 5744 5745 void 5746 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 5747 spdk_bs_op_complete cb_fn, void *cb_arg) 5748 { 5749 struct spdk_bs_cpl cpl; 5750 spdk_bs_sequence_t *seq; 5751 struct spdk_bs_set_super_ctx *ctx; 5752 5753 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 5754 5755 ctx = calloc(1, sizeof(*ctx)); 5756 if (!ctx) { 5757 cb_fn(cb_arg, -ENOMEM); 5758 return; 5759 } 5760 5761 ctx->bs = bs; 5762 5763 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5764 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5765 if (!ctx->super) { 5766 free(ctx); 5767 
cb_fn(cb_arg, -ENOMEM); 5768 return; 5769 } 5770 5771 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5772 cpl.u.bs_basic.cb_fn = cb_fn; 5773 cpl.u.bs_basic.cb_arg = cb_arg; 5774 5775 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5776 if (!seq) { 5777 spdk_free(ctx->super); 5778 free(ctx); 5779 cb_fn(cb_arg, -ENOMEM); 5780 return; 5781 } 5782 5783 bs->super_blob = blobid; 5784 5785 /* Read super block */ 5786 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 5787 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5788 bs_set_super_read_cpl, ctx); 5789 } 5790 5791 /* END spdk_bs_set_super */ 5792 5793 void 5794 spdk_bs_get_super(struct spdk_blob_store *bs, 5795 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5796 { 5797 if (bs->super_blob == SPDK_BLOBID_INVALID) { 5798 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 5799 } else { 5800 cb_fn(cb_arg, bs->super_blob, 0); 5801 } 5802 } 5803 5804 uint64_t 5805 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 5806 { 5807 return bs->cluster_sz; 5808 } 5809 5810 uint64_t 5811 spdk_bs_get_page_size(struct spdk_blob_store *bs) 5812 { 5813 return SPDK_BS_PAGE_SIZE; 5814 } 5815 5816 uint64_t 5817 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 5818 { 5819 return bs->io_unit_size; 5820 } 5821 5822 uint64_t 5823 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 5824 { 5825 return bs->num_free_clusters; 5826 } 5827 5828 uint64_t 5829 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 5830 { 5831 return bs->total_data_clusters; 5832 } 5833 5834 static int 5835 bs_register_md_thread(struct spdk_blob_store *bs) 5836 { 5837 bs->md_channel = spdk_get_io_channel(bs); 5838 if (!bs->md_channel) { 5839 SPDK_ERRLOG("Failed to get IO channel.\n"); 5840 return -1; 5841 } 5842 5843 return 0; 5844 } 5845 5846 static int 5847 bs_unregister_md_thread(struct spdk_blob_store *bs) 5848 { 5849 spdk_put_io_channel(bs->md_channel); 5850 5851 return 0; 5852 } 5853 5854 spdk_blob_id 5855 spdk_blob_get_id(struct spdk_blob *blob) 5856 { 5857 assert(blob != NULL); 5858 5859 return blob->id; 5860 } 5861 5862 uint64_t 5863 spdk_blob_get_num_pages(struct spdk_blob *blob) 5864 { 5865 assert(blob != NULL); 5866 5867 return bs_cluster_to_page(blob->bs, blob->active.num_clusters); 5868 } 5869 5870 uint64_t 5871 spdk_blob_get_num_io_units(struct spdk_blob *blob) 5872 { 5873 assert(blob != NULL); 5874 5875 return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs); 5876 } 5877 5878 uint64_t 5879 spdk_blob_get_num_clusters(struct spdk_blob *blob) 5880 { 5881 assert(blob != NULL); 5882 5883 return blob->active.num_clusters; 5884 } 5885 5886 static uint64_t 5887 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated) 5888 { 5889 uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob); 5890 5891 while (offset < blob_io_unit_num) { 5892 if (bs_io_unit_is_allocated(blob, offset) == is_allocated) { 5893 return offset; 5894 } 5895 5896 offset += bs_num_io_units_to_cluster_boundary(blob, offset); 5897 } 5898 5899 return UINT64_MAX; 5900 } 5901 5902 uint64_t 5903 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset) 5904 { 5905 return blob_find_io_unit(blob, offset, true); 5906 } 5907 5908 uint64_t 5909 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset) 5910 { 5911 return blob_find_io_unit(blob, offset, false); 5912 } 5913 5914 /* START spdk_bs_create_blob */ 5915 5916 static void 5917 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5918 { 5919 struct spdk_blob *blob = 
cb_arg; 5920 uint32_t page_idx = bs_blobid_to_page(blob->id); 5921 5922 if (bserrno != 0) { 5923 spdk_spin_lock(&blob->bs->used_lock); 5924 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 5925 bs_release_md_page(blob->bs, page_idx); 5926 spdk_spin_unlock(&blob->bs->used_lock); 5927 } 5928 5929 blob_free(blob); 5930 5931 bs_sequence_finish(seq, bserrno); 5932 } 5933 5934 static int 5935 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 5936 bool internal) 5937 { 5938 uint64_t i; 5939 size_t value_len = 0; 5940 int rc; 5941 const void *value = NULL; 5942 if (xattrs->count > 0 && xattrs->get_value == NULL) { 5943 return -EINVAL; 5944 } 5945 for (i = 0; i < xattrs->count; i++) { 5946 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 5947 if (value == NULL || value_len == 0) { 5948 return -EINVAL; 5949 } 5950 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 5951 if (rc < 0) { 5952 return rc; 5953 } 5954 } 5955 return 0; 5956 } 5957 5958 static void 5959 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 5960 { 5961 #define FIELD_OK(field) \ 5962 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 5963 5964 #define SET_FIELD(field) \ 5965 if (FIELD_OK(field)) { \ 5966 dst->field = src->field; \ 5967 } \ 5968 5969 SET_FIELD(num_clusters); 5970 SET_FIELD(thin_provision); 5971 SET_FIELD(clear_method); 5972 5973 if (FIELD_OK(xattrs)) { 5974 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 5975 } 5976 5977 SET_FIELD(use_extent_table); 5978 SET_FIELD(esnap_id); 5979 SET_FIELD(esnap_id_len); 5980 5981 dst->opts_size = src->opts_size; 5982 5983 /* You should not remove this statement, but need to update the assert statement 5984 * if you add a new field, and also add a corresponding SET_FIELD statement */ 5985 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size"); 5986 5987 #undef FIELD_OK 5988 #undef SET_FIELD 5989 } 5990 5991 static void 5992 bs_create_blob(struct spdk_blob_store *bs, 5993 const struct spdk_blob_opts *opts, 5994 const struct spdk_blob_xattr_opts *internal_xattrs, 5995 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5996 { 5997 struct spdk_blob *blob; 5998 uint32_t page_idx; 5999 struct spdk_bs_cpl cpl; 6000 struct spdk_blob_opts opts_local; 6001 struct spdk_blob_xattr_opts internal_xattrs_default; 6002 spdk_bs_sequence_t *seq; 6003 spdk_blob_id id; 6004 int rc; 6005 6006 assert(spdk_get_thread() == bs->md_thread); 6007 6008 spdk_spin_lock(&bs->used_lock); 6009 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 6010 if (page_idx == UINT32_MAX) { 6011 spdk_spin_unlock(&bs->used_lock); 6012 cb_fn(cb_arg, 0, -ENOMEM); 6013 return; 6014 } 6015 spdk_bit_array_set(bs->used_blobids, page_idx); 6016 bs_claim_md_page(bs, page_idx); 6017 spdk_spin_unlock(&bs->used_lock); 6018 6019 id = bs_page_to_blobid(page_idx); 6020 6021 SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx); 6022 6023 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 6024 if (opts) { 6025 blob_opts_copy(opts, &opts_local); 6026 } 6027 6028 blob = blob_alloc(bs, id); 6029 if (!blob) { 6030 rc = -ENOMEM; 6031 goto error; 6032 } 6033 6034 blob->use_extent_table = opts_local.use_extent_table; 6035 if (blob->use_extent_table) { 6036 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 6037 } 6038 6039 if (!internal_xattrs) { 6040 blob_xattrs_init(&internal_xattrs_default); 6041 internal_xattrs = &internal_xattrs_default; 
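		/* No internal xattrs were requested, so fall back to an empty set
		 * and let the code below treat both cases uniformly. */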
6042 	}
6043 
6044 	rc = blob_set_xattrs(blob, &opts_local.xattrs, false);
6045 	if (rc < 0) {
6046 		goto error;
6047 	}
6048 
6049 	rc = blob_set_xattrs(blob, internal_xattrs, true);
6050 	if (rc < 0) {
6051 		goto error;
6052 	}
6053 
6054 	if (opts_local.thin_provision) {
6055 		blob_set_thin_provision(blob);
6056 	}
6057 
6058 	blob_set_clear_method(blob, opts_local.clear_method);
6059 
6060 	if (opts_local.esnap_id != NULL) {
6061 		if (opts_local.esnap_id_len > UINT16_MAX) {
6062 			SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n",
6063 				    opts_local.esnap_id_len);
6064 			rc = -EINVAL;
6065 			goto error;
6066 
6067 		}
6068 		blob_set_thin_provision(blob);
6069 		blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6070 		rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID,
6071 				    opts_local.esnap_id, opts_local.esnap_id_len, true);
6072 		if (rc != 0) {
6073 			goto error;
6074 		}
6075 	}
6076 
6077 	rc = blob_resize(blob, opts_local.num_clusters);
6078 	if (rc < 0) {
6079 		goto error;
6080 	}
6081 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6082 	cpl.u.blobid.cb_fn = cb_fn;
6083 	cpl.u.blobid.cb_arg = cb_arg;
6084 	cpl.u.blobid.blobid = blob->id;
6085 
6086 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
6087 	if (!seq) {
6088 		rc = -ENOMEM;
6089 		goto error;
6090 	}
6091 
6092 	blob_persist(seq, blob, bs_create_blob_cpl, blob);
6093 	return;
6094 
6095 error:
6096 	SPDK_ERRLOG("Failed to create blob: %s, size in clusters: %" PRIu64 "\n",
6097 		    spdk_strerror(rc), opts_local.num_clusters);
6098 	if (blob != NULL) {
6099 		blob_free(blob);
6100 	}
6101 	spdk_spin_lock(&bs->used_lock);
6102 	spdk_bit_array_clear(bs->used_blobids, page_idx);
6103 	bs_release_md_page(bs, page_idx);
6104 	spdk_spin_unlock(&bs->used_lock);
6105 	cb_fn(cb_arg, 0, rc);
6106 }
6107 
6108 void
6109 spdk_bs_create_blob(struct spdk_blob_store *bs,
6110 		    spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6111 {
6112 	bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
6113 }
6114 
6115 void
6116 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
6117 			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6118 {
6119 	bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
6120 }
6121 
6122 /* END spdk_bs_create_blob */
6123 
6124 /* START blob_cleanup */
6125 
6126 struct spdk_clone_snapshot_ctx {
6127 	struct spdk_bs_cpl cpl;
6128 	int bserrno;
6129 	bool frozen;
6130 
6131 	struct spdk_io_channel *channel;
6132 
6133 	/* Current cluster for inflate operation */
6134 	uint64_t cluster;
6135 
6136 	/* For inflation, force allocation of all unallocated clusters and remove
6137 	 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */
6138 	bool allocate_all;
6139 
6140 	struct {
6141 		spdk_blob_id id;
6142 		struct spdk_blob *blob;
6143 		bool md_ro;
6144 	} original;
6145 	struct {
6146 		spdk_blob_id id;
6147 		struct spdk_blob *blob;
6148 	} new;
6149 
6150 	/* xattrs specified for snapshot/clones only. They have no impact on
6151 	 * the original blob's xattrs.
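	 * They are applied only to the newly created snapshot or clone.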
*/ 6152 const struct spdk_blob_xattr_opts *xattrs; 6153 }; 6154 6155 static void 6156 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 6157 { 6158 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 6159 struct spdk_bs_cpl *cpl = &ctx->cpl; 6160 6161 if (bserrno != 0) { 6162 if (ctx->bserrno != 0) { 6163 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6164 } else { 6165 ctx->bserrno = bserrno; 6166 } 6167 } 6168 6169 switch (cpl->type) { 6170 case SPDK_BS_CPL_TYPE_BLOBID: 6171 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 6172 break; 6173 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 6174 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 6175 break; 6176 default: 6177 SPDK_UNREACHABLE(); 6178 break; 6179 } 6180 6181 free(ctx); 6182 } 6183 6184 static void 6185 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6186 { 6187 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6188 struct spdk_blob *origblob = ctx->original.blob; 6189 6190 if (bserrno != 0) { 6191 if (ctx->bserrno != 0) { 6192 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 6193 } else { 6194 ctx->bserrno = bserrno; 6195 } 6196 } 6197 6198 ctx->original.id = origblob->id; 6199 origblob->locked_operation_in_progress = false; 6200 6201 /* Revert md_ro to original state */ 6202 origblob->md_ro = ctx->original.md_ro; 6203 6204 spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx); 6205 } 6206 6207 static void 6208 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 6209 { 6210 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6211 struct spdk_blob *origblob = ctx->original.blob; 6212 6213 if (bserrno != 0) { 6214 if (ctx->bserrno != 0) { 6215 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6216 } else { 6217 ctx->bserrno = bserrno; 6218 } 6219 } 6220 6221 if (ctx->frozen) { 6222 /* Unfreeze any outstanding I/O */ 6223 blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx); 6224 } else { 6225 bs_snapshot_unfreeze_cpl(ctx, 0); 6226 } 6227 6228 } 6229 6230 static void 6231 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno) 6232 { 6233 struct spdk_blob *newblob = ctx->new.blob; 6234 6235 if (bserrno != 0) { 6236 if (ctx->bserrno != 0) { 6237 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6238 } else { 6239 ctx->bserrno = bserrno; 6240 } 6241 } 6242 6243 ctx->new.id = newblob->id; 6244 spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6245 } 6246 6247 /* END blob_cleanup */ 6248 6249 /* START spdk_bs_create_snapshot */ 6250 6251 static void 6252 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 6253 { 6254 uint64_t *cluster_temp; 6255 uint32_t *extent_page_temp; 6256 6257 cluster_temp = blob1->active.clusters; 6258 blob1->active.clusters = blob2->active.clusters; 6259 blob2->active.clusters = cluster_temp; 6260 6261 extent_page_temp = blob1->active.extent_pages; 6262 blob1->active.extent_pages = blob2->active.extent_pages; 6263 blob2->active.extent_pages = extent_page_temp; 6264 } 6265 6266 /* Copies an internal xattr */ 6267 static int 6268 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name) 6269 { 6270 const void *val = NULL; 6271 size_t len; 6272 int bserrno; 6273 6274 bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true); 6275 if (bserrno != 0) { 6276 SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name); 6277 return bserrno; 6278 } 6279 6280 bserrno = 
blob_set_xattr(toblob, name, val, len, true);
6281 	if (bserrno != 0) {
6282 		SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n",
6283 			    name, toblob->id);
6284 		return bserrno;
6285 	}
6286 	return 0;
6287 }
6288 
6289 static void
6290 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
6291 {
6292 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6293 	struct spdk_blob *origblob = ctx->original.blob;
6294 	struct spdk_blob *newblob = ctx->new.blob;
6295 
6296 	if (bserrno != 0) {
6297 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6298 		if (blob_is_esnap_clone(newblob)) {
6299 			bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6300 			origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6301 		}
6302 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6303 		return;
6304 	}
6305 
6306 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
6307 	bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
6308 	if (bserrno != 0) {
6309 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6310 		return;
6311 	}
6312 
6313 	bs_blob_list_add(ctx->original.blob);
6314 
6315 	spdk_blob_set_read_only(newblob);
6316 
6317 	/* sync snapshot metadata */
6318 	spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6319 }
6320 
6321 static void
6322 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
6323 {
6324 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6325 	struct spdk_blob *origblob = ctx->original.blob;
6326 	struct spdk_blob *newblob = ctx->new.blob;
6327 
6328 	if (bserrno != 0) {
6329 		/* return cluster map back to original */
6330 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6331 
6332 		/* Newblob md sync failed. Valid clusters are only present in origblob.
6333 		 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred.
6334 		 * Newblob needs to be reverted to its thin-provisioned state at creation so it can be closed properly.
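		 * The asserts that follow depend on this: after the swap, newblob's
		 * cluster map and extent pages must be all zeroes.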
*/ 6335 blob_set_thin_provision(newblob); 6336 assert(spdk_mem_all_zero(newblob->active.clusters, 6337 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6338 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6339 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6340 6341 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6342 return; 6343 } 6344 6345 /* Set internal xattr for snapshot id */ 6346 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 6347 if (bserrno != 0) { 6348 /* return cluster map back to original */ 6349 bs_snapshot_swap_cluster_maps(newblob, origblob); 6350 blob_set_thin_provision(newblob); 6351 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6352 return; 6353 } 6354 6355 /* Create new back_bs_dev for snapshot */ 6356 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 6357 if (origblob->back_bs_dev == NULL) { 6358 /* return cluster map back to original */ 6359 bs_snapshot_swap_cluster_maps(newblob, origblob); 6360 blob_set_thin_provision(newblob); 6361 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 6362 return; 6363 } 6364 6365 /* Remove the xattr that references an external snapshot */ 6366 if (blob_is_esnap_clone(origblob)) { 6367 origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6368 bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6369 if (bserrno != 0) { 6370 if (bserrno == -ENOENT) { 6371 SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID 6372 " xattr to remove\n", origblob->id); 6373 assert(false); 6374 } else { 6375 /* return cluster map back to original */ 6376 bs_snapshot_swap_cluster_maps(newblob, origblob); 6377 blob_set_thin_provision(newblob); 6378 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6379 return; 6380 } 6381 } 6382 } 6383 6384 bs_blob_list_remove(origblob); 6385 origblob->parent_id = newblob->id; 6386 /* set clone blob as thin provisioned */ 6387 blob_set_thin_provision(origblob); 6388 6389 bs_blob_list_add(newblob); 6390 6391 /* sync clone metadata */ 6392 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 6393 } 6394 6395 static void 6396 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 6397 { 6398 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6399 struct spdk_blob *origblob = ctx->original.blob; 6400 struct spdk_blob *newblob = ctx->new.blob; 6401 int bserrno; 6402 6403 if (rc != 0) { 6404 bs_clone_snapshot_newblob_cleanup(ctx, rc); 6405 return; 6406 } 6407 6408 ctx->frozen = true; 6409 6410 if (blob_is_esnap_clone(origblob)) { 6411 /* Clean up any channels associated with the original blob id because future IO will 6412 * perform IO using the snapshot blob_id. 
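	 * New channels are created lazily, keyed by the snapshot's blob id, the
	 * next time the back_bs_dev handles IO (see the blob_esnap_channel tree).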
6413 */ 6414 blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL); 6415 } 6416 if (newblob->back_bs_dev) { 6417 blob_back_bs_destroy(newblob); 6418 } 6419 /* set new back_bs_dev for snapshot */ 6420 newblob->back_bs_dev = origblob->back_bs_dev; 6421 /* Set invalid flags from origblob */ 6422 newblob->invalid_flags = origblob->invalid_flags; 6423 6424 /* inherit parent from original blob if set */ 6425 newblob->parent_id = origblob->parent_id; 6426 switch (origblob->parent_id) { 6427 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 6428 bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6429 if (bserrno != 0) { 6430 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6431 return; 6432 } 6433 break; 6434 case SPDK_BLOBID_INVALID: 6435 break; 6436 default: 6437 /* Set internal xattr for snapshot id */ 6438 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6439 &origblob->parent_id, sizeof(spdk_blob_id), true); 6440 if (bserrno != 0) { 6441 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6442 return; 6443 } 6444 } 6445 6446 /* swap cluster maps */ 6447 bs_snapshot_swap_cluster_maps(newblob, origblob); 6448 6449 /* Set the clear method on the new blob to match the original. */ 6450 blob_set_clear_method(newblob, origblob->clear_method); 6451 6452 /* sync snapshot metadata */ 6453 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6454 } 6455 6456 static void 6457 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6458 { 6459 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6460 struct spdk_blob *origblob = ctx->original.blob; 6461 struct spdk_blob *newblob = _blob; 6462 6463 if (bserrno != 0) { 6464 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6465 return; 6466 } 6467 6468 ctx->new.blob = newblob; 6469 assert(spdk_blob_is_thin_provisioned(newblob)); 6470 assert(spdk_mem_all_zero(newblob->active.clusters, 6471 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6472 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6473 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6474 6475 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6476 } 6477 6478 static void 6479 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6480 { 6481 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6482 struct spdk_blob *origblob = ctx->original.blob; 6483 6484 if (bserrno != 0) { 6485 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6486 return; 6487 } 6488 6489 ctx->new.id = blobid; 6490 ctx->cpl.u.blobid.blobid = blobid; 6491 6492 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6493 } 6494 6495 6496 static void 6497 bs_xattr_snapshot(void *arg, const char *name, 6498 const void **value, size_t *value_len) 6499 { 6500 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6501 6502 struct spdk_blob *blob = (struct spdk_blob *)arg; 6503 *value = &blob->id; 6504 *value_len = sizeof(blob->id); 6505 } 6506 6507 static void 6508 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6509 { 6510 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6511 struct spdk_blob_opts opts; 6512 struct spdk_blob_xattr_opts internal_xattrs; 6513 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6514 6515 if (bserrno != 0) { 6516 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6517 return; 6518 } 6519 6520 
ctx->original.blob = _blob; 6521 6522 if (_blob->data_ro || _blob->md_ro) { 6523 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id 0x%" 6524 PRIx64 "\n", _blob->id); 6525 ctx->bserrno = -EINVAL; 6526 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6527 return; 6528 } 6529 6530 if (_blob->locked_operation_in_progress) { 6531 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6532 ctx->bserrno = -EBUSY; 6533 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6534 return; 6535 } 6536 6537 _blob->locked_operation_in_progress = true; 6538 6539 spdk_blob_opts_init(&opts, sizeof(opts)); 6540 blob_xattrs_init(&internal_xattrs); 6541 6542 /* Change the size of new blob to the same as in original blob, 6543 * but do not allocate clusters */ 6544 opts.thin_provision = true; 6545 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6546 opts.use_extent_table = _blob->use_extent_table; 6547 6548 /* If there are any xattrs specified for snapshot, set them now */ 6549 if (ctx->xattrs) { 6550 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6551 } 6552 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6553 internal_xattrs.count = 1; 6554 internal_xattrs.ctx = _blob; 6555 internal_xattrs.names = xattrs_names; 6556 internal_xattrs.get_value = bs_xattr_snapshot; 6557 6558 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6559 bs_snapshot_newblob_create_cpl, ctx); 6560 } 6561 6562 void 6563 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 6564 const struct spdk_blob_xattr_opts *snapshot_xattrs, 6565 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6566 { 6567 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6568 6569 if (!ctx) { 6570 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6571 return; 6572 } 6573 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6574 ctx->cpl.u.blobid.cb_fn = cb_fn; 6575 ctx->cpl.u.blobid.cb_arg = cb_arg; 6576 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6577 ctx->bserrno = 0; 6578 ctx->frozen = false; 6579 ctx->original.id = blobid; 6580 ctx->xattrs = snapshot_xattrs; 6581 6582 spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx); 6583 } 6584 /* END spdk_bs_create_snapshot */ 6585 6586 /* START spdk_bs_create_clone */ 6587 6588 static void 6589 bs_xattr_clone(void *arg, const char *name, 6590 const void **value, size_t *value_len) 6591 { 6592 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 6593 6594 struct spdk_blob *blob = (struct spdk_blob *)arg; 6595 *value = &blob->id; 6596 *value_len = sizeof(blob->id); 6597 } 6598 6599 static void 6600 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6601 { 6602 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6603 struct spdk_blob *clone = _blob; 6604 6605 ctx->new.blob = clone; 6606 bs_blob_list_add(clone); 6607 6608 spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx); 6609 } 6610 6611 static void 6612 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6613 { 6614 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6615 6616 ctx->cpl.u.blobid.blobid = blobid; 6617 spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx); 6618 } 6619 6620 static void 6621 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6622 { 6623 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6624 struct 
spdk_blob_opts opts; 6625 struct spdk_blob_xattr_opts internal_xattrs; 6626 char *xattr_names[] = { BLOB_SNAPSHOT }; 6627 6628 if (bserrno != 0) { 6629 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6630 return; 6631 } 6632 6633 ctx->original.blob = _blob; 6634 ctx->original.md_ro = _blob->md_ro; 6635 6636 if (!_blob->data_ro || !_blob->md_ro) { 6637 SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n"); 6638 ctx->bserrno = -EINVAL; 6639 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6640 return; 6641 } 6642 6643 if (_blob->locked_operation_in_progress) { 6644 SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n"); 6645 ctx->bserrno = -EBUSY; 6646 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6647 return; 6648 } 6649 6650 _blob->locked_operation_in_progress = true; 6651 6652 spdk_blob_opts_init(&opts, sizeof(opts)); 6653 blob_xattrs_init(&internal_xattrs); 6654 6655 opts.thin_provision = true; 6656 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6657 opts.use_extent_table = _blob->use_extent_table; 6658 if (ctx->xattrs) { 6659 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6660 } 6661 6662 /* Set internal xattr BLOB_SNAPSHOT */ 6663 internal_xattrs.count = 1; 6664 internal_xattrs.ctx = _blob; 6665 internal_xattrs.names = xattr_names; 6666 internal_xattrs.get_value = bs_xattr_clone; 6667 6668 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6669 bs_clone_newblob_create_cpl, ctx); 6670 } 6671 6672 void 6673 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6674 const struct spdk_blob_xattr_opts *clone_xattrs, 6675 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6676 { 6677 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6678 6679 if (!ctx) { 6680 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6681 return; 6682 } 6683 6684 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6685 ctx->cpl.u.blobid.cb_fn = cb_fn; 6686 ctx->cpl.u.blobid.cb_arg = cb_arg; 6687 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6688 ctx->bserrno = 0; 6689 ctx->xattrs = clone_xattrs; 6690 ctx->original.id = blobid; 6691 6692 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6693 } 6694 6695 /* END spdk_bs_create_clone */ 6696 6697 /* START spdk_bs_inflate_blob */ 6698 6699 static void 6700 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6701 { 6702 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6703 struct spdk_blob *_blob = ctx->original.blob; 6704 6705 if (bserrno != 0) { 6706 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6707 return; 6708 } 6709 6710 /* Temporarily override md_ro flag for MD modification */ 6711 _blob->md_ro = false; 6712 6713 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6714 if (bserrno != 0) { 6715 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6716 return; 6717 } 6718 6719 assert(_parent != NULL); 6720 6721 bs_blob_list_remove(_blob); 6722 _blob->parent_id = _parent->id; 6723 6724 blob_back_bs_destroy(_blob); 6725 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6726 bs_blob_list_add(_blob); 6727 6728 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6729 } 6730 6731 static void 6732 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 6733 { 6734 struct spdk_blob *_blob = ctx->original.blob; 6735 struct spdk_blob *_parent; 6736 6737 if (ctx->allocate_all) { 6738 /* remove thin provisioning */ 6739 bs_blob_list_remove(_blob); 
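		/* Sever the parent linkage below: drop the snapshot/esnap xattr,
		 * clear SPDK_BLOB_THIN_PROV, and detach the back_bs_dev. Full
		 * inflate (spdk_bs_inflate_blob) takes this path; decouple
		 * (spdk_bs_blob_decouple_parent) arrives with allocate_all clear. */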
6740 		if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6741 			blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
6742 			_blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
6743 		} else {
6744 			blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6745 		}
6746 		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
6747 		blob_back_bs_destroy(_blob);
6748 		_blob->parent_id = SPDK_BLOBID_INVALID;
6749 	} else {
6750 		/* For now, esnap clones always have allocate_all set. */
6751 		assert(!blob_is_esnap_clone(_blob));
6752 
6753 		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
6754 		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
6755 			/* We must change the parent of the inflated blob */
6756 			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
6757 					  bs_inflate_blob_set_parent_cpl, ctx);
6758 			return;
6759 		}
6760 
6761 		bs_blob_list_remove(_blob);
6762 		_blob->parent_id = SPDK_BLOBID_INVALID;
6763 		blob_back_bs_destroy(_blob);
6764 		_blob->back_bs_dev = bs_create_zeroes_dev();
6765 	}
6766 
6767 	/* Temporarily override md_ro flag for MD modification */
6768 	_blob->md_ro = false;
6769 	blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6770 	_blob->state = SPDK_BLOB_STATE_DIRTY;
6771 
6772 	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
6773 }
6774 
6775 /* Check if cluster needs allocation */
6776 static inline bool
6777 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
6778 {
6779 	struct spdk_blob_bs_dev *b;
6780 
6781 	assert(blob != NULL);
6782 
6783 	if (blob->active.clusters[cluster] != 0) {
6784 		/* Cluster is already allocated */
6785 		return false;
6786 	}
6787 
6788 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
6789 		/* Blob has no parent */
6790 		return allocate_all;
6791 	}
6792 
6793 	if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6794 		return true;
6795 	}
6796 
6797 	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
6798 	return (allocate_all || b->blob->active.clusters[cluster] != 0);
6799 }
6800 
6801 static void
6802 bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
6803 {
6804 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6805 	struct spdk_blob *_blob = ctx->original.blob;
6806 	struct spdk_bs_cpl cpl;
6807 	spdk_bs_user_op_t *op;
6808 	uint64_t offset;
6809 
6810 	if (bserrno != 0) {
6811 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6812 		return;
6813 	}
6814 
6815 	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
6816 		if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
6817 			break;
6818 		}
6819 	}
6820 
6821 	if (ctx->cluster < _blob->active.num_clusters) {
6822 		offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);
6823 
6824 		/* We may safely advance the cluster index before the copy starts */
6825 		ctx->cluster++;
6826 
6827 		/* Use a dummy 0B read as a context for cluster copy */
6828 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6829 		cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
6830 		cpl.u.blob_basic.cb_arg = ctx;
6831 
6832 		op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
6833 				      NULL, 0, offset, 0);
6834 		if (!op) {
6835 			bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
6836 			return;
6837 		}
6838 
6839 		bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
6840 	} else {
6841 		bs_inflate_blob_done(ctx);
6842 	}
6843 }
6844 
6845 static void
6846 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6847 {
6848 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6849 	uint64_t clusters_needed;
6850 	uint64_t i;
6851 
6852 	if (bserrno != 0) {
6853 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
6854 		return;
6855 	}
6856 
6857 	ctx->original.blob = _blob;
6858 	ctx->original.md_ro = _blob->md_ro;
6859 
6860 	if (_blob->locked_operation_in_progress) {
6861 		SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
6862 		ctx->bserrno = -EBUSY;
6863 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6864 		return;
6865 	}
6866 
6867 	_blob->locked_operation_in_progress = true;
6868 
6869 	switch (_blob->parent_id) {
6870 	case SPDK_BLOBID_INVALID:
6871 		if (!ctx->allocate_all) {
6872 			/* This blob has no parent, so we cannot decouple it. */
6873 			SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
6874 			bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
6875 			return;
6876 		}
6877 		break;
6878 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
6879 		/*
6880 		 * It would be better to rely on back_bs_dev->is_zeroes() to determine which
6881 		 * clusters require allocation. Until there is a blobstore consumer that
6882 		 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes(), it is not
6883 		 * worth the effort.
6884 		 */
6885 		ctx->allocate_all = true;
6886 		break;
6887 	default:
6888 		break;
6889 	}
6890 
6891 	if (spdk_blob_is_thin_provisioned(_blob) == false) {
6892 		/* This is not a thin-provisioned blob; there is no need to inflate it. */
6893 		bs_clone_snapshot_origblob_cleanup(ctx, 0);
6894 		return;
6895 	}
6896 
6897 	/* Do two passes - one to verify that we can obtain enough clusters
6898 	 * and another to actually claim them.
6899 	 */
6900 	clusters_needed = 0;
6901 	for (i = 0; i < _blob->active.num_clusters; i++) {
6902 		if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
6903 			clusters_needed++;
6904 		}
6905 	}
6906 
6907 	if (clusters_needed > _blob->bs->num_free_clusters) {
6908 		/* Not enough free clusters. Cannot satisfy the request.
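		 * (This is a point-in-time estimate; the clusters are actually
		 * claimed one at a time, under bs->used_lock, as the copy loop
		 * proceeds.)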
*/ 6909 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 6910 return; 6911 } 6912 6913 ctx->cluster = 0; 6914 bs_inflate_blob_touch_next(ctx, 0); 6915 } 6916 6917 static void 6918 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6919 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 6920 { 6921 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6922 6923 if (!ctx) { 6924 cb_fn(cb_arg, -ENOMEM); 6925 return; 6926 } 6927 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 6928 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 6929 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 6930 ctx->bserrno = 0; 6931 ctx->original.id = blobid; 6932 ctx->channel = channel; 6933 ctx->allocate_all = allocate_all; 6934 6935 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 6936 } 6937 6938 void 6939 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6940 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 6941 { 6942 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 6943 } 6944 6945 void 6946 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6947 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 6948 { 6949 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 6950 } 6951 /* END spdk_bs_inflate_blob */ 6952 6953 /* START spdk_blob_resize */ 6954 struct spdk_bs_resize_ctx { 6955 spdk_blob_op_complete cb_fn; 6956 void *cb_arg; 6957 struct spdk_blob *blob; 6958 uint64_t sz; 6959 int rc; 6960 }; 6961 6962 static void 6963 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 6964 { 6965 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 6966 6967 if (rc != 0) { 6968 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 6969 } 6970 6971 if (ctx->rc != 0) { 6972 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 6973 rc = ctx->rc; 6974 } 6975 6976 ctx->blob->locked_operation_in_progress = false; 6977 6978 ctx->cb_fn(ctx->cb_arg, rc); 6979 free(ctx); 6980 } 6981 6982 static void 6983 bs_resize_freeze_cpl(void *cb_arg, int rc) 6984 { 6985 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 6986 6987 if (rc != 0) { 6988 ctx->blob->locked_operation_in_progress = false; 6989 ctx->cb_fn(ctx->cb_arg, rc); 6990 free(ctx); 6991 return; 6992 } 6993 6994 ctx->rc = blob_resize(ctx->blob, ctx->sz); 6995 6996 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 6997 } 6998 6999 void 7000 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 7001 { 7002 struct spdk_bs_resize_ctx *ctx; 7003 7004 blob_verify_md_op(blob); 7005 7006 SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz); 7007 7008 if (blob->md_ro) { 7009 cb_fn(cb_arg, -EPERM); 7010 return; 7011 } 7012 7013 if (sz == blob->active.num_clusters) { 7014 cb_fn(cb_arg, 0); 7015 return; 7016 } 7017 7018 if (blob->locked_operation_in_progress) { 7019 cb_fn(cb_arg, -EBUSY); 7020 return; 7021 } 7022 7023 ctx = calloc(1, sizeof(*ctx)); 7024 if (!ctx) { 7025 cb_fn(cb_arg, -ENOMEM); 7026 return; 7027 } 7028 7029 blob->locked_operation_in_progress = true; 7030 ctx->cb_fn = cb_fn; 7031 ctx->cb_arg = cb_arg; 7032 ctx->blob = blob; 7033 ctx->sz = sz; 7034 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 7035 } 7036 7037 /* END spdk_blob_resize */ 7038 7039 7040 /* START spdk_bs_delete_blob */ 7041 7042 static void 7043 bs_delete_close_cpl(void *cb_arg, int bserrno) 7044 { 7045 spdk_bs_sequence_t *seq = 
cb_arg; 7046 7047 bs_sequence_finish(seq, bserrno); 7048 } 7049 7050 static void 7051 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7052 { 7053 struct spdk_blob *blob = cb_arg; 7054 7055 if (bserrno != 0) { 7056 /* 7057 * We already removed this blob from the blobstore tailq, so 7058 * we need to free it here since this is the last reference 7059 * to it. 7060 */ 7061 blob_free(blob); 7062 bs_delete_close_cpl(seq, bserrno); 7063 return; 7064 } 7065 7066 /* 7067 * This will immediately decrement the ref_count and call 7068 * the completion routine since the metadata state is clean. 7069 * By calling spdk_blob_close, we reduce the number of call 7070 * points into code that touches the blob->open_ref count 7071 * and the blobstore's blob list. 7072 */ 7073 spdk_blob_close(blob, bs_delete_close_cpl, seq); 7074 } 7075 7076 struct delete_snapshot_ctx { 7077 struct spdk_blob_list *parent_snapshot_entry; 7078 struct spdk_blob *snapshot; 7079 struct spdk_blob_md_page *page; 7080 bool snapshot_md_ro; 7081 struct spdk_blob *clone; 7082 bool clone_md_ro; 7083 spdk_blob_op_with_handle_complete cb_fn; 7084 void *cb_arg; 7085 int bserrno; 7086 uint32_t next_extent_page; 7087 }; 7088 7089 static void 7090 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 7091 { 7092 struct delete_snapshot_ctx *ctx = cb_arg; 7093 7094 if (bserrno != 0) { 7095 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 7096 } 7097 7098 assert(ctx != NULL); 7099 7100 if (bserrno != 0 && ctx->bserrno == 0) { 7101 ctx->bserrno = bserrno; 7102 } 7103 7104 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 7105 spdk_free(ctx->page); 7106 free(ctx); 7107 } 7108 7109 static void 7110 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 7111 { 7112 struct delete_snapshot_ctx *ctx = cb_arg; 7113 7114 if (bserrno != 0) { 7115 ctx->bserrno = bserrno; 7116 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 7117 } 7118 7119 if (ctx->bserrno != 0) { 7120 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 7121 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 7122 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 7123 } 7124 7125 ctx->snapshot->locked_operation_in_progress = false; 7126 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 7127 7128 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 7129 } 7130 7131 static void 7132 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 7133 { 7134 struct delete_snapshot_ctx *ctx = cb_arg; 7135 7136 ctx->clone->locked_operation_in_progress = false; 7137 ctx->clone->md_ro = ctx->clone_md_ro; 7138 7139 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 7140 } 7141 7142 static void 7143 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 7144 { 7145 struct delete_snapshot_ctx *ctx = cb_arg; 7146 7147 if (bserrno) { 7148 ctx->bserrno = bserrno; 7149 delete_snapshot_cleanup_clone(ctx, 0); 7150 return; 7151 } 7152 7153 ctx->clone->locked_operation_in_progress = false; 7154 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 7155 } 7156 7157 static void 7158 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 7159 { 7160 struct delete_snapshot_ctx *ctx = cb_arg; 7161 struct spdk_blob_list *parent_snapshot_entry = NULL; 7162 struct spdk_blob_list *snapshot_entry = NULL; 7163 struct spdk_blob_list *clone_entry = NULL; 7164 struct spdk_blob_list *snapshot_clone_entry = NULL; 7165 7166 if (bserrno) { 7167 SPDK_ERRLOG("Failed to sync MD on blob\n"); 7168 
ctx->bserrno = bserrno; 7169 delete_snapshot_cleanup_clone(ctx, 0); 7170 return; 7171 } 7172 7173 /* Get snapshot entry for the snapshot we want to remove */ 7174 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 7175 7176 assert(snapshot_entry != NULL); 7177 7178 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 7179 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7180 assert(clone_entry != NULL); 7181 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 7182 snapshot_entry->clone_count--; 7183 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 7184 7185 switch (ctx->snapshot->parent_id) { 7186 case SPDK_BLOBID_INVALID: 7187 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 7188 /* No parent snapshot - just remove clone entry */ 7189 free(clone_entry); 7190 break; 7191 default: 7192 /* This snapshot is at the same time a clone of another snapshot - we need to 7193 * update parent snapshot (remove current clone, add new one inherited from 7194 * the snapshot that is being removed) */ 7195 7196 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 7197 * snapshot that we are removing */ 7198 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 7199 &snapshot_clone_entry); 7200 7201 /* Switch clone entry in parent snapshot */ 7202 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 7203 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 7204 free(snapshot_clone_entry); 7205 } 7206 7207 /* Restore md_ro flags */ 7208 ctx->clone->md_ro = ctx->clone_md_ro; 7209 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 7210 7211 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 7212 } 7213 7214 static void 7215 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 7216 { 7217 struct delete_snapshot_ctx *ctx = cb_arg; 7218 uint64_t i; 7219 7220 ctx->snapshot->md_ro = false; 7221 7222 if (bserrno) { 7223 SPDK_ERRLOG("Failed to sync MD on clone\n"); 7224 ctx->bserrno = bserrno; 7225 7226 /* Restore snapshot to previous state */ 7227 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 7228 if (bserrno != 0) { 7229 delete_snapshot_cleanup_clone(ctx, bserrno); 7230 return; 7231 } 7232 7233 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 7234 return; 7235 } 7236 7237 /* Clear cluster map entries for snapshot */ 7238 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 7239 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 7240 ctx->snapshot->active.clusters[i] = 0; 7241 } 7242 } 7243 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 7244 i < ctx->clone->active.num_extent_pages; i++) { 7245 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 7246 ctx->snapshot->active.extent_pages[i] = 0; 7247 } 7248 } 7249 7250 blob_set_thin_provision(ctx->snapshot); 7251 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 7252 7253 if (ctx->parent_snapshot_entry != NULL) { 7254 ctx->snapshot->back_bs_dev = NULL; 7255 } 7256 7257 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 7258 } 7259 7260 static void 7261 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 7262 { 7263 int bserrno; 7264 7265 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 7266 blob_back_bs_destroy(ctx->clone); 7267 7268 /* Set/remove snapshot xattr and switch parent ID 
and backing bs_dev on clone... */ 7269 if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 7270 bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot, 7271 BLOB_EXTERNAL_SNAPSHOT_ID); 7272 if (bserrno != 0) { 7273 ctx->bserrno = bserrno; 7274 7275 /* Restore snapshot to previous state */ 7276 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 7277 if (bserrno != 0) { 7278 delete_snapshot_cleanup_clone(ctx, bserrno); 7279 return; 7280 } 7281 7282 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 7283 return; 7284 } 7285 ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 7286 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 7287 /* Do not delete the external snapshot along with this snapshot */ 7288 ctx->snapshot->back_bs_dev = NULL; 7289 ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 7290 } else if (ctx->parent_snapshot_entry != NULL) { 7291 /* ...to parent snapshot */ 7292 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 7293 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 7294 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 7295 sizeof(spdk_blob_id), 7296 true); 7297 } else { 7298 /* ...to blobid invalid and zeroes dev */ 7299 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 7300 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 7301 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 7302 } 7303 7304 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 7305 } 7306 7307 static void 7308 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 7309 { 7310 struct delete_snapshot_ctx *ctx = cb_arg; 7311 uint32_t *extent_page; 7312 uint64_t i; 7313 7314 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 7315 i < ctx->clone->active.num_extent_pages; i++) { 7316 if (ctx->snapshot->active.extent_pages[i] == 0) { 7317 /* No extent page to use from snapshot */ 7318 continue; 7319 } 7320 7321 extent_page = &ctx->clone->active.extent_pages[i]; 7322 if (*extent_page == 0) { 7323 /* Copy extent page from snapshot when clone did not have a matching one */ 7324 *extent_page = ctx->snapshot->active.extent_pages[i]; 7325 continue; 7326 } 7327 7328 /* Clone and snapshot both contain partially filled matching extent pages. 7329 * Update the clone extent page in place with cluster map containing the mix of both. 
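	 * The write is asynchronous: delete_snapshot_update_extent_pages() serves
	 * as its completion callback and resumes the scan from next_extent_page.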
*/ 7330 ctx->next_extent_page = i + 1; 7331 memset(ctx->page, 0, SPDK_BS_PAGE_SIZE); 7332 7333 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page, 7334 delete_snapshot_update_extent_pages, ctx); 7335 return; 7336 } 7337 delete_snapshot_update_extent_pages_cpl(ctx); 7338 } 7339 7340 static void 7341 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 7342 { 7343 struct delete_snapshot_ctx *ctx = cb_arg; 7344 uint64_t i; 7345 7346 /* Temporarily override md_ro flag for clone for MD modification */ 7347 ctx->clone_md_ro = ctx->clone->md_ro; 7348 ctx->clone->md_ro = false; 7349 7350 if (bserrno) { 7351 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 7352 ctx->bserrno = bserrno; 7353 delete_snapshot_cleanup_clone(ctx, 0); 7354 return; 7355 } 7356 7357 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 7358 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 7359 if (ctx->clone->active.clusters[i] == 0) { 7360 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 7361 } 7362 } 7363 ctx->next_extent_page = 0; 7364 delete_snapshot_update_extent_pages(ctx, 0); 7365 } 7366 7367 static void 7368 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno) 7369 { 7370 struct delete_snapshot_ctx *ctx = cb_arg; 7371 7372 if (bserrno != 0) { 7373 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n", 7374 blob->id, bserrno); 7375 /* That error should not stop us from syncing metadata. */ 7376 } 7377 7378 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 7379 } 7380 7381 static void 7382 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 7383 { 7384 struct delete_snapshot_ctx *ctx = cb_arg; 7385 7386 if (bserrno) { 7387 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 7388 ctx->bserrno = bserrno; 7389 delete_snapshot_cleanup_clone(ctx, 0); 7390 return; 7391 } 7392 7393 /* Temporarily override md_ro flag for snapshot for MD modification */ 7394 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 7395 ctx->snapshot->md_ro = false; 7396 7397 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 7398 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 7399 sizeof(spdk_blob_id), true); 7400 if (ctx->bserrno != 0) { 7401 delete_snapshot_cleanup_clone(ctx, 0); 7402 return; 7403 } 7404 7405 if (blob_is_esnap_clone(ctx->snapshot)) { 7406 blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false, 7407 delete_snapshot_esnap_channels_destroyed_cb, 7408 ctx); 7409 return; 7410 } 7411 7412 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 7413 } 7414 7415 static void 7416 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 7417 { 7418 struct delete_snapshot_ctx *ctx = cb_arg; 7419 7420 if (bserrno) { 7421 SPDK_ERRLOG("Failed to open clone\n"); 7422 ctx->bserrno = bserrno; 7423 delete_snapshot_cleanup_snapshot(ctx, 0); 7424 return; 7425 } 7426 7427 ctx->clone = clone; 7428 7429 if (clone->locked_operation_in_progress) { 7430 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 7431 ctx->bserrno = -EBUSY; 7432 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 7433 return; 7434 } 7435 7436 clone->locked_operation_in_progress = true; 7437 7438 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 7439 } 7440 7441 static 
void 7442 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 7443 { 7444 struct spdk_blob_list *snapshot_entry = NULL; 7445 struct spdk_blob_list *clone_entry = NULL; 7446 struct spdk_blob_list *snapshot_clone_entry = NULL; 7447 7448 /* Get snapshot entry for the snapshot we want to remove */ 7449 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 7450 7451 assert(snapshot_entry != NULL); 7452 7453 /* Get clone of the snapshot (at this point there can be only one clone) */ 7454 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7455 assert(snapshot_entry->clone_count == 1); 7456 assert(clone_entry != NULL); 7457 7458 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 7459 * snapshot that we are removing */ 7460 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 7461 &snapshot_clone_entry); 7462 7463 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 7464 } 7465 7466 static void 7467 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 7468 { 7469 spdk_bs_sequence_t *seq = cb_arg; 7470 struct spdk_blob_list *snapshot_entry = NULL; 7471 uint32_t page_num; 7472 7473 if (bserrno) { 7474 SPDK_ERRLOG("Failed to remove blob\n"); 7475 bs_sequence_finish(seq, bserrno); 7476 return; 7477 } 7478 7479 /* Remove snapshot from the list */ 7480 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 7481 if (snapshot_entry != NULL) { 7482 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 7483 free(snapshot_entry); 7484 } 7485 7486 page_num = bs_blobid_to_page(blob->id); 7487 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 7488 blob->state = SPDK_BLOB_STATE_DIRTY; 7489 blob->active.num_pages = 0; 7490 blob_resize(blob, 0); 7491 7492 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 7493 } 7494 7495 static int 7496 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 7497 { 7498 struct spdk_blob_list *snapshot_entry = NULL; 7499 struct spdk_blob_list *clone_entry = NULL; 7500 struct spdk_blob *clone = NULL; 7501 bool has_one_clone = false; 7502 7503 /* Check if this is a snapshot with clones */ 7504 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 7505 if (snapshot_entry != NULL) { 7506 if (snapshot_entry->clone_count > 1) { 7507 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 7508 return -EBUSY; 7509 } else if (snapshot_entry->clone_count == 1) { 7510 has_one_clone = true; 7511 } 7512 } 7513 7514 /* Check if someone has this blob open (besides this delete context): 7515 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 7516 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 7517 * and that is ok, because we will update it accordingly */ 7518 if (blob->open_ref <= 2 && has_one_clone) { 7519 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7520 assert(clone_entry != NULL); 7521 clone = blob_lookup(blob->bs, clone_entry->id); 7522 7523 if (blob->open_ref == 2 && clone == NULL) { 7524 /* Clone is closed and someone else opened this blob */ 7525 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7526 return -EBUSY; 7527 } 7528 7529 *update_clone = true; 7530 return 0; 7531 } 7532 7533 if (blob->open_ref > 1) { 7534 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7535 return -EBUSY; 7536 } 7537 7538 assert(has_one_clone == false); 7539 *update_clone = false; 7540 return 0; 7541 } 7542 7543 static void 7544 
bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 7545 { 7546 spdk_bs_sequence_t *seq = cb_arg; 7547 7548 bs_sequence_finish(seq, -ENOMEM); 7549 } 7550 7551 static void 7552 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7553 { 7554 spdk_bs_sequence_t *seq = cb_arg; 7555 struct delete_snapshot_ctx *ctx; 7556 bool update_clone = false; 7557 7558 if (bserrno != 0) { 7559 bs_sequence_finish(seq, bserrno); 7560 return; 7561 } 7562 7563 blob_verify_md_op(blob); 7564 7565 ctx = calloc(1, sizeof(*ctx)); 7566 if (ctx == NULL) { 7567 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 7568 return; 7569 } 7570 7571 ctx->snapshot = blob; 7572 ctx->cb_fn = bs_delete_blob_finish; 7573 ctx->cb_arg = seq; 7574 7575 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 7576 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 7577 if (ctx->bserrno) { 7578 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7579 return; 7580 } 7581 7582 if (blob->locked_operation_in_progress) { 7583 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 7584 ctx->bserrno = -EBUSY; 7585 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7586 return; 7587 } 7588 7589 blob->locked_operation_in_progress = true; 7590 7591 /* 7592 * Remove the blob from the blob_store list now, to ensure it does not 7593 * get returned after this point by blob_lookup(). 7594 */ 7595 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 7596 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 7597 7598 if (update_clone) { 7599 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 7600 if (!ctx->page) { 7601 ctx->bserrno = -ENOMEM; 7602 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7603 return; 7604 } 7605 /* This blob is a snapshot with active clone - update clone first */ 7606 update_clone_on_snapshot_deletion(blob, ctx); 7607 } else { 7608 /* This blob does not have any clones - just remove it */ 7609 bs_blob_list_remove(blob); 7610 bs_delete_blob_finish(seq, blob, 0); 7611 free(ctx); 7612 } 7613 } 7614 7615 void 7616 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 7617 spdk_blob_op_complete cb_fn, void *cb_arg) 7618 { 7619 struct spdk_bs_cpl cpl; 7620 spdk_bs_sequence_t *seq; 7621 7622 SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid); 7623 7624 assert(spdk_get_thread() == bs->md_thread); 7625 7626 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7627 cpl.u.blob_basic.cb_fn = cb_fn; 7628 cpl.u.blob_basic.cb_arg = cb_arg; 7629 7630 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 7631 if (!seq) { 7632 cb_fn(cb_arg, -ENOMEM); 7633 return; 7634 } 7635 7636 spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq); 7637 } 7638 7639 /* END spdk_bs_delete_blob */ 7640 7641 /* START spdk_bs_open_blob */ 7642 7643 static void 7644 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7645 { 7646 struct spdk_blob *blob = cb_arg; 7647 struct spdk_blob *existing; 7648 7649 if (bserrno != 0) { 7650 blob_free(blob); 7651 seq->cpl.u.blob_handle.blob = NULL; 7652 bs_sequence_finish(seq, bserrno); 7653 return; 7654 } 7655 7656 existing = blob_lookup(blob->bs, blob->id); 7657 if (existing) { 7658 blob_free(blob); 7659 existing->open_ref++; 7660 seq->cpl.u.blob_handle.blob = existing; 7661 bs_sequence_finish(seq, 0); 7662 return; 7663 } 7664 7665 blob->open_ref++; 7666 7667 spdk_bit_array_set(blob->bs->open_blobids, blob->id); 7668 RB_INSERT(spdk_blob_tree, 
&blob->bs->open_blobs, blob); 7669 7670 bs_sequence_finish(seq, bserrno); 7671 } 7672 7673 static inline void 7674 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst) 7675 { 7676 #define FIELD_OK(field) \ 7677 offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size 7678 7679 #define SET_FIELD(field) \ 7680 if (FIELD_OK(field)) { \ 7681 dst->field = src->field; \ 7682 } \ 7683 7684 SET_FIELD(clear_method); 7685 SET_FIELD(esnap_ctx); 7686 7687 dst->opts_size = src->opts_size; 7688 7689 /* Do not remove this statement. If you add a new field, update the size in this 7690 * assert and add a corresponding SET_FIELD statement above */ 7691 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size"); 7692 7693 #undef FIELD_OK 7694 #undef SET_FIELD 7695 } 7696 7697 static void 7698 bs_open_blob(struct spdk_blob_store *bs, 7699 spdk_blob_id blobid, 7700 struct spdk_blob_open_opts *opts, 7701 spdk_blob_op_with_handle_complete cb_fn, 7702 void *cb_arg) 7703 { 7704 struct spdk_blob *blob; 7705 struct spdk_bs_cpl cpl; 7706 struct spdk_blob_open_opts opts_local; 7707 spdk_bs_sequence_t *seq; 7708 uint32_t page_num; 7709 7710 SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid); 7711 assert(spdk_get_thread() == bs->md_thread); 7712 7713 page_num = bs_blobid_to_page(blobid); 7714 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 7715 /* Invalid blobid */ 7716 cb_fn(cb_arg, NULL, -ENOENT); 7717 return; 7718 } 7719 7720 blob = blob_lookup(bs, blobid); 7721 if (blob) { 7722 blob->open_ref++; 7723 cb_fn(cb_arg, blob, 0); 7724 return; 7725 } 7726 7727 blob = blob_alloc(bs, blobid); 7728 if (!blob) { 7729 cb_fn(cb_arg, NULL, -ENOMEM); 7730 return; 7731 } 7732 7733 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 7734 if (opts) { 7735 blob_open_opts_copy(opts, &opts_local); 7736 } 7737 7738 blob->clear_method = opts_local.clear_method; 7739 7740 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 7741 cpl.u.blob_handle.cb_fn = cb_fn; 7742 cpl.u.blob_handle.cb_arg = cb_arg; 7743 cpl.u.blob_handle.blob = blob; 7744 cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx; 7745 7746 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 7747 if (!seq) { 7748 blob_free(blob); 7749 cb_fn(cb_arg, NULL, -ENOMEM); 7750 return; 7751 } 7752 7753 blob_load(seq, blob, bs_open_blob_cpl, blob); 7754 } 7755 7756 void 7757 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 7758 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7759 { 7760 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 7761 } 7762 7763 void 7764 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 7765 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7766 { 7767 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 7768 } 7769 7770 /* END spdk_bs_open_blob */ 7771 7772 /* START spdk_blob_set_read_only */ 7773 int 7774 spdk_blob_set_read_only(struct spdk_blob *blob) 7775 { 7776 blob_verify_md_op(blob); 7777 7778 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 7779 7780 blob->state = SPDK_BLOB_STATE_DIRTY; 7781 return 0; 7782 } 7783 /* END spdk_blob_set_read_only */ 7784 7785 /* START spdk_blob_sync_md */ 7786 7787 static void 7788 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7789 { 7790 struct spdk_blob *blob = cb_arg; 7791 7792 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 7793 blob->data_ro = true; 7794 blob->md_ro = true; 7795 } 7796 7797
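/* Finishing the sequence below invokes the spdk_blob_op_complete callback that blob_sync_md() registered in the completion. */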
bs_sequence_finish(seq, bserrno); 7798 } 7799 7800 static void 7801 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7802 { 7803 struct spdk_bs_cpl cpl; 7804 spdk_bs_sequence_t *seq; 7805 7806 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7807 cpl.u.blob_basic.cb_fn = cb_fn; 7808 cpl.u.blob_basic.cb_arg = cb_arg; 7809 7810 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 7811 if (!seq) { 7812 cb_fn(cb_arg, -ENOMEM); 7813 return; 7814 } 7815 7816 blob_persist(seq, blob, blob_sync_md_cpl, blob); 7817 } 7818 7819 void 7820 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7821 { 7822 blob_verify_md_op(blob); 7823 7824 SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id); 7825 7826 if (blob->md_ro) { 7827 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 7828 cb_fn(cb_arg, 0); 7829 return; 7830 } 7831 7832 blob_sync_md(blob, cb_fn, cb_arg); 7833 } 7834 7835 /* END spdk_blob_sync_md */ 7836 7837 struct spdk_blob_insert_cluster_ctx { 7838 struct spdk_thread *thread; 7839 struct spdk_blob *blob; 7840 uint32_t cluster_num; /* cluster index in blob */ 7841 uint32_t cluster; /* cluster on disk */ 7842 uint32_t extent_page; /* extent page on disk */ 7843 struct spdk_blob_md_page *page; /* preallocated extent page */ 7844 int rc; 7845 spdk_blob_op_complete cb_fn; 7846 void *cb_arg; 7847 }; 7848 7849 static void 7850 blob_insert_cluster_msg_cpl(void *arg) 7851 { 7852 struct spdk_blob_insert_cluster_ctx *ctx = arg; 7853 7854 ctx->cb_fn(ctx->cb_arg, ctx->rc); 7855 free(ctx); 7856 } 7857 7858 static void 7859 blob_insert_cluster_msg_cb(void *arg, int bserrno) 7860 { 7861 struct spdk_blob_insert_cluster_ctx *ctx = arg; 7862 7863 ctx->rc = bserrno; 7864 spdk_thread_send_msg(ctx->thread, blob_insert_cluster_msg_cpl, ctx); 7865 } 7866 7867 static void 7868 blob_insert_new_ep_cb(void *arg, int bserrno) 7869 { 7870 struct spdk_blob_insert_cluster_ctx *ctx = arg; 7871 uint32_t *extent_page; 7872 7873 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 7874 *extent_page = ctx->extent_page; 7875 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 7876 blob_sync_md(ctx->blob, blob_insert_cluster_msg_cb, ctx); 7877 } 7878 7879 struct spdk_blob_write_extent_page_ctx { 7880 struct spdk_blob_store *bs; 7881 7882 uint32_t extent; 7883 struct spdk_blob_md_page *page; 7884 }; 7885 7886 static void 7887 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7888 { 7889 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 7890 7891 free(ctx); 7892 bs_sequence_finish(seq, bserrno); 7893 } 7894 7895 static void 7896 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7897 { 7898 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 7899 7900 if (bserrno != 0) { 7901 blob_persist_extent_page_cpl(seq, ctx, bserrno); 7902 return; 7903 } 7904 bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent), 7905 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 7906 blob_persist_extent_page_cpl, ctx); 7907 } 7908 7909 static void 7910 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 7911 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 7912 { 7913 struct spdk_blob_write_extent_page_ctx *ctx; 7914 spdk_bs_sequence_t *seq; 7915 struct spdk_bs_cpl cpl; 7916 7917 ctx = calloc(1, sizeof(*ctx)); 7918 if (!ctx) { 7919 cb_fn(cb_arg, -ENOMEM); 7920 return; 7921 } 7922 ctx->bs = blob->bs; 7923 ctx->extent = extent; 7924 ctx->page = page; 
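/* ctx is freed in blob_persist_extent_page_cpl(); the extent page buffer itself is owned by the caller. */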
7925 7926 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7927 cpl.u.blob_basic.cb_fn = cb_fn; 7928 cpl.u.blob_basic.cb_arg = cb_arg; 7929 7930 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 7931 if (!seq) { 7932 free(ctx); 7933 cb_fn(cb_arg, -ENOMEM); 7934 return; 7935 } 7936 7937 assert(page); 7938 page->next = SPDK_INVALID_MD_PAGE; 7939 page->id = blob->id; 7940 page->sequence_num = 0; 7941 7942 blob_serialize_extent_page(blob, cluster_num, page); 7943 7944 page->crc = blob_md_page_calc_crc(page); 7945 7946 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true); 7947 7948 bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx); 7949 } 7950 7951 static void 7952 blob_insert_cluster_msg(void *arg) 7953 { 7954 struct spdk_blob_insert_cluster_ctx *ctx = arg; 7955 uint32_t *extent_page; 7956 7957 ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 7958 if (ctx->rc != 0) { 7959 spdk_thread_send_msg(ctx->thread, blob_insert_cluster_msg_cpl, ctx); 7960 return; 7961 } 7962 7963 if (ctx->blob->use_extent_table == false) { 7964 /* Extent table is not used, proceed with sync of md that will only use extents_rle. */ 7965 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 7966 blob_sync_md(ctx->blob, blob_insert_cluster_msg_cb, ctx); 7967 return; 7968 } 7969 7970 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 7971 if (*extent_page == 0) { 7972 /* Extent page requires allocation. 7973 * It was already claimed in the used_md_pages map and placed in ctx. */ 7974 assert(ctx->extent_page != 0); 7975 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 7976 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 7977 blob_insert_new_ep_cb, ctx); 7978 } else { 7979 /* It is possible for the original thread to have allocated an extent page for 7980 * a different cluster in the same extent page. In that case proceed with 7981 * updating the existing extent page, but release the additional one. */ 7982 if (ctx->extent_page != 0) { 7983 spdk_spin_lock(&ctx->blob->bs->used_lock); 7984 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 7985 bs_release_md_page(ctx->blob->bs, ctx->extent_page); 7986 spdk_spin_unlock(&ctx->blob->bs->used_lock); 7987 ctx->extent_page = 0; 7988 } 7989 /* Extent page is already allocated. 7990 * Each cluster allocation then requires just an update of a single extent page.
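* No new metadata page is claimed here; any extra page claimed up front by the original thread was released just above.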
*/ 7991 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 7992 blob_insert_cluster_msg_cb, ctx); 7993 } 7994 } 7995 7996 static void 7997 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 7998 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page, 7999 spdk_blob_op_complete cb_fn, void *cb_arg) 8000 { 8001 struct spdk_blob_insert_cluster_ctx *ctx; 8002 8003 ctx = calloc(1, sizeof(*ctx)); 8004 if (ctx == NULL) { 8005 cb_fn(cb_arg, -ENOMEM); 8006 return; 8007 } 8008 8009 ctx->thread = spdk_get_thread(); 8010 ctx->blob = blob; 8011 ctx->cluster_num = cluster_num; 8012 ctx->cluster = cluster; 8013 ctx->extent_page = extent_page; 8014 ctx->page = page; 8015 ctx->cb_fn = cb_fn; 8016 ctx->cb_arg = cb_arg; 8017 8018 spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx); 8019 } 8020 8021 /* START spdk_blob_close */ 8022 8023 static void 8024 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8025 { 8026 struct spdk_blob *blob = cb_arg; 8027 8028 if (bserrno == 0) { 8029 blob->open_ref--; 8030 if (blob->open_ref == 0) { 8031 /* 8032 * Blobs with active.num_pages == 0 are deleted blobs. 8033 * These blobs are removed from the blob_store list 8034 * when the deletion process starts, so don't try to 8035 * remove them again. 8036 */ 8037 if (blob->active.num_pages > 0) { 8038 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 8039 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 8040 } 8041 blob_free(blob); 8042 } 8043 } 8044 8045 bs_sequence_finish(seq, bserrno); 8046 } 8047 8048 static void 8049 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno) 8050 { 8051 spdk_bs_sequence_t *seq = cb_arg; 8052 8053 if (bserrno != 0) { 8054 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n", 8055 blob->id, bserrno); 8056 bs_sequence_finish(seq, bserrno); 8057 return; 8058 } 8059 8060 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n", 8061 blob->id, spdk_thread_get_name(spdk_get_thread())); 8062 8063 /* Sync metadata */ 8064 blob_persist(seq, blob, blob_close_cpl, blob); 8065 } 8066 8067 void 8068 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8069 { 8070 struct spdk_bs_cpl cpl; 8071 spdk_bs_sequence_t *seq; 8072 8073 blob_verify_md_op(blob); 8074 8075 SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id); 8076 8077 if (blob->open_ref == 0) { 8078 cb_fn(cb_arg, -EBADF); 8079 return; 8080 } 8081 8082 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8083 cpl.u.blob_basic.cb_fn = cb_fn; 8084 cpl.u.blob_basic.cb_arg = cb_arg; 8085 8086 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8087 if (!seq) { 8088 cb_fn(cb_arg, -ENOMEM); 8089 return; 8090 } 8091 8092 if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) { 8093 blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq); 8094 return; 8095 } 8096 8097 /* Sync metadata */ 8098 blob_persist(seq, blob, blob_close_cpl, blob); 8099 } 8100 8101 /* END spdk_blob_close */ 8102 8103 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 8104 { 8105 return spdk_get_io_channel(bs); 8106 } 8107 8108 void 8109 spdk_bs_free_io_channel(struct spdk_io_channel *channel) 8110 { 8111 blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel)); 8112 spdk_put_io_channel(channel); 8113 } 8114 8115 void 8116 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 8117 uint64_t
offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 8118 { 8119 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 8120 SPDK_BLOB_UNMAP); 8121 } 8122 8123 void 8124 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 8125 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 8126 { 8127 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 8128 SPDK_BLOB_WRITE_ZEROES); 8129 } 8130 8131 void 8132 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 8133 void *payload, uint64_t offset, uint64_t length, 8134 spdk_blob_op_complete cb_fn, void *cb_arg) 8135 { 8136 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 8137 SPDK_BLOB_WRITE); 8138 } 8139 8140 void 8141 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 8142 void *payload, uint64_t offset, uint64_t length, 8143 spdk_blob_op_complete cb_fn, void *cb_arg) 8144 { 8145 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 8146 SPDK_BLOB_READ); 8147 } 8148 8149 void 8150 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 8151 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8152 spdk_blob_op_complete cb_fn, void *cb_arg) 8153 { 8154 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 8155 } 8156 8157 void 8158 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 8159 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8160 spdk_blob_op_complete cb_fn, void *cb_arg) 8161 { 8162 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 8163 } 8164 8165 void 8166 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 8167 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8168 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 8169 { 8170 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 8171 io_opts); 8172 } 8173 8174 void 8175 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 8176 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8177 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 8178 { 8179 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 8180 io_opts); 8181 } 8182 8183 struct spdk_bs_iter_ctx { 8184 int64_t page_num; 8185 struct spdk_blob_store *bs; 8186 8187 spdk_blob_op_with_handle_complete cb_fn; 8188 void *cb_arg; 8189 }; 8190 8191 static void 8192 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 8193 { 8194 struct spdk_bs_iter_ctx *ctx = cb_arg; 8195 struct spdk_blob_store *bs = ctx->bs; 8196 spdk_blob_id id; 8197 8198 if (bserrno == 0) { 8199 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 8200 free(ctx); 8201 return; 8202 } 8203 8204 ctx->page_num++; 8205 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 8206 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 8207 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 8208 free(ctx); 8209 return; 8210 } 8211 8212 id = bs_page_to_blobid(ctx->page_num); 8213 8214 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 8215 } 8216 8217 void 8218 spdk_bs_iter_first(struct spdk_blob_store *bs, 8219 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 
8220 { 8221 struct spdk_bs_iter_ctx *ctx; 8222 8223 ctx = calloc(1, sizeof(*ctx)); 8224 if (!ctx) { 8225 cb_fn(cb_arg, NULL, -ENOMEM); 8226 return; 8227 } 8228 8229 ctx->page_num = -1; 8230 ctx->bs = bs; 8231 ctx->cb_fn = cb_fn; 8232 ctx->cb_arg = cb_arg; 8233 8234 bs_iter_cpl(ctx, NULL, -1); 8235 } 8236 8237 static void 8238 bs_iter_close_cpl(void *cb_arg, int bserrno) 8239 { 8240 struct spdk_bs_iter_ctx *ctx = cb_arg; 8241 8242 bs_iter_cpl(ctx, NULL, -1); 8243 } 8244 8245 void 8246 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 8247 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8248 { 8249 struct spdk_bs_iter_ctx *ctx; 8250 8251 assert(blob != NULL); 8252 8253 ctx = calloc(1, sizeof(*ctx)); 8254 if (!ctx) { 8255 cb_fn(cb_arg, NULL, -ENOMEM); 8256 return; 8257 } 8258 8259 ctx->page_num = bs_blobid_to_page(blob->id); 8260 ctx->bs = bs; 8261 ctx->cb_fn = cb_fn; 8262 ctx->cb_arg = cb_arg; 8263 8264 /* Close the existing blob */ 8265 spdk_blob_close(blob, bs_iter_close_cpl, ctx); 8266 } 8267 8268 static int 8269 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 8270 uint16_t value_len, bool internal) 8271 { 8272 struct spdk_xattr_tailq *xattrs; 8273 struct spdk_xattr *xattr; 8274 size_t desc_size; 8275 void *tmp; 8276 8277 blob_verify_md_op(blob); 8278 8279 if (blob->md_ro) { 8280 return -EPERM; 8281 } 8282 8283 desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len; 8284 if (desc_size > SPDK_BS_MAX_DESC_SIZE) { 8285 SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page (max %zu)\n", name, 8286 desc_size, SPDK_BS_MAX_DESC_SIZE); 8287 return -ENOMEM; 8288 } 8289 8290 if (internal) { 8291 xattrs = &blob->xattrs_internal; 8292 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR; 8293 } else { 8294 xattrs = &blob->xattrs; 8295 } 8296 8297 TAILQ_FOREACH(xattr, xattrs, link) { 8298 if (!strcmp(name, xattr->name)) { 8299 tmp = malloc(value_len); 8300 if (!tmp) { 8301 return -ENOMEM; 8302 } 8303 8304 free(xattr->value); 8305 xattr->value_len = value_len; 8306 xattr->value = tmp; 8307 memcpy(xattr->value, value, value_len); 8308 8309 blob->state = SPDK_BLOB_STATE_DIRTY; 8310 8311 return 0; 8312 } 8313 } 8314 8315 xattr = calloc(1, sizeof(*xattr)); 8316 if (!xattr) { 8317 return -ENOMEM; 8318 } 8319 8320 xattr->name = strdup(name); 8321 if (!xattr->name) { 8322 free(xattr); 8323 return -ENOMEM; 8324 } 8325 8326 xattr->value_len = value_len; 8327 xattr->value = malloc(value_len); 8328 if (!xattr->value) { 8329 free(xattr->name); 8330 free(xattr); 8331 return -ENOMEM; 8332 } 8333 memcpy(xattr->value, value, value_len); 8334 TAILQ_INSERT_TAIL(xattrs, xattr, link); 8335 8336 blob->state = SPDK_BLOB_STATE_DIRTY; 8337 8338 return 0; 8339 } 8340 8341 int 8342 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 8343 uint16_t value_len) 8344 { 8345 return blob_set_xattr(blob, name, value, value_len, false); 8346 } 8347 8348 static int 8349 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal) 8350 { 8351 struct spdk_xattr_tailq *xattrs; 8352 struct spdk_xattr *xattr; 8353 8354 blob_verify_md_op(blob); 8355 8356 if (blob->md_ro) { 8357 return -EPERM; 8358 } 8359 xattrs = internal ?
&blob->xattrs_internal : &blob->xattrs; 8360 8361 TAILQ_FOREACH(xattr, xattrs, link) { 8362 if (!strcmp(name, xattr->name)) { 8363 TAILQ_REMOVE(xattrs, xattr, link); 8364 free(xattr->value); 8365 free(xattr->name); 8366 free(xattr); 8367 8368 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) { 8369 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR; 8370 } 8371 blob->state = SPDK_BLOB_STATE_DIRTY; 8372 8373 return 0; 8374 } 8375 } 8376 8377 return -ENOENT; 8378 } 8379 8380 int 8381 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name) 8382 { 8383 return blob_remove_xattr(blob, name, false); 8384 } 8385 8386 static int 8387 blob_get_xattr_value(struct spdk_blob *blob, const char *name, 8388 const void **value, size_t *value_len, bool internal) 8389 { 8390 struct spdk_xattr *xattr; 8391 struct spdk_xattr_tailq *xattrs; 8392 8393 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 8394 8395 TAILQ_FOREACH(xattr, xattrs, link) { 8396 if (!strcmp(name, xattr->name)) { 8397 *value = xattr->value; 8398 *value_len = xattr->value_len; 8399 return 0; 8400 } 8401 } 8402 return -ENOENT; 8403 } 8404 8405 int 8406 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 8407 const void **value, size_t *value_len) 8408 { 8409 blob_verify_md_op(blob); 8410 8411 return blob_get_xattr_value(blob, name, value, value_len, false); 8412 } 8413 8414 struct spdk_xattr_names { 8415 uint32_t count; 8416 const char *names[0]; 8417 }; 8418 8419 static int 8420 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 8421 { 8422 struct spdk_xattr *xattr; 8423 int count = 0; 8424 8425 TAILQ_FOREACH(xattr, xattrs, link) { 8426 count++; 8427 } 8428 8429 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 8430 if (*names == NULL) { 8431 return -ENOMEM; 8432 } 8433 8434 TAILQ_FOREACH(xattr, xattrs, link) { 8435 (*names)->names[(*names)->count++] = xattr->name; 8436 } 8437 8438 return 0; 8439 } 8440 8441 int 8442 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 8443 { 8444 blob_verify_md_op(blob); 8445 8446 return blob_get_xattr_names(&blob->xattrs, names); 8447 } 8448 8449 uint32_t 8450 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 8451 { 8452 assert(names != NULL); 8453 8454 return names->count; 8455 } 8456 8457 const char * 8458 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 8459 { 8460 if (index >= names->count) { 8461 return NULL; 8462 } 8463 8464 return names->names[index]; 8465 } 8466 8467 void 8468 spdk_xattr_names_free(struct spdk_xattr_names *names) 8469 { 8470 free(names); 8471 } 8472 8473 struct spdk_bs_type 8474 spdk_bs_get_bstype(struct spdk_blob_store *bs) 8475 { 8476 return bs->bstype; 8477 } 8478 8479 void 8480 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 8481 { 8482 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 8483 } 8484 8485 bool 8486 spdk_blob_is_read_only(struct spdk_blob *blob) 8487 { 8488 assert(blob != NULL); 8489 return (blob->data_ro || blob->md_ro); 8490 } 8491 8492 bool 8493 spdk_blob_is_snapshot(struct spdk_blob *blob) 8494 { 8495 struct spdk_blob_list *snapshot_entry; 8496 8497 assert(blob != NULL); 8498 8499 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8500 if (snapshot_entry == NULL) { 8501 return false; 8502 } 8503 8504 return true; 8505 } 8506 8507 bool 8508 spdk_blob_is_clone(struct spdk_blob *blob) 8509 { 8510 assert(blob != NULL); 8511 8512 if (blob->parent_id != SPDK_BLOBID_INVALID 
&& 8513 blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 8514 assert(spdk_blob_is_thin_provisioned(blob)); 8515 return true; 8516 } 8517 8518 return false; 8519 } 8520 8521 bool 8522 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 8523 { 8524 assert(blob != NULL); 8525 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 8526 } 8527 8528 bool 8529 spdk_blob_is_esnap_clone(const struct spdk_blob *blob) 8530 { 8531 return blob_is_esnap_clone(blob); 8532 } 8533 8534 static void 8535 blob_update_clear_method(struct spdk_blob *blob) 8536 { 8537 enum blob_clear_method stored_cm; 8538 8539 assert(blob != NULL); 8540 8541 /* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored 8542 * in metadata previously. If something other than the default was 8543 * specified, ignore the stored value and use what was passed in. 8544 */ 8545 stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT); 8546 8547 if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) { 8548 blob->clear_method = stored_cm; 8549 } else if (blob->clear_method != stored_cm) { 8550 SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n", 8551 blob->clear_method, stored_cm); 8552 } 8553 } 8554 8555 spdk_blob_id 8556 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id) 8557 { 8558 struct spdk_blob_list *snapshot_entry = NULL; 8559 struct spdk_blob_list *clone_entry = NULL; 8560 8561 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 8562 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 8563 if (clone_entry->id == blob_id) { 8564 return snapshot_entry->id; 8565 } 8566 } 8567 } 8568 8569 return SPDK_BLOBID_INVALID; 8570 } 8571 8572 int 8573 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, 8574 size_t *count) 8575 { 8576 struct spdk_blob_list *snapshot_entry, *clone_entry; 8577 size_t n; 8578 8579 snapshot_entry = bs_get_snapshot_entry(bs, blobid); 8580 if (snapshot_entry == NULL) { 8581 *count = 0; 8582 return 0; 8583 } 8584 8585 if (ids == NULL || *count < snapshot_entry->clone_count) { 8586 *count = snapshot_entry->clone_count; 8587 return -ENOMEM; 8588 } 8589 *count = snapshot_entry->clone_count; 8590 8591 n = 0; 8592 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 8593 ids[n++] = clone_entry->id; 8594 } 8595 8596 return 0; 8597 } 8598 8599 static void 8600 bs_load_grow_continue(struct spdk_bs_load_ctx *ctx) 8601 { 8602 int rc; 8603 8604 if (ctx->super->size == 0) { 8605 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 8606 } 8607 8608 if (ctx->super->io_unit_size == 0) { 8609 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 8610 } 8611 8612 /* Parse the super block */ 8613 ctx->bs->clean = 1; 8614 ctx->bs->cluster_sz = ctx->super->cluster_size; 8615 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 8616 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 8617 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) { 8618 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster); 8619 } 8620 ctx->bs->io_unit_size = ctx->super->io_unit_size; 8621 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 8622 if (rc < 0) { 8623 bs_load_ctx_fail(ctx, -ENOMEM); 8624 return; 8625 } 8626 ctx->bs->md_start = ctx->super->md_start; 8627 ctx->bs->md_len = ctx->super->md_len; 8628 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 8629 if (rc < 0) { 8630 bs_load_ctx_fail(ctx,
-ENOMEM); 8631 return; 8632 } 8633 8634 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 8635 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 8636 ctx->bs->super_blob = ctx->super->super_blob; 8637 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 8638 8639 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) { 8640 SPDK_ERRLOG("Cannot grow an unclean blobstore; load it normally to clean it first.\n"); 8641 bs_load_ctx_fail(ctx, -EIO); 8642 return; 8643 } else { 8644 bs_load_read_used_pages(ctx); 8645 } 8646 } 8647 8648 static void 8649 bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8650 { 8651 struct spdk_bs_load_ctx *ctx = cb_arg; 8652 8653 if (bserrno != 0) { 8654 bs_load_ctx_fail(ctx, bserrno); 8655 return; 8656 } 8657 bs_load_grow_continue(ctx); 8658 } 8659 8660 static void 8661 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8662 { 8663 struct spdk_bs_load_ctx *ctx = cb_arg; 8664 8665 if (bserrno != 0) { 8666 bs_load_ctx_fail(ctx, bserrno); 8667 return; 8668 } 8669 8670 spdk_free(ctx->mask); 8671 8672 bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 8673 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 8674 bs_load_grow_super_write_cpl, ctx); 8675 } 8676 8677 static void 8678 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8679 { 8680 struct spdk_bs_load_ctx *ctx = cb_arg; 8681 uint64_t lba, lba_count; 8682 uint64_t dev_size; 8683 uint64_t total_clusters; 8684 8685 if (bserrno != 0) { 8686 bs_load_ctx_fail(ctx, bserrno); 8687 return; 8688 } 8689 8690 /* The type must be correct */ 8691 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 8692 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 8693 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 8694 struct spdk_blob_md_page) * 8)); 8695 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 8696 total_clusters = dev_size / ctx->super->cluster_size; 8697 ctx->mask->length = total_clusters; 8698 8699 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 8700 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 8701 bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count, 8702 bs_load_grow_used_clusters_write_cpl, ctx); 8703 } 8704 8705 static void 8706 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx) 8707 { 8708 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 8709 uint64_t lba, lba_count, mask_size; 8710 8711 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 8712 total_clusters = dev_size / ctx->super->cluster_size; 8713 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 8714 spdk_divide_round_up(total_clusters, 8), 8715 SPDK_BS_PAGE_SIZE); 8716 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 8717 /* No need to grow, or no space to grow */ 8718 if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) { 8719 SPDK_DEBUGLOG(blob, "No grow\n"); 8720 bs_load_grow_continue(ctx); 8721 return; 8722 } 8723 8724 SPDK_DEBUGLOG(blob, "Resize blobstore\n"); 8725 8726 ctx->super->size = dev_size; 8727 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 8728 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 8729 8730 mask_size =
used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 8731 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 8732 SPDK_MALLOC_DMA); 8733 if (!ctx->mask) { 8734 bs_load_ctx_fail(ctx, -ENOMEM); 8735 return; 8736 } 8737 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 8738 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 8739 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 8740 bs_load_grow_used_clusters_read_cpl, ctx); 8741 } 8742 8743 static void 8744 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8745 { 8746 struct spdk_bs_load_ctx *ctx = cb_arg; 8747 int rc; 8748 8749 rc = bs_super_validate(ctx->super, ctx->bs); 8750 if (rc != 0) { 8751 bs_load_ctx_fail(ctx, rc); 8752 return; 8753 } 8754 8755 bs_load_try_to_grow(ctx); 8756 } 8757 8758 struct spdk_bs_grow_ctx { 8759 struct spdk_blob_store *bs; 8760 struct spdk_bs_super_block *super; 8761 8762 struct spdk_bit_pool *new_used_clusters; 8763 struct spdk_bs_md_mask *new_used_clusters_mask; 8764 8765 spdk_bs_sequence_t *seq; 8766 }; 8767 8768 static void 8769 bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno) 8770 { 8771 if (bserrno != 0) { 8772 spdk_bit_pool_free(&ctx->new_used_clusters); 8773 } 8774 8775 bs_sequence_finish(ctx->seq, bserrno); 8776 free(ctx->new_used_clusters_mask); 8777 spdk_free(ctx->super); 8778 free(ctx); 8779 } 8780 8781 static void 8782 bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8783 { 8784 struct spdk_bs_grow_ctx *ctx = cb_arg; 8785 struct spdk_blob_store *bs = ctx->bs; 8786 uint64_t total_clusters; 8787 8788 if (bserrno != 0) { 8789 bs_grow_live_done(ctx, bserrno); 8790 return; 8791 } 8792 8793 /* 8794 * The blobstore is not clean until it is unloaded; for now only the super block is 8795 * up to date. This is similar to the state right after blobstore init, before 8796 * bs_write_used_md() has executed. 8797 * When cleanly unloaded, the used md pages will be written out. 8798 * In case of an unclean shutdown, loading the blobstore will go through the recovery 8799 * path, correctly filling out used_clusters with the new size and writing it out. 8800 */ 8801 bs->clean = 0; 8802 8803 /* Reverting super->size past this point is complex, so avoid any error paths 8804 * that would require doing so.
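* A failure after this point is left for the recovery path on the next load, as described above.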
*/ 8805 spdk_spin_lock(&bs->used_lock); 8806 8807 total_clusters = ctx->super->size / ctx->super->cluster_size; 8808 8809 assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters)); 8810 spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask); 8811 8812 assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters)); 8813 spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask); 8814 8815 spdk_bit_pool_free(&bs->used_clusters); 8816 bs->used_clusters = ctx->new_used_clusters; 8817 8818 bs->total_clusters = total_clusters; 8819 bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up( 8820 bs->md_start + bs->md_len, bs->pages_per_cluster); 8821 8822 bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters); 8823 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 8824 spdk_spin_unlock(&bs->used_lock); 8825 8826 bs_grow_live_done(ctx, 0); 8827 } 8828 8829 static void 8830 bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8831 { 8832 struct spdk_bs_grow_ctx *ctx = cb_arg; 8833 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 8834 int rc; 8835 8836 if (bserrno != 0) { 8837 bs_grow_live_done(ctx, bserrno); 8838 return; 8839 } 8840 8841 rc = bs_super_validate(ctx->super, ctx->bs); 8842 if (rc != 0) { 8843 bs_grow_live_done(ctx, rc); 8844 return; 8845 } 8846 8847 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 8848 total_clusters = dev_size / ctx->super->cluster_size; 8849 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 8850 spdk_divide_round_up(total_clusters, 8), 8851 SPDK_BS_PAGE_SIZE); 8852 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 8853 /* Only dev_size is checked here, since it can change while total_clusters remains the same.
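* Illustrative numbers: with a 1 MiB cluster_size, a device grown to 20 GiB gives total_clusters = 20480, so the mask needs 20480 bits = 2560 bytes plus the spdk_bs_md_mask header, which rounds up to a single 4 KiB metadata page.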
*/ 8854 if (dev_size == ctx->super->size) { 8855 SPDK_DEBUGLOG(blob, "No need to grow blobstore\n"); 8856 bs_grow_live_done(ctx, 0); 8857 return; 8858 } 8859 /* 8860 * The blobstore cannot be shrunk, so fail the grow if: 8861 * - the new size of the device is smaller than the size in the super block 8862 * - the new total number of clusters is smaller than the used_clusters bit pool 8863 * - there is not enough space in metadata for the used_cluster_mask to be written out 8864 */ 8865 if (dev_size < ctx->super->size || 8866 total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) || 8867 used_cluster_mask_len > max_used_cluster_mask) { 8868 SPDK_DEBUGLOG(blob, "No space to grow blobstore\n"); 8869 bs_grow_live_done(ctx, -ENOSPC); 8870 return; 8871 } 8872 8873 SPDK_DEBUGLOG(blob, "Resizing blobstore\n"); 8874 8875 ctx->new_used_clusters_mask = calloc(1, total_clusters); 8876 if (!ctx->new_used_clusters_mask) { 8877 bs_grow_live_done(ctx, -ENOMEM); 8878 return; 8879 } 8880 ctx->new_used_clusters = spdk_bit_pool_create(total_clusters); 8881 if (!ctx->new_used_clusters) { 8882 bs_grow_live_done(ctx, -ENOMEM); 8883 return; 8884 } 8885 8886 ctx->super->clean = 0; 8887 ctx->super->size = dev_size; 8888 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 8889 bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx); 8890 } 8891 8892 void 8893 spdk_bs_grow_live(struct spdk_blob_store *bs, 8894 spdk_bs_op_complete cb_fn, void *cb_arg) 8895 { 8896 struct spdk_bs_cpl cpl; 8897 struct spdk_bs_grow_ctx *ctx; 8898 8899 assert(spdk_get_thread() == bs->md_thread); 8900 8901 SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev); 8902 8903 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 8904 cpl.u.bs_basic.cb_fn = cb_fn; 8905 cpl.u.bs_basic.cb_arg = cb_arg; 8906 8907 ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx)); 8908 if (!ctx) { 8909 cb_fn(cb_arg, -ENOMEM); 8910 return; 8911 } 8912 ctx->bs = bs; 8913 8914 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 8915 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 8916 if (!ctx->super) { 8917 free(ctx); 8918 cb_fn(cb_arg, -ENOMEM); 8919 return; 8920 } 8921 8922 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8923 if (!ctx->seq) { 8924 spdk_free(ctx->super); 8925 free(ctx); 8926 cb_fn(cb_arg, -ENOMEM); 8927 return; 8928 } 8929 8930 /* Read the super block */ 8931 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 8932 bs_byte_to_lba(bs, sizeof(*ctx->super)), 8933 bs_grow_live_load_super_cpl, ctx); 8934 } 8935 8936 void 8937 spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 8938 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 8939 { 8940 struct spdk_blob_store *bs; 8941 struct spdk_bs_cpl cpl; 8942 struct spdk_bs_load_ctx *ctx; 8943 struct spdk_bs_opts opts = {}; 8944 int err; 8945 8946 SPDK_DEBUGLOG(blob, "Growing blobstore from dev %p\n", dev); 8947 8948 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 8949 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 8950 dev->destroy(dev); 8951 cb_fn(cb_arg, NULL, -EINVAL); 8952 return; 8953 } 8954 8955 spdk_bs_opts_init(&opts, sizeof(opts)); 8956 if (o) { 8957 if (bs_opts_copy(o, &opts)) { dev->destroy(dev); cb_fn(cb_arg, NULL, -EINVAL); 8958 return; 8959 } 8960 } 8961 8962 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 8963 dev->destroy(dev); 8964 cb_fn(cb_arg, NULL, -EINVAL); 8965 return; 8966 } 8967 8968 err = bs_alloc(dev, &opts, &bs, &ctx); 8969 if (err) { 8970 dev->destroy(dev); 8971 cb_fn(cb_arg, NULL, err); 8972 return; 8973 } 8974 8975 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 8976
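/* On completion, cb_fn receives the blobstore handle assigned just below. */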
cpl.u.bs_handle.cb_fn = cb_fn; 8977 cpl.u.bs_handle.cb_arg = cb_arg; 8978 cpl.u.bs_handle.bs = bs; 8979 8980 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 8981 if (!ctx->seq) { 8982 spdk_free(ctx->super); 8983 free(ctx); 8984 bs_free(bs); 8985 cb_fn(cb_arg, NULL, -ENOMEM); 8986 return; 8987 } 8988 8989 /* Read the super block */ 8990 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 8991 bs_byte_to_lba(bs, sizeof(*ctx->super)), 8992 bs_grow_load_super_cpl, ctx); 8993 } 8994 8995 int 8996 spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len) 8997 { 8998 if (!blob_is_esnap_clone(blob)) { 8999 return -EINVAL; 9000 } 9001 9002 return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true); 9003 } 9004 9005 struct spdk_io_channel * 9006 blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob) 9007 { 9008 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(ch); 9009 struct spdk_bs_dev *bs_dev = blob->back_bs_dev; 9010 struct blob_esnap_channel find = {}; 9011 struct blob_esnap_channel *esnap_channel, *existing; 9012 9013 find.blob_id = blob->id; 9014 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 9015 if (spdk_likely(esnap_channel != NULL)) { 9016 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n", 9017 blob->id, spdk_thread_get_name(spdk_get_thread())); 9018 return esnap_channel->channel; 9019 } 9020 9021 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n", 9022 blob->id, spdk_thread_get_name(spdk_get_thread())); 9023 9024 esnap_channel = calloc(1, sizeof(*esnap_channel)); 9025 if (esnap_channel == NULL) { 9026 SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n", 9027 find.blob_id); 9028 return NULL; 9029 } 9030 esnap_channel->channel = bs_dev->create_channel(bs_dev); 9031 if (esnap_channel->channel == NULL) { 9032 SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id); 9033 free(esnap_channel); 9034 return NULL; 9035 } 9036 esnap_channel->blob_id = find.blob_id; 9037 existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 9038 if (spdk_unlikely(existing != NULL)) { 9039 /* 9040 * This should be unreachable: all modifications to this tree happen on this thread. 9041 */ 9042 SPDK_ERRLOG("blob 0x%" PRIx64 ": lost race to allocate a channel\n", find.blob_id); 9043 assert(false); 9044 9045 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 9046 free(esnap_channel); 9047 9048 return existing->channel; 9049 } 9050 9051 return esnap_channel->channel; 9052 } 9053 9054 static int 9055 blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2) 9056 { 9057 return (c1->blob_id < c2->blob_id ?
-1 : c1->blob_id > c2->blob_id); 9058 } 9059 9060 struct blob_esnap_destroy_ctx { 9061 spdk_blob_op_with_handle_complete cb_fn; 9062 void *cb_arg; 9063 struct spdk_blob *blob; 9064 struct spdk_bs_dev *back_bs_dev; 9065 bool abort_io; 9066 }; 9067 9068 static void 9069 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status) 9070 { 9071 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 9072 struct spdk_blob *blob = ctx->blob; 9073 struct spdk_blob_store *bs = blob->bs; 9074 9075 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n", 9076 blob->id); 9077 9078 if (ctx->cb_fn != NULL) { 9079 ctx->cb_fn(ctx->cb_arg, blob, status); 9080 } 9081 free(ctx); 9082 9083 bs->esnap_channels_unloading--; 9084 if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) { 9085 spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg); 9086 } 9087 } 9088 9089 static void 9090 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i) 9091 { 9092 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 9093 struct spdk_blob *blob = ctx->blob; 9094 struct spdk_bs_dev *bs_dev = ctx->back_bs_dev; 9095 struct spdk_io_channel *channel = spdk_io_channel_iter_get_channel(i); 9096 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(channel); 9097 struct blob_esnap_channel *esnap_channel; 9098 struct blob_esnap_channel find = {}; 9099 9100 assert(spdk_get_thread() == spdk_io_channel_get_thread(channel)); 9101 9102 find.blob_id = blob->id; 9103 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 9104 if (esnap_channel != NULL) { 9105 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n", 9106 blob->id, spdk_thread_get_name(spdk_get_thread())); 9107 RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 9108 9109 if (ctx->abort_io) { 9110 spdk_bs_user_op_t *op, *tmp; 9111 9112 TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) { 9113 if (op->back_channel == esnap_channel->channel) { 9114 TAILQ_REMOVE(&bs_channel->queued_io, op, link); 9115 bs_user_op_abort(op, -EIO); 9116 } 9117 } 9118 } 9119 9120 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 9121 free(esnap_channel); 9122 } 9123 9124 spdk_for_each_channel_continue(i, 0); 9125 } 9126 9127 /* 9128 * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be 9129 * used when closing an esnap clone blob and after decoupling from the parent. 
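* The per-thread teardown (and optional abort of queued IO) is done by blob_esnap_destroy_one_channel() above, iterated via spdk_for_each_channel().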
9130 */ 9131 static void 9132 blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io, 9133 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9134 { 9135 struct blob_esnap_destroy_ctx *ctx; 9136 9137 if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) { 9138 if (cb_fn != NULL) { 9139 cb_fn(cb_arg, blob, 0); 9140 } 9141 return; 9142 } 9143 9144 ctx = calloc(1, sizeof(*ctx)); 9145 if (ctx == NULL) { 9146 if (cb_fn != NULL) { 9147 cb_fn(cb_arg, blob, -ENOMEM); 9148 } 9149 return; 9150 } 9151 ctx->cb_fn = cb_fn; 9152 ctx->cb_arg = cb_arg; 9153 ctx->blob = blob; 9154 ctx->back_bs_dev = blob->back_bs_dev; 9155 ctx->abort_io = abort_io; 9156 9157 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n", 9158 blob->id); 9159 9160 blob->bs->esnap_channels_unloading++; 9161 spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx, 9162 blob_esnap_destroy_channels_done); 9163 } 9164 9165 /* 9166 * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a 9167 * bs_channel is destroyed. 9168 */ 9169 static void 9170 blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch) 9171 { 9172 struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp; 9173 9174 assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch))); 9175 9176 SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n", 9177 spdk_thread_get_name(spdk_get_thread())); 9178 RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels, 9179 esnap_channel_tmp) { 9180 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 9181 ": destroying one channel in thread %s\n", 9182 esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread())); 9183 RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel); 9184 spdk_put_io_channel(esnap_channel->channel); 9185 free(esnap_channel); 9186 } 9187 SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n", 9188 spdk_thread_get_name(spdk_get_thread())); 9189 } 9190 9191 struct set_bs_dev_ctx { 9192 struct spdk_blob *blob; 9193 struct spdk_bs_dev *back_bs_dev; 9194 spdk_blob_op_complete cb_fn; 9195 void *cb_arg; 9196 int bserrno; 9197 }; 9198 9199 static void 9200 blob_set_back_bs_dev_done(void *_ctx, int bserrno) 9201 { 9202 struct set_bs_dev_ctx *ctx = _ctx; 9203 9204 if (bserrno != 0) { 9205 /* Even though the unfreeze failed, the update may have succeeded.
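* Report ctx->bserrno, the result of the back_bs_dev update, rather than the unfreeze error.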
*/ 9206 SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id, 9207 bserrno); 9208 } 9209 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 9210 free(ctx); 9211 } 9212 9213 static void 9214 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno) 9215 { 9216 struct set_bs_dev_ctx *ctx = _ctx; 9217 9218 if (bserrno != 0) { 9219 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n", 9220 blob->id, bserrno); 9221 ctx->bserrno = bserrno; 9222 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 9223 return; 9224 } 9225 9226 if (blob->back_bs_dev != NULL) { 9227 blob->back_bs_dev->destroy(blob->back_bs_dev); 9228 } 9229 9230 SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id); 9231 blob->back_bs_dev = ctx->back_bs_dev; 9232 ctx->bserrno = 0; 9233 9234 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 9235 } 9236 9237 static void 9238 blob_frozen_destroy_esnap_channels(void *_ctx, int bserrno) 9239 { 9240 struct set_bs_dev_ctx *ctx = _ctx; 9241 struct spdk_blob *blob = ctx->blob; 9242 9243 if (bserrno != 0) { 9244 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id, 9245 bserrno); 9246 ctx->cb_fn(ctx->cb_arg, bserrno); 9247 free(ctx); 9248 return; 9249 } 9250 9251 /* 9252 * This does not prevent future reads from the esnap device because any future IO will 9253 * lazily create a new esnap IO channel. 9254 */ 9255 blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx); 9256 } 9257 9258 void 9259 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev, 9260 spdk_blob_op_complete cb_fn, void *cb_arg) 9261 { 9262 struct set_bs_dev_ctx *ctx; 9263 9264 if (!blob_is_esnap_clone(blob)) { 9265 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 9266 cb_fn(cb_arg, -EINVAL); 9267 return; 9268 } 9269 9270 ctx = calloc(1, sizeof(*ctx)); 9271 if (ctx == NULL) { 9272 SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n", 9273 blob->id); 9274 cb_fn(cb_arg, -ENOMEM); 9275 return; 9276 } 9277 ctx->cb_fn = cb_fn; 9278 ctx->cb_arg = cb_arg; 9279 ctx->back_bs_dev = back_bs_dev; 9280 ctx->blob = blob; 9281 blob_freeze_io(blob, blob_frozen_destroy_esnap_channels, ctx); 9282 } 9283 9284 struct spdk_bs_dev * 9285 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob) 9286 { 9287 if (!blob_is_esnap_clone(blob)) { 9288 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 9289 return NULL; 9290 } 9291 9292 return blob->back_bs_dev; 9293 } 9294 9295 bool 9296 spdk_blob_is_degraded(const struct spdk_blob *blob) 9297 { 9298 if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) { 9299 return true; 9300 } 9301 if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) { 9302 return false; 9303 } 9304 9305 return blob->back_bs_dev->is_degraded(blob->back_bs_dev); 9306 } 9307 9308 SPDK_LOG_REGISTER_COMPONENT(blob) 9309 SPDK_LOG_REGISTER_COMPONENT(blob_esnap) 9310
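/*
 * Illustrative usage sketch (not part of the library): the typical md-thread
 * flow through the APIs above - open a blob, set an xattr, sync the metadata,
 * then close. The "example_" names and the "my-blob" tag are assumptions for
 * illustration, error handling is reduced to logging, and the snippet is kept
 * under #if 0 so it is never built.
 */
#if 0
static void
example_close_done(void *cb_arg, int bserrno)
{
	if (bserrno != 0) {
		SPDK_ERRLOG("close failed: %d\n", bserrno);
	}
}

static void
example_sync_done(void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("md sync failed: %d\n", bserrno);
	}
	/* spdk_blob_close() persists metadata again and frees the blob on last close. */
	spdk_blob_close(blob, example_close_done, NULL);
}

static void
example_open_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	const char *name = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("open failed: %d\n", bserrno);
		return;
	}
	/* Xattr changes only mark the blob dirty; they reach the disk on sync. */
	if (spdk_blob_set_xattr(blob, "name", name, strlen(name) + 1) != 0) {
		spdk_blob_close(blob, example_close_done, NULL);
		return;
	}
	spdk_blob_sync_md(blob, example_sync_done, blob);
}

/* Must be called on the blobstore's md_thread with an already-loaded bs. */
static void
example_tag_blob(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	spdk_bs_open_blob(bs, blobid, example_open_done, (void *)"my-blob");
}
#endif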