/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

/*
 * External snapshots require a channel per thread per esnap bdev. The tree
 * is populated lazily as blob IOs are handled by the back_bs_dev. When this
 * channel is destroyed, all the channels in the tree are destroyed.
 */

struct blob_esnap_channel {
	RB_ENTRY(blob_esnap_channel)	node;
	spdk_blob_id			blob_id;
	struct spdk_io_channel		*channel;
};

static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)

static inline bool
blob_is_esnap_clone(const struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
}

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	assert(blob1 != NULL && blob2 != NULL);
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}
RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	assert(spdk_spin_held(&bs->used_lock));

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}
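/*
 * Cluster allocation flow (descriptive note, not from the original source):
 * claim a cluster from the used_clusters bit pool while the caller holds
 * bs->used_lock. For extent-table blobs, a metadata page may also need to be
 * claimed to hold the extent page describing this cluster; if no md page is
 * available, the cluster claim is rolled back so a failure leaves no state
 * behind.
 */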
static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = NULL;

	assert(spdk_spin_held(&blob->bs->used_lock));

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request. */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* Extent pages never occupy md page 0, so start the search from 1. */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent page is allocated for the cluster. */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request. */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
		      blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}

static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr	*xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}
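/*
 * Ownership note (added commentary): blob_free() releases everything
 * blob_alloc() and metadata parsing may have allocated - both the active and
 * clean copies of the extent page, cluster, and md page arrays, both xattr
 * lists, and the backing bs_dev (if any). Per the asserts below, it must only
 * be called once all pending persists have completed.
 */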
static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

static void
blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_dev *bs_dev = ctx;

	if (bserrno != 0) {
		/*
		 * This is probably due to a memory allocation failure when creating the
		 * blob_esnap_destroy_ctx before iterating threads.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
			    blob->id, bserrno);
		assert(false);
	}

	if (bs_dev == NULL) {
		/*
		 * This check exists to make scan-build happy.
		 *
		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
		 * the blobstore is being loaded. It could also be NULL if there was an error
		 * opening the esnap device. In each of these cases, no channels could have been
		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
		 * deref.
		 */
		assert(false);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
	bs_dev->destroy(bs_dev);
}

static void
blob_back_bs_destroy(struct spdk_blob *blob)
{
	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
		      blob->id);

	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
					   blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set	*set;
	struct spdk_bs_user_op_args	*args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
}
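/*
 * Added commentary: unfreeze is the inverse of blob_freeze_io(). It drops
 * frozen_refcnt and, as the channel iteration reaches each thread, replays
 * any I/O that was queued on that thread's spdk_bs_channel while the blob was
 * frozen. Freeze and unfreeze calls are expected to be balanced, hence the
 * assert on frozen_refcnt below.
 */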
static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
}
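/*
 * Added commentary (an interpretation of the code that follows):
 * blob_mark_clean() moves the active metadata arrays into blob->clean and
 * re-points blob->active at freshly allocated copies, so the two sets never
 * alias each other. The clean copy records the last state known to have been
 * persisted.
 */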
static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}
static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t	cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags	*desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
			unsigned int	i, j;
			unsigned int	cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* Extent Table is already present in the md;
				 * the two descriptor types should never be present at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* Extent RLE is already present in the md;
				 * the two descriptor types should never be present at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match number
				 * from previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, which are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
			unsigned int	i;
			unsigned int	cluster_count = 0;
			size_t		cluster_idx_length;

			if (blob->extent_rle_found) {
				/* Extent RLE is already present in the md;
				 * the two descriptor types should never be present at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should match
			 * the current size of the blob.
			 * If this changes to batch reading, this check should be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify whether
			 * this blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}
static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD; this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}
static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}
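/*
 * Added commentary: the on-disk layout produced by blob_serialize_xattr()
 * below, and validated by blob_deserialize_xattr() on load, packs the fields
 * of spdk_blob_md_descriptor_xattr back to back:
 *
 *   type | length | name_length | value_length | name bytes | value bytes
 *
 * where length covers everything after the type/length header, i.e.
 * sizeof(name_length) + sizeof(value_length) + name_length + value_length.
 */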
/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr	*desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}
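/*
 * Illustration of the encoding above (hypothetical values, not from the
 * source): a blob whose extent_pages array is { 37, 0, 0, 0, 52 } serializes
 * as three entries: { page_idx = 37, num_pages = 1 }, { page_idx = 0,
 * num_pages = 3 } for the run of unallocated extent pages, and
 * { page_idx = 52, num_pages = 1 }. Allocated extent pages are never
 * coalesced; only runs of zeroes are.
 */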
static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least a single extent table entry always has to be persisted.
	 * Such a case occurs with num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
	/* Assert for scan-build false positive */
	assert(lba_per_cluster > 0);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}
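/*
 * Worked example for the RLE encoding above (hypothetical values, not from
 * the source): with lba_per_cluster == 8 and active.clusters ==
 * { 64, 72, 80, 0, 0, 256 }, the first three clusters are LBA-contiguous and
 * collapse into { cluster_idx = 8, length = 3 }, the two unallocated clusters
 * become { cluster_idx = 0, length = 2 }, and the last one becomes
 * { cluster_idx = 32, length = 1 }. cluster_idx is recovered as
 * lba / lba_per_cluster.
 */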
static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr	*xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}
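/*
 * Added commentary: descriptor order within the md page chain is fixed by
 * blob_serialize() below - flags first (which is why blob_serialize_flags()
 * can simply assert that the buffer has room), then user xattrs, then
 * internal xattrs, and finally either the extent table or the RLE extents
 * depending on blob->use_extent_table.
 */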
static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	uint32_t			next_extent_page;
	spdk_bs_sequence_t		*seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot fail\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);

static int
blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
{
	struct spdk_blob_store *bs = blob->bs;
	struct spdk_bs_dev *bs_dev = NULL;
	const void *esnap_id = NULL;
	size_t id_len = 0;
	int rc;

	if (bs->esnap_bs_dev_create == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
			       "without support for esnap clones\n", blob->id);
		return -ENOTSUP;
	}
	assert(blob->back_bs_dev == NULL);

	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
	if (rc != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
		return -EINVAL;
	}
	assert(id_len > 0 && id_len < UINT32_MAX);

	SPDK_INFOLOG(blob, "Creating external snapshot device\n");

	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
				     &bs_dev);
	if (rc != 0) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
			      "with error %d\n", blob->id, rc);
		return rc;
	}

	/*
	 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot.
	 * This especially might happen during spdk_bs_load() iteration.
	 */
	if (bs_dev != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
				       "is not compatible with blobstore block size %u\n",
				       blob->id, bs_dev->blocklen, bs->io_unit_size);
			bs_dev->destroy(bs_dev);
			return -EINVAL;
		}
	}

	blob->back_bs_dev = bs_dev;
	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;

	return 0;
}
static void
blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	const void			*value;
	size_t				len;
	int				rc;

	if (blob_is_esnap_clone(blob)) {
		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
		blob_load_final(ctx, rc);
		return;
	}

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}
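/*
 * Added commentary: extent pages are read one at a time. Each
 * bs_sequence_read_dev() below re-enters this function as its completion
 * callback, first validating and parsing the page that just arrived, then
 * issuing the read for the next allocated extent page. Unallocated extent
 * pages (index 0) are instead accounted for by growing the cluster array out
 * of remaining_clusters_in_et.
 */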
static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	uint64_t i;
	uint32_t crc;
	uint64_t lba;
	void *tmp;
	uint64_t sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case the blob size should be increased by up to the amount
			 * left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(seq, ctx);
}
static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	int rc;
	uint32_t crc;
	uint32_t current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support for
		 * the extent table. No extent_* descriptors means that the blob has a
		 * length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(seq, ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			next_extent_page;
	struct spdk_blob_md_page	*extent_page;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

static void
bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (ctx->blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}
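/*
 * Added commentary: bs_super_validate() checks a super block read from disk
 * before trusting any of it - version within the supported range, on-disk
 * signature, page CRC, bstype (an exact match or an all-zero wildcard), and
 * that the recorded blobstore size does not exceed the underlying device.
 */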
static int
bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
{
	uint32_t	crc;
	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];

	if (super->version > SPDK_BS_VERSION ||
	    super->version < SPDK_BS_INITIAL_VERSION) {
		return -EILSEQ;
	}

	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(super->signature)) != 0) {
		return -EILSEQ;
	}

	crc = blob_md_page_calc_crc(super);
	if (crc != super->crc) {
		return -EILSEQ;
	}

	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless of bstype\n");
	} else {
		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		return -ENXIO;
	}

	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
		return -EILSEQ;
	}

	return 0;
}

static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx	*next_persist, *tmp;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
}

static void
blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	spdk_spin_lock(&bs->used_lock);

	/* Release all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to release if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			bs_release_md_page(bs, blob->active.extent_pages[i]);
		}
	}

	spdk_spin_unlock(&bs->used_lock);

	if (blob->active.num_extent_pages == 0) {
		free(blob->active.extent_pages);
		blob->active.extent_pages = NULL;
		blob->active.extent_pages_array_size = 0;
	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
		assert(tmp != NULL);
		blob->active.extent_pages = tmp;
#endif
		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
	}

	blob_persist_complete(seq, ctx, bserrno);
}
struct spdk_blob_store *bs = blob->bs; 1883 size_t i; 1884 uint64_t lba; 1885 uint64_t lba_count; 1886 spdk_bs_batch_t *batch; 1887 1888 batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx); 1889 lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 1890 1891 /* Clear all extent_pages that were truncated */ 1892 for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) { 1893 /* Nothing to clear if it was not allocated */ 1894 if (blob->active.extent_pages[i] != 0) { 1895 lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]); 1896 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1897 } 1898 } 1899 1900 bs_batch_close(batch); 1901 } 1902 1903 static void 1904 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1905 { 1906 struct spdk_blob_persist_ctx *ctx = cb_arg; 1907 struct spdk_blob *blob = ctx->blob; 1908 struct spdk_blob_store *bs = blob->bs; 1909 size_t i; 1910 1911 if (bserrno != 0) { 1912 blob_persist_complete(seq, ctx, bserrno); 1913 return; 1914 } 1915 1916 spdk_spin_lock(&bs->used_lock); 1917 /* Release all clusters that were truncated */ 1918 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1919 uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]); 1920 1921 /* Nothing to release if it was not allocated */ 1922 if (blob->active.clusters[i] != 0) { 1923 bs_release_cluster(bs, cluster_num); 1924 } 1925 } 1926 spdk_spin_unlock(&bs->used_lock); 1927 1928 if (blob->active.num_clusters == 0) { 1929 free(blob->active.clusters); 1930 blob->active.clusters = NULL; 1931 blob->active.cluster_array_size = 0; 1932 } else if (blob->active.num_clusters != blob->active.cluster_array_size) { 1933 #ifndef __clang_analyzer__ 1934 void *tmp; 1935 1936 /* scan-build really can't figure reallocs, workaround it */ 1937 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters); 1938 assert(tmp != NULL); 1939 blob->active.clusters = tmp; 1940 1941 #endif 1942 blob->active.cluster_array_size = blob->active.num_clusters; 1943 } 1944 1945 /* Move on to clearing extent pages */ 1946 blob_persist_clear_extents(seq, ctx); 1947 } 1948 1949 static void 1950 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 1951 { 1952 struct spdk_blob *blob = ctx->blob; 1953 struct spdk_blob_store *bs = blob->bs; 1954 spdk_bs_batch_t *batch; 1955 size_t i; 1956 uint64_t lba; 1957 uint64_t lba_count; 1958 1959 /* Clusters don't move around in blobs. The list shrinks or grows 1960 * at the end, but no changes ever occur in the middle of the list. 1961 */ 1962 1963 batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx); 1964 1965 /* Clear all clusters that were truncated */ 1966 lba = 0; 1967 lba_count = 0; 1968 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1969 uint64_t next_lba = blob->active.clusters[i]; 1970 uint64_t next_lba_count = bs_cluster_to_lba(bs, 1); 1971 1972 if (next_lba > 0 && (lba + lba_count) == next_lba) { 1973 /* This cluster is contiguous with the previous one. */ 1974 lba_count += next_lba_count; 1975 continue; 1976 } else if (next_lba == 0) { 1977 continue; 1978 } 1979 1980 /* This cluster is not contiguous with the previous one. 
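 * Coalescing example (illustrative, assuming 2048 LBAs per cluster):
 * truncated clusters at LBAs 10240, 12288, 0 (unallocated) and 20480
 * produce two clear operations, one of 4096 LBAs starting at 10240 and
 * one of 2048 LBAs starting at 20480.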
 */
1981
1982 		/* If a run of LBAs previously existed, clear them now */
1983 		if (lba_count > 0) {
1984 			bs_batch_clear_dev(ctx, batch, lba, lba_count);
1985 		}
1986
1987 		/* Start building the next batch */
1988 		lba = next_lba;
1989 		if (next_lba > 0) {
1990 			lba_count = next_lba_count;
1991 		} else {
1992 			lba_count = 0;
1993 		}
1994 	}
1995
1996 	/* If we ended with a contiguous set of LBAs, clear them now */
1997 	if (lba_count > 0) {
1998 		bs_batch_clear_dev(ctx, batch, lba, lba_count);
1999 	}
2000
2001 	bs_batch_close(batch);
2002 }
2003
2004 static void
2005 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2006 {
2007 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2008 	struct spdk_blob *blob = ctx->blob;
2009 	struct spdk_blob_store *bs = blob->bs;
2010 	size_t i;
2011
2012 	if (bserrno != 0) {
2013 		blob_persist_complete(seq, ctx, bserrno);
2014 		return;
2015 	}
2016
2017 	spdk_spin_lock(&bs->used_lock);
2018
2019 	/* This loop starts at 1 because the first page is special and handled
2020 	 * below. The pages (except the first) are never written in place, so
2021 	 * now that they have been zeroed, the pages in the clean list can be released.
2022 	 */
2023 	for (i = 1; i < blob->clean.num_pages; i++) {
2024 		bs_release_md_page(bs, blob->clean.pages[i]);
2025 	}
2026
2027 	if (blob->active.num_pages == 0) {
2028 		uint32_t page_num;
2029
2030 		page_num = bs_blobid_to_page(blob->id);
2031 		bs_release_md_page(bs, page_num);
2032 	}
2033
2034 	spdk_spin_unlock(&bs->used_lock);
2035
2036 	/* Move on to clearing clusters */
2037 	blob_persist_clear_clusters(seq, ctx);
2038 }
2039
2040 static void
2041 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2042 {
2043 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2044 	struct spdk_blob *blob = ctx->blob;
2045 	struct spdk_blob_store *bs = blob->bs;
2046 	uint64_t lba;
2047 	uint64_t lba_count;
2048 	spdk_bs_batch_t *batch;
2049 	size_t i;
2050
2051 	if (bserrno != 0) {
2052 		blob_persist_complete(seq, ctx, bserrno);
2053 		return;
2054 	}
2055
2056 	batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx);
2057
2058 	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
2059
2060 	/* This loop starts at 1 because the first page is special and handled
2061 	 * below. The pages (except the first) are never written in place,
2062 	 * so any pages in the clean list must be zeroed.
2063 	 */
2064 	for (i = 1; i < blob->clean.num_pages; i++) {
2065 		lba = bs_md_page_to_lba(bs, blob->clean.pages[i]);
2066
2067 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
2068 	}
2069
2070 	/* The first page will only be zeroed if this is a delete.
*/ 2071 if (blob->active.num_pages == 0) { 2072 uint32_t page_num; 2073 2074 /* The first page in the metadata goes where the blobid indicates */ 2075 page_num = bs_blobid_to_page(blob->id); 2076 lba = bs_md_page_to_lba(bs, page_num); 2077 2078 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2079 } 2080 2081 bs_batch_close(batch); 2082 } 2083 2084 static void 2085 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2086 { 2087 struct spdk_blob_persist_ctx *ctx = cb_arg; 2088 struct spdk_blob *blob = ctx->blob; 2089 struct spdk_blob_store *bs = blob->bs; 2090 uint64_t lba; 2091 uint32_t lba_count; 2092 struct spdk_blob_md_page *page; 2093 2094 if (bserrno != 0) { 2095 blob_persist_complete(seq, ctx, bserrno); 2096 return; 2097 } 2098 2099 if (blob->active.num_pages == 0) { 2100 /* Move on to the next step */ 2101 blob_persist_zero_pages(seq, ctx, 0); 2102 return; 2103 } 2104 2105 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2106 2107 page = &ctx->pages[0]; 2108 /* The first page in the metadata goes where the blobid indicates */ 2109 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 2110 2111 bs_sequence_write_dev(seq, page, lba, lba_count, 2112 blob_persist_zero_pages, ctx); 2113 } 2114 2115 static void 2116 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 2117 { 2118 struct spdk_blob *blob = ctx->blob; 2119 struct spdk_blob_store *bs = blob->bs; 2120 uint64_t lba; 2121 uint32_t lba_count; 2122 struct spdk_blob_md_page *page; 2123 spdk_bs_batch_t *batch; 2124 size_t i; 2125 2126 /* Clusters don't move around in blobs. The list shrinks or grows 2127 * at the end, but no changes ever occur in the middle of the list. 2128 */ 2129 2130 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 2131 2132 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 2133 2134 /* This starts at 1. The root page is not written until 2135 * all of the others are finished 2136 */ 2137 for (i = 1; i < blob->active.num_pages; i++) { 2138 page = &ctx->pages[i]; 2139 assert(page->sequence_num == i); 2140 2141 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 2142 2143 bs_batch_write_dev(batch, page, lba, lba_count); 2144 } 2145 2146 bs_batch_close(batch); 2147 } 2148 2149 static int 2150 blob_resize(struct spdk_blob *blob, uint64_t sz) 2151 { 2152 uint64_t i; 2153 uint64_t *tmp; 2154 uint64_t cluster; 2155 uint32_t lfmd; /* lowest free md page */ 2156 uint64_t num_clusters; 2157 uint32_t *ep_tmp; 2158 uint64_t new_num_ep = 0, current_num_ep = 0; 2159 struct spdk_blob_store *bs; 2160 int rc; 2161 2162 bs = blob->bs; 2163 2164 blob_verify_md_op(blob); 2165 2166 if (blob->active.num_clusters == sz) { 2167 return 0; 2168 } 2169 2170 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2171 /* If this blob was resized to be larger, then smaller, then 2172 * larger without syncing, then the cluster array already 2173 * contains spare assigned clusters we can use. 2174 */ 2175 num_clusters = spdk_min(blob->active.cluster_array_size, 2176 sz); 2177 } else { 2178 num_clusters = blob->active.num_clusters; 2179 } 2180 2181 if (blob->use_extent_table) { 2182 /* Round up since every cluster beyond current Extent Table size, 2183 * requires new extent page. 
*/ 2184 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2185 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2186 } 2187 2188 assert(!spdk_spin_held(&bs->used_lock)); 2189 2190 /* Check first that we have enough clusters and md pages before we start claiming them. 2191 * bs->used_lock is held to ensure that clusters we think are free are still free when we go 2192 * to claim them later in this function. 2193 */ 2194 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2195 spdk_spin_lock(&bs->used_lock); 2196 if ((sz - num_clusters) > bs->num_free_clusters) { 2197 rc = -ENOSPC; 2198 goto out; 2199 } 2200 lfmd = 0; 2201 for (i = current_num_ep; i < new_num_ep ; i++) { 2202 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2203 if (lfmd == UINT32_MAX) { 2204 /* No more free md pages. Cannot satisfy the request */ 2205 rc = -ENOSPC; 2206 goto out; 2207 } 2208 } 2209 } 2210 2211 if (sz > num_clusters) { 2212 /* Expand the cluster array if necessary. 2213 * We only shrink the array when persisting. 2214 */ 2215 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2216 if (sz > 0 && tmp == NULL) { 2217 rc = -ENOMEM; 2218 goto out; 2219 } 2220 memset(tmp + blob->active.cluster_array_size, 0, 2221 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2222 blob->active.clusters = tmp; 2223 blob->active.cluster_array_size = sz; 2224 2225 /* Expand the extents table, only if enough clusters were added */ 2226 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2227 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2228 if (new_num_ep > 0 && ep_tmp == NULL) { 2229 rc = -ENOMEM; 2230 goto out; 2231 } 2232 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2233 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2234 blob->active.extent_pages = ep_tmp; 2235 blob->active.extent_pages_array_size = new_num_ep; 2236 } 2237 } 2238 2239 blob->state = SPDK_BLOB_STATE_DIRTY; 2240 2241 if (spdk_blob_is_thin_provisioned(blob) == false) { 2242 cluster = 0; 2243 lfmd = 0; 2244 for (i = num_clusters; i < sz; i++) { 2245 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2246 /* Do not increment lfmd here. lfmd will get updated 2247 * to the md_page allocated (if any) when a new extent 2248 * page is needed. Just pass that value again, 2249 * bs_allocate_cluster will just start at that index 2250 * to find the next free md_page when needed. 
2251 */ 2252 } 2253 } 2254 2255 blob->active.num_clusters = sz; 2256 blob->active.num_extent_pages = new_num_ep; 2257 2258 rc = 0; 2259 out: 2260 if (spdk_spin_held(&bs->used_lock)) { 2261 spdk_spin_unlock(&bs->used_lock); 2262 } 2263 2264 return rc; 2265 } 2266 2267 static void 2268 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2269 { 2270 spdk_bs_sequence_t *seq = ctx->seq; 2271 struct spdk_blob *blob = ctx->blob; 2272 struct spdk_blob_store *bs = blob->bs; 2273 uint64_t i; 2274 uint32_t page_num; 2275 void *tmp; 2276 int rc; 2277 2278 /* Generate the new metadata */ 2279 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2280 if (rc < 0) { 2281 blob_persist_complete(seq, ctx, rc); 2282 return; 2283 } 2284 2285 assert(blob->active.num_pages >= 1); 2286 2287 /* Resize the cache of page indices */ 2288 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2289 if (!tmp) { 2290 blob_persist_complete(seq, ctx, -ENOMEM); 2291 return; 2292 } 2293 blob->active.pages = tmp; 2294 2295 /* Assign this metadata to pages. This requires two passes - one to verify that there are 2296 * enough pages and a second to actually claim them. The used_lock is held across 2297 * both passes to ensure things don't change in the middle. 2298 */ 2299 spdk_spin_lock(&bs->used_lock); 2300 page_num = 0; 2301 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 2302 for (i = 1; i < blob->active.num_pages; i++) { 2303 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2304 if (page_num == UINT32_MAX) { 2305 spdk_spin_unlock(&bs->used_lock); 2306 blob_persist_complete(seq, ctx, -ENOMEM); 2307 return; 2308 } 2309 page_num++; 2310 } 2311 2312 page_num = 0; 2313 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2314 for (i = 1; i < blob->active.num_pages; i++) { 2315 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2316 ctx->pages[i - 1].next = page_num; 2317 /* Now that previous metadata page is complete, calculate the crc for it. */ 2318 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2319 blob->active.pages[i] = page_num; 2320 bs_claim_md_page(bs, page_num); 2321 SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num, 2322 blob->id); 2323 page_num++; 2324 } 2325 spdk_spin_unlock(&bs->used_lock); 2326 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2327 /* Start writing the metadata from last page to first */ 2328 blob->state = SPDK_BLOB_STATE_CLEAN; 2329 blob_persist_write_page_chain(seq, ctx); 2330 } 2331 2332 static void 2333 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2334 { 2335 struct spdk_blob_persist_ctx *ctx = cb_arg; 2336 struct spdk_blob *blob = ctx->blob; 2337 size_t i; 2338 uint32_t extent_page_id; 2339 uint32_t page_count = 0; 2340 int rc; 2341 2342 if (ctx->extent_page != NULL) { 2343 spdk_free(ctx->extent_page); 2344 ctx->extent_page = NULL; 2345 } 2346 2347 if (bserrno != 0) { 2348 blob_persist_complete(seq, ctx, bserrno); 2349 return; 2350 } 2351 2352 /* Only write out Extent Pages when blob was resized. 
*/ 2353 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2354 extent_page_id = blob->active.extent_pages[i]; 2355 if (extent_page_id == 0) { 2356 /* No Extent Page to persist */ 2357 assert(spdk_blob_is_thin_provisioned(blob)); 2358 continue; 2359 } 2360 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2361 ctx->next_extent_page = i + 1; 2362 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2363 if (rc < 0) { 2364 blob_persist_complete(seq, ctx, rc); 2365 return; 2366 } 2367 2368 blob->state = SPDK_BLOB_STATE_DIRTY; 2369 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2370 2371 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2372 2373 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2374 bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 2375 blob_persist_write_extent_pages, ctx); 2376 return; 2377 } 2378 2379 blob_persist_generate_new_md(ctx); 2380 } 2381 2382 static void 2383 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2384 { 2385 struct spdk_blob_persist_ctx *ctx = cb_arg; 2386 struct spdk_blob *blob = ctx->blob; 2387 2388 if (bserrno != 0) { 2389 blob_persist_complete(seq, ctx, bserrno); 2390 return; 2391 } 2392 2393 if (blob->active.num_pages == 0) { 2394 /* This is the signal that the blob should be deleted. 2395 * Immediately jump to the clean up routine. */ 2396 assert(blob->clean.num_pages > 0); 2397 blob->state = SPDK_BLOB_STATE_CLEAN; 2398 blob_persist_zero_pages(seq, ctx, 0); 2399 return; 2400 2401 } 2402 2403 if (blob->clean.num_clusters < blob->active.num_clusters) { 2404 /* Blob was resized up */ 2405 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2406 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2407 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2408 /* Blob was resized down */ 2409 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2410 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2411 } else { 2412 /* No change in size occurred */ 2413 blob_persist_generate_new_md(ctx); 2414 return; 2415 } 2416 2417 blob_persist_write_extent_pages(seq, ctx, 0); 2418 } 2419 2420 struct spdk_bs_mark_dirty { 2421 struct spdk_blob_store *bs; 2422 struct spdk_bs_super_block *super; 2423 spdk_bs_sequence_cpl cb_fn; 2424 void *cb_arg; 2425 }; 2426 2427 static void 2428 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2429 { 2430 struct spdk_bs_mark_dirty *ctx = cb_arg; 2431 2432 if (bserrno == 0) { 2433 ctx->bs->clean = 0; 2434 } 2435 2436 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 2437 2438 spdk_free(ctx->super); 2439 free(ctx); 2440 } 2441 2442 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2443 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2444 2445 2446 static void 2447 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2448 { 2449 struct spdk_bs_mark_dirty *ctx = cb_arg; 2450 int rc; 2451 2452 if (bserrno != 0) { 2453 bs_mark_dirty_write_cpl(seq, ctx, bserrno); 2454 return; 2455 } 2456 2457 rc = bs_super_validate(ctx->super, ctx->bs); 2458 if (rc != 0) { 2459 bs_mark_dirty_write_cpl(seq, ctx, rc); 2460 return; 2461 } 2462 2463 ctx->super->clean = 0; 2464 if (ctx->super->size == 0) { 2465 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 2466 } 
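	/*
	 * Example of the legacy-size fixup above (illustrative numbers, not
	 * taken from the surrounding code): a super block written with
	 * size == 0 on a device of 2097152 blocks of 512 bytes is repaired
	 * here to 2097152 * 512 = 1073741824 bytes (1 GiB) before being
	 * rewritten by bs_write_super() below.
	 */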
2467 2468 bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx); 2469 } 2470 2471 static void 2472 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2473 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2474 { 2475 struct spdk_bs_mark_dirty *ctx; 2476 2477 /* Blobstore is already marked dirty */ 2478 if (bs->clean == 0) { 2479 cb_fn(seq, cb_arg, 0); 2480 return; 2481 } 2482 2483 ctx = calloc(1, sizeof(*ctx)); 2484 if (!ctx) { 2485 cb_fn(seq, cb_arg, -ENOMEM); 2486 return; 2487 } 2488 ctx->bs = bs; 2489 ctx->cb_fn = cb_fn; 2490 ctx->cb_arg = cb_arg; 2491 2492 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2493 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2494 if (!ctx->super) { 2495 free(ctx); 2496 cb_fn(seq, cb_arg, -ENOMEM); 2497 return; 2498 } 2499 2500 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 2501 bs_byte_to_lba(bs, sizeof(*ctx->super)), 2502 bs_mark_dirty_write, ctx); 2503 } 2504 2505 /* Write a blob to disk */ 2506 static void 2507 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2508 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2509 { 2510 struct spdk_blob_persist_ctx *ctx; 2511 2512 blob_verify_md_op(blob); 2513 2514 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2515 cb_fn(seq, cb_arg, 0); 2516 return; 2517 } 2518 2519 ctx = calloc(1, sizeof(*ctx)); 2520 if (!ctx) { 2521 cb_fn(seq, cb_arg, -ENOMEM); 2522 return; 2523 } 2524 ctx->blob = blob; 2525 ctx->seq = seq; 2526 ctx->cb_fn = cb_fn; 2527 ctx->cb_arg = cb_arg; 2528 2529 /* Multiple blob persists can affect one another, via blob->state or 2530 * blob mutable data changes. To prevent it, queue up the persists. */ 2531 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2532 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2533 return; 2534 } 2535 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2536 2537 bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx); 2538 } 2539 2540 struct spdk_blob_copy_cluster_ctx { 2541 struct spdk_blob *blob; 2542 uint8_t *buf; 2543 uint64_t page; 2544 uint64_t new_cluster; 2545 uint32_t new_extent_page; 2546 spdk_bs_sequence_t *seq; 2547 struct spdk_blob_md_page *new_cluster_page; 2548 }; 2549 2550 struct spdk_blob_free_cluster_ctx { 2551 struct spdk_blob *blob; 2552 uint64_t page; 2553 struct spdk_blob_md_page *md_page; 2554 uint64_t cluster_num; 2555 uint32_t extent_page; 2556 spdk_bs_sequence_t *seq; 2557 }; 2558 2559 static void 2560 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2561 { 2562 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2563 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2564 TAILQ_HEAD(, spdk_bs_request_set) requests; 2565 spdk_bs_user_op_t *op; 2566 2567 TAILQ_INIT(&requests); 2568 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2569 2570 while (!TAILQ_EMPTY(&requests)) { 2571 op = TAILQ_FIRST(&requests); 2572 TAILQ_REMOVE(&requests, op, link); 2573 if (bserrno == 0) { 2574 bs_user_op_execute(op); 2575 } else { 2576 bs_user_op_abort(op, bserrno); 2577 } 2578 } 2579 2580 spdk_free(ctx->buf); 2581 free(ctx); 2582 } 2583 2584 static void 2585 blob_free_cluster_cpl(void *cb_arg, int bserrno) 2586 { 2587 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 2588 spdk_bs_sequence_t *seq = ctx->seq; 2589 2590 bs_sequence_finish(seq, bserrno); 2591 2592 free(ctx); 2593 } 2594 2595 static void 2596 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx) 2597 { 2598 
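	/*
	 * Assumed call path, for clarity (see blob_insert_cluster_cpl below):
	 * when two threads race to fill the same cluster, the loser's metadata
	 * insert fails with -EEXIST and lands here, returning the speculatively
	 * claimed cluster (and extent page, if any) to the pools under
	 * used_lock:
	 *
	 *	blob_insert_cluster_cpl(ctx, -EEXIST)
	 *		-> blob_insert_cluster_revert(ctx)
	 *		-> bs_sequence_finish(ctx->seq, 0)   // reported as success
	 */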
spdk_spin_lock(&ctx->blob->bs->used_lock); 2599 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2600 if (ctx->new_extent_page != 0) { 2601 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2602 } 2603 spdk_spin_unlock(&ctx->blob->bs->used_lock); 2604 } 2605 2606 static void 2607 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2608 { 2609 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2610 2611 if (bserrno) { 2612 if (bserrno == -EEXIST) { 2613 /* The metadata insert failed because another thread 2614 * allocated the cluster first. Free our cluster 2615 * but continue without error. */ 2616 bserrno = 0; 2617 } 2618 blob_insert_cluster_revert(ctx); 2619 } 2620 2621 bs_sequence_finish(ctx->seq, bserrno); 2622 } 2623 2624 static void 2625 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2626 { 2627 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2628 uint32_t cluster_number; 2629 2630 if (bserrno) { 2631 /* The write failed, so jump to the final completion handler */ 2632 bs_sequence_finish(seq, bserrno); 2633 return; 2634 } 2635 2636 cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page); 2637 2638 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2639 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2640 } 2641 2642 static void 2643 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2644 { 2645 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2646 2647 if (bserrno != 0) { 2648 /* The read failed, so jump to the final completion handler */ 2649 bs_sequence_finish(seq, bserrno); 2650 return; 2651 } 2652 2653 /* Write whole cluster */ 2654 bs_sequence_write_dev(seq, ctx->buf, 2655 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2656 bs_cluster_to_lba(ctx->blob->bs, 1), 2657 blob_write_copy_cpl, ctx); 2658 } 2659 2660 static bool 2661 blob_can_copy(struct spdk_blob *blob, uint32_t cluster_start_page, uint64_t *base_lba) 2662 { 2663 uint64_t lba = bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page); 2664 2665 return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) && 2666 blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba); 2667 } 2668 2669 static void 2670 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba) 2671 { 2672 struct spdk_blob *blob = ctx->blob; 2673 uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz); 2674 2675 bs_sequence_copy_dev(ctx->seq, 2676 bs_cluster_to_lba(blob->bs, ctx->new_cluster), 2677 src_lba, 2678 lba_count, 2679 blob_write_copy_cpl, ctx); 2680 } 2681 2682 static void 2683 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2684 struct spdk_io_channel *_ch, 2685 uint64_t io_unit, spdk_bs_user_op_t *op) 2686 { 2687 struct spdk_bs_cpl cpl; 2688 struct spdk_bs_channel *ch; 2689 struct spdk_blob_copy_cluster_ctx *ctx; 2690 uint32_t cluster_start_page; 2691 uint32_t cluster_number; 2692 bool is_zeroes; 2693 bool can_copy; 2694 uint64_t copy_src_lba; 2695 int rc; 2696 2697 ch = spdk_io_channel_get_ctx(_ch); 2698 2699 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2700 /* There are already operations pending. Queue this user op 2701 * and return because it will be re-executed when the outstanding 2702 * cluster allocation completes. 
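 * (The drain happens in blob_allocate_and_copy_cluster_cpl() above, which
 * swaps need_cluster_alloc onto a local list and then either executes or
 * aborts each queued op depending on bserrno.)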
 */
2703 		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2704 		return;
2705 	}
2706
2707 	/* Round the io_unit offset down to the first page in the cluster */
2708 	cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit);
2709
2710 	/* Calculate which index in the metadata cluster array the corresponding
2711 	 * cluster is supposed to be at. */
2712 	cluster_number = bs_io_unit_to_cluster_number(blob, io_unit);
2713
2714 	ctx = calloc(1, sizeof(*ctx));
2715 	if (!ctx) {
2716 		bs_user_op_abort(op, -ENOMEM);
2717 		return;
2718 	}
2719
2720 	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
2721
2722 	ctx->blob = blob;
2723 	ctx->page = cluster_start_page;
2724 	ctx->new_cluster_page = ch->new_cluster_page;
2725 	memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE);
2726 	can_copy = blob_can_copy(blob, cluster_start_page, &copy_src_lba);
2727
2728 	is_zeroes = blob->back_bs_dev->is_zeroes(blob->back_bs_dev,
2729 			bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2730 			bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz));
2731 	if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) {
2732 		ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
2733 				       NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2734 		if (!ctx->buf) {
2735 			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
2736 				    blob->bs->cluster_sz);
2737 			free(ctx);
2738 			bs_user_op_abort(op, -ENOMEM);
2739 			return;
2740 		}
2741 	}
2742
2743 	spdk_spin_lock(&blob->bs->used_lock);
2744 	rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
2745 				 false);
2746 	spdk_spin_unlock(&blob->bs->used_lock);
2747 	if (rc != 0) {
2748 		spdk_free(ctx->buf);
2749 		free(ctx);
2750 		bs_user_op_abort(op, rc);
2751 		return;
2752 	}
2753
2754 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2755 	cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl;
2756 	cpl.u.blob_basic.cb_arg = ctx;
2757
2758 	ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob);
2759 	if (!ctx->seq) {
2760 		spdk_spin_lock(&blob->bs->used_lock);
2761 		bs_release_cluster(blob->bs, ctx->new_cluster);
2762 		spdk_spin_unlock(&blob->bs->used_lock);
2763 		spdk_free(ctx->buf);
2764 		free(ctx);
2765 		bs_user_op_abort(op, -ENOMEM);
2766 		return;
2767 	}
2768
2769 	/* Queue the user op to block other incoming operations */
2770 	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2771
2772 	if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) {
2773 		if (can_copy) {
2774 			blob_copy(ctx, op, copy_src_lba);
2775 		} else {
2776 			/* Read cluster from backing device */
2777 			bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
2778 						bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2779 						bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
2780 						blob_write_copy, ctx);
2781 		}
2782
2783 	} else {
2784 		blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2785 						 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx);
2786 	}
2787 }
2788
2789 static inline bool
2790 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
2791 				 uint64_t *lba, uint64_t *lba_count)
2792 {
2793 	*lba_count = length;
2794
2795 	if (!bs_io_unit_is_allocated(blob, io_unit)) {
2796 		assert(blob->back_bs_dev != NULL);
2797 		*lba = bs_io_unit_to_back_dev_lba(blob, io_unit);
2798 		*lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count);
2799 		return false;
2800 	} else {
2801 		*lba = bs_blob_io_unit_to_lba(blob, io_unit);
2802 		return true;
2803 	}
2804 }
2805
2806 struct
op_split_ctx {
2807 	struct spdk_blob *blob;
2808 	struct spdk_io_channel *channel;
2809 	uint64_t io_unit_offset;
2810 	uint64_t io_units_remaining;
2811 	void *curr_payload;
2812 	enum spdk_blob_op_type op_type;
2813 	spdk_bs_sequence_t *seq;
2814 	bool in_submit_ctx;
2815 	bool completed_in_submit_ctx;
2816 	bool done;
2817 };
2818
2819 static void
2820 blob_request_submit_op_split_next(void *cb_arg, int bserrno)
2821 {
2822 	struct op_split_ctx *ctx = cb_arg;
2823 	struct spdk_blob *blob = ctx->blob;
2824 	struct spdk_io_channel *ch = ctx->channel;
2825 	enum spdk_blob_op_type op_type = ctx->op_type;
2826 	uint8_t *buf;
2827 	uint64_t offset;
2828 	uint64_t length;
2829 	uint64_t op_length;
2830
2831 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2832 		bs_sequence_finish(ctx->seq, bserrno);
2833 		if (ctx->in_submit_ctx) {
2834 			/* Defer freeing of the ctx object, since it will be
2835 			 * accessed when this unwinds back to the submission
2836 			 * context.
2837 			 */
2838 			ctx->done = true;
2839 		} else {
2840 			free(ctx);
2841 		}
2842 		return;
2843 	}
2844
2845 	if (ctx->in_submit_ctx) {
2846 		/* If this split operation completed in the context
2847 		 * of its submission, mark the flag and return immediately
2848 		 * to avoid recursion.
2849 		 */
2850 		ctx->completed_in_submit_ctx = true;
2851 		return;
2852 	}
2853
2854 	while (true) {
2855 		ctx->completed_in_submit_ctx = false;
2856
2857 		offset = ctx->io_unit_offset;
2858 		length = ctx->io_units_remaining;
2859 		buf = ctx->curr_payload;
2860 		op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob,
2861 				     offset));
2862
2863 		/* Update length and payload for next operation */
2864 		ctx->io_units_remaining -= op_length;
2865 		ctx->io_unit_offset += op_length;
2866 		if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
2867 			ctx->curr_payload += op_length * blob->bs->io_unit_size;
2868 		}
2869
2870 		assert(!ctx->in_submit_ctx);
2871 		ctx->in_submit_ctx = true;
2872
2873 		switch (op_type) {
2874 		case SPDK_BLOB_READ:
2875 			spdk_blob_io_read(blob, ch, buf, offset, op_length,
2876 					  blob_request_submit_op_split_next, ctx);
2877 			break;
2878 		case SPDK_BLOB_WRITE:
2879 			spdk_blob_io_write(blob, ch, buf, offset, op_length,
2880 					   blob_request_submit_op_split_next, ctx);
2881 			break;
2882 		case SPDK_BLOB_UNMAP:
2883 			spdk_blob_io_unmap(blob, ch, offset, op_length,
2884 					   blob_request_submit_op_split_next, ctx);
2885 			break;
2886 		case SPDK_BLOB_WRITE_ZEROES:
2887 			spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
2888 						  blob_request_submit_op_split_next, ctx);
2889 			break;
2890 		case SPDK_BLOB_READV:
2891 		case SPDK_BLOB_WRITEV:
2892 			SPDK_ERRLOG("readv/writev not valid\n");
2893 			bs_sequence_finish(ctx->seq, -EINVAL);
2894 			free(ctx);
2895 			return;
2896 		}
2897
2898 #ifndef __clang_analyzer__
2899 		/* scan-build reports a false positive around accessing the ctx here. It
2900 		 * forms a path that recursively calls this function, but then says
2901 		 * "assuming ctx->in_submit_ctx is false", when that isn't possible.
2902 		 * This path does free(ctx), returns to here, and reports a use-after-free
2903 		 * bug. Wrapping this bit of code so that scan-build doesn't see it
2904 		 * works around the scan-build bug.
2905 		 */
2906 		assert(ctx->in_submit_ctx);
2907 		ctx->in_submit_ctx = false;
2908
2909 		/* If the operation completed immediately, loop back and submit the
2910 		 * next operation. Otherwise we can return and the next split
2911 		 * operation will get submitted when this current operation is
2912 		 * later completed asynchronously.
2913 */ 2914 if (ctx->completed_in_submit_ctx) { 2915 continue; 2916 } else if (ctx->done) { 2917 free(ctx); 2918 } 2919 #endif 2920 break; 2921 } 2922 } 2923 2924 static void 2925 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 2926 void *payload, uint64_t offset, uint64_t length, 2927 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2928 { 2929 struct op_split_ctx *ctx; 2930 spdk_bs_sequence_t *seq; 2931 struct spdk_bs_cpl cpl; 2932 2933 assert(blob != NULL); 2934 2935 ctx = calloc(1, sizeof(struct op_split_ctx)); 2936 if (ctx == NULL) { 2937 cb_fn(cb_arg, -ENOMEM); 2938 return; 2939 } 2940 2941 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2942 cpl.u.blob_basic.cb_fn = cb_fn; 2943 cpl.u.blob_basic.cb_arg = cb_arg; 2944 2945 seq = bs_sequence_start_blob(ch, &cpl, blob); 2946 if (!seq) { 2947 free(ctx); 2948 cb_fn(cb_arg, -ENOMEM); 2949 return; 2950 } 2951 2952 ctx->blob = blob; 2953 ctx->channel = ch; 2954 ctx->curr_payload = payload; 2955 ctx->io_unit_offset = offset; 2956 ctx->io_units_remaining = length; 2957 ctx->op_type = op_type; 2958 ctx->seq = seq; 2959 2960 blob_request_submit_op_split_next(ctx, 0); 2961 } 2962 2963 static void 2964 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno) 2965 { 2966 struct spdk_blob_free_cluster_ctx *ctx = cb_arg; 2967 2968 if (bserrno) { 2969 bs_sequence_finish(ctx->seq, bserrno); 2970 free(ctx); 2971 return; 2972 } 2973 2974 blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num, 2975 ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx); 2976 } 2977 2978 static void 2979 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 2980 void *payload, uint64_t offset, uint64_t length, 2981 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2982 { 2983 struct spdk_bs_cpl cpl; 2984 uint64_t lba; 2985 uint64_t lba_count; 2986 bool is_allocated; 2987 2988 assert(blob != NULL); 2989 2990 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2991 cpl.u.blob_basic.cb_fn = cb_fn; 2992 cpl.u.blob_basic.cb_arg = cb_arg; 2993 2994 if (blob->frozen_refcnt) { 2995 /* This blob I/O is frozen */ 2996 spdk_bs_user_op_t *op; 2997 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 2998 2999 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 3000 if (!op) { 3001 cb_fn(cb_arg, -ENOMEM); 3002 return; 3003 } 3004 3005 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3006 3007 return; 3008 } 3009 3010 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3011 3012 switch (op_type) { 3013 case SPDK_BLOB_READ: { 3014 spdk_bs_batch_t *batch; 3015 3016 batch = bs_batch_open(_ch, &cpl, blob); 3017 if (!batch) { 3018 cb_fn(cb_arg, -ENOMEM); 3019 return; 3020 } 3021 3022 if (is_allocated) { 3023 /* Read from the blob */ 3024 bs_batch_read_dev(batch, payload, lba, lba_count); 3025 } else { 3026 /* Read from the backing block device */ 3027 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 3028 } 3029 3030 bs_batch_close(batch); 3031 break; 3032 } 3033 case SPDK_BLOB_WRITE: 3034 case SPDK_BLOB_WRITE_ZEROES: { 3035 if (is_allocated) { 3036 /* Write to the blob */ 3037 spdk_bs_batch_t *batch; 3038 3039 if (lba_count == 0) { 3040 cb_fn(cb_arg, 0); 3041 return; 3042 } 3043 3044 batch = bs_batch_open(_ch, &cpl, blob); 3045 if (!batch) { 3046 cb_fn(cb_arg, -ENOMEM); 3047 return; 3048 } 3049 3050 if (op_type == SPDK_BLOB_WRITE) { 3051 bs_batch_write_dev(batch, payload, lba, 
lba_count);
3052 			} else {
3053 				bs_batch_write_zeroes_dev(batch, lba, lba_count);
3054 			}
3055
3056 			bs_batch_close(batch);
3057 		} else {
3058 			/* Queue this operation and allocate the cluster */
3059 			spdk_bs_user_op_t *op;
3060
3061 			op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
3062 			if (!op) {
3063 				cb_fn(cb_arg, -ENOMEM);
3064 				return;
3065 			}
3066
3067 			bs_allocate_and_copy_cluster(blob, _ch, offset, op);
3068 		}
3069 		break;
3070 	}
3071 	case SPDK_BLOB_UNMAP: {
3072 		struct spdk_blob_free_cluster_ctx *ctx = NULL;
3073 		spdk_bs_batch_t *batch;
3074
3075 		/* If the unmap is cluster-aligned and covers a whole cluster, release the cluster */
3076 		if (spdk_blob_is_thin_provisioned(blob) && is_allocated &&
3077 		    bs_io_units_per_cluster(blob) == length) {
3078 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
3079 			uint32_t cluster_start_page;
3080 			uint32_t cluster_number;
3081
3082 			assert(offset % bs_io_units_per_cluster(blob) == 0);
3083
3084 			/* Round the io_unit offset down to the first page in the cluster */
3085 			cluster_start_page = bs_io_unit_to_cluster_start(blob, offset);
3086
3087 			/* Calculate which index in the metadata cluster array the corresponding
3088 			 * cluster is supposed to be at. */
3089 			cluster_number = bs_io_unit_to_cluster_number(blob, offset);
3090
3091 			ctx = calloc(1, sizeof(*ctx));
3092 			if (!ctx) {
3093 				cb_fn(cb_arg, -ENOMEM);
3094 				return;
3095 			}
3096 			/* When freeing a cluster the flow should be (in order):
3097 			 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak
3098 			 * old data)
3099 			 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the
3100 			 * cluster), update and sync metadata freeing the cluster
3101 			 * 3. Once metadata update is done, complete the user unmap request
3102 			 */
3103 			ctx->blob = blob;
3104 			ctx->page = cluster_start_page;
3105 			ctx->cluster_num = cluster_number;
3106 			ctx->md_page = bs_channel->new_cluster_page;
3107 			ctx->seq = bs_sequence_start_bs(_ch, &cpl);
3108 			if (!ctx->seq) {
3109 				free(ctx);
3110 				cb_fn(cb_arg, -ENOMEM);
3111 				return;
3112 			}
3113
3114 			if (blob->use_extent_table) {
3115 				ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number);
3116 			}
3117
3118 			cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete;
3119 			cpl.u.blob_basic.cb_arg = ctx;
3120 		}
3121
3122 		batch = bs_batch_open(_ch, &cpl, blob);
3123 		if (!batch) {
3124 			free(ctx);
3125 			cb_fn(cb_arg, -ENOMEM);
3126 			return;
3127 		}
3128
3129 		if (is_allocated) {
3130 			bs_batch_unmap_dev(batch, lba, lba_count);
3131 		}
3132
3133 		bs_batch_close(batch);
3134 		break;
3135 	}
3136 	case SPDK_BLOB_READV:
3137 	case SPDK_BLOB_WRITEV:
3138 		SPDK_ERRLOG("readv/writev not valid\n");
3139 		cb_fn(cb_arg, -EINVAL);
3140 		break;
3141 	}
3142 }
3143
3144 static void
3145 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
3146 		       void *payload, uint64_t offset, uint64_t length,
3147 		       spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
3148 {
3149 	assert(blob != NULL);
3150
3151 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
3152 		cb_fn(cb_arg, -EPERM);
3153 		return;
3154 	}
3155
3156 	if (length == 0) {
3157 		cb_fn(cb_arg, 0);
3158 		return;
3159 	}
3160
3161 	if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
3162 		cb_fn(cb_arg, -EINVAL);
3163 		return;
3164 	}
3165 	if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) {
3166 		blob_request_submit_op_single(_channel, blob, payload, offset, length,
3167 					      cb_fn, cb_arg, op_type);
3168 	} else {
3169
blob_request_submit_op_split(_channel, blob, payload, offset, length, 3170 cb_fn, cb_arg, op_type); 3171 } 3172 } 3173 3174 struct rw_iov_ctx { 3175 struct spdk_blob *blob; 3176 struct spdk_io_channel *channel; 3177 spdk_blob_op_complete cb_fn; 3178 void *cb_arg; 3179 bool read; 3180 int iovcnt; 3181 struct iovec *orig_iov; 3182 uint64_t io_unit_offset; 3183 uint64_t io_units_remaining; 3184 uint64_t io_units_done; 3185 struct spdk_blob_ext_io_opts *ext_io_opts; 3186 struct iovec iov[0]; 3187 }; 3188 3189 static void 3190 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3191 { 3192 assert(cb_arg == NULL); 3193 bs_sequence_finish(seq, bserrno); 3194 } 3195 3196 static void 3197 rw_iov_split_next(void *cb_arg, int bserrno) 3198 { 3199 struct rw_iov_ctx *ctx = cb_arg; 3200 struct spdk_blob *blob = ctx->blob; 3201 struct iovec *iov, *orig_iov; 3202 int iovcnt; 3203 size_t orig_iovoff; 3204 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 3205 uint64_t byte_count; 3206 3207 if (bserrno != 0 || ctx->io_units_remaining == 0) { 3208 ctx->cb_fn(ctx->cb_arg, bserrno); 3209 free(ctx); 3210 return; 3211 } 3212 3213 io_unit_offset = ctx->io_unit_offset; 3214 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 3215 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 3216 /* 3217 * Get index and offset into the original iov array for our current position in the I/O sequence. 3218 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 3219 * point to the current position in the I/O sequence. 3220 */ 3221 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 3222 orig_iov = &ctx->orig_iov[0]; 3223 orig_iovoff = 0; 3224 while (byte_count > 0) { 3225 if (byte_count >= orig_iov->iov_len) { 3226 byte_count -= orig_iov->iov_len; 3227 orig_iov++; 3228 } else { 3229 orig_iovoff = byte_count; 3230 byte_count = 0; 3231 } 3232 } 3233 3234 /* 3235 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 3236 * bytes of this next I/O remain to be accounted for in the new iov array. 
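 * Worked example (illustrative values): io_units_count = 4 with a 512-byte
 * io_unit gives byte_count = 2048; starting at orig_iovoff = 512 into a
 * 1024-byte iov that is followed by a 4096-byte iov, the loop below emits
 * two entries of 512 and 1536 bytes.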
3237 */ 3238 byte_count = io_units_count * blob->bs->io_unit_size; 3239 iov = &ctx->iov[0]; 3240 iovcnt = 0; 3241 while (byte_count > 0) { 3242 assert(iovcnt < ctx->iovcnt); 3243 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 3244 iov->iov_base = orig_iov->iov_base + orig_iovoff; 3245 byte_count -= iov->iov_len; 3246 orig_iovoff = 0; 3247 orig_iov++; 3248 iov++; 3249 iovcnt++; 3250 } 3251 3252 ctx->io_unit_offset += io_units_count; 3253 ctx->io_units_remaining -= io_units_count; 3254 ctx->io_units_done += io_units_count; 3255 iov = &ctx->iov[0]; 3256 3257 if (ctx->read) { 3258 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3259 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3260 } else { 3261 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 3262 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 3263 } 3264 } 3265 3266 static void 3267 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 3268 struct iovec *iov, int iovcnt, 3269 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 3270 struct spdk_blob_ext_io_opts *ext_io_opts) 3271 { 3272 struct spdk_bs_cpl cpl; 3273 3274 assert(blob != NULL); 3275 3276 if (!read && blob->data_ro) { 3277 cb_fn(cb_arg, -EPERM); 3278 return; 3279 } 3280 3281 if (length == 0) { 3282 cb_fn(cb_arg, 0); 3283 return; 3284 } 3285 3286 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 3287 cb_fn(cb_arg, -EINVAL); 3288 return; 3289 } 3290 3291 /* 3292 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 3293 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 3294 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 3295 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 3296 * to allocate a separate iov array and split the I/O such that none of the resulting 3297 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 3298 * but since this case happens very infrequently, any performance impact will be negligible. 3299 * 3300 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 3301 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 3302 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 3303 * when the batch was completed, to allow for freeing the memory for the iov arrays. 3304 */ 3305 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 3306 uint64_t lba_count; 3307 uint64_t lba; 3308 bool is_allocated; 3309 3310 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 3311 cpl.u.blob_basic.cb_fn = cb_fn; 3312 cpl.u.blob_basic.cb_arg = cb_arg; 3313 3314 if (blob->frozen_refcnt) { 3315 /* This blob I/O is frozen */ 3316 enum spdk_blob_op_type op_type; 3317 spdk_bs_user_op_t *op; 3318 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 3319 3320 op_type = read ? 
SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 3321 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 3322 if (!op) { 3323 cb_fn(cb_arg, -ENOMEM); 3324 return; 3325 } 3326 3327 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 3328 3329 return; 3330 } 3331 3332 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 3333 3334 if (read) { 3335 spdk_bs_sequence_t *seq; 3336 3337 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3338 if (!seq) { 3339 cb_fn(cb_arg, -ENOMEM); 3340 return; 3341 } 3342 3343 seq->ext_io_opts = ext_io_opts; 3344 3345 if (is_allocated) { 3346 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3347 } else { 3348 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3349 rw_iov_done, NULL); 3350 } 3351 } else { 3352 if (is_allocated) { 3353 spdk_bs_sequence_t *seq; 3354 3355 seq = bs_sequence_start_blob(_channel, &cpl, blob); 3356 if (!seq) { 3357 cb_fn(cb_arg, -ENOMEM); 3358 return; 3359 } 3360 3361 seq->ext_io_opts = ext_io_opts; 3362 3363 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3364 } else { 3365 /* Queue this operation and allocate the cluster */ 3366 spdk_bs_user_op_t *op; 3367 3368 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3369 length); 3370 if (!op) { 3371 cb_fn(cb_arg, -ENOMEM); 3372 return; 3373 } 3374 3375 op->ext_io_opts = ext_io_opts; 3376 3377 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3378 } 3379 } 3380 } else { 3381 struct rw_iov_ctx *ctx; 3382 3383 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3384 if (ctx == NULL) { 3385 cb_fn(cb_arg, -ENOMEM); 3386 return; 3387 } 3388 3389 ctx->blob = blob; 3390 ctx->channel = _channel; 3391 ctx->cb_fn = cb_fn; 3392 ctx->cb_arg = cb_arg; 3393 ctx->read = read; 3394 ctx->orig_iov = iov; 3395 ctx->iovcnt = iovcnt; 3396 ctx->io_unit_offset = offset; 3397 ctx->io_units_remaining = length; 3398 ctx->io_units_done = 0; 3399 ctx->ext_io_opts = ext_io_opts; 3400 3401 rw_iov_split_next(ctx, 0); 3402 } 3403 } 3404 3405 static struct spdk_blob * 3406 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3407 { 3408 struct spdk_blob find; 3409 3410 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3411 return NULL; 3412 } 3413 3414 find.id = blobid; 3415 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3416 } 3417 3418 static void 3419 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3420 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3421 { 3422 assert(blob != NULL); 3423 *snapshot_entry = NULL; 3424 *clone_entry = NULL; 3425 3426 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3427 return; 3428 } 3429 3430 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3431 if ((*snapshot_entry)->id == blob->parent_id) { 3432 break; 3433 } 3434 } 3435 3436 if (*snapshot_entry != NULL) { 3437 TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) { 3438 if ((*clone_entry)->id == blob->id) { 3439 break; 3440 } 3441 } 3442 3443 assert(*clone_entry != NULL); 3444 } 3445 } 3446 3447 static int 3448 bs_channel_create(void *io_device, void *ctx_buf) 3449 { 3450 struct spdk_blob_store *bs = io_device; 3451 struct spdk_bs_channel *channel = ctx_buf; 3452 struct spdk_bs_dev *dev; 3453 uint32_t max_ops = bs->max_channel_ops; 3454 uint32_t i; 3455 3456 dev = bs->dev; 3457 3458 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 
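	/*
	 * Per-channel request pool: the calloc above sizes the pool by
	 * bs->max_channel_ops (default SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS,
	 * see spdk_bs_opts_init() below); the sets are threaded onto
	 * channel->reqs so the I/O hot path never allocates.
	 */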
3459 if (!channel->req_mem) { 3460 return -1; 3461 } 3462 3463 TAILQ_INIT(&channel->reqs); 3464 3465 for (i = 0; i < max_ops; i++) { 3466 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3467 } 3468 3469 channel->bs = bs; 3470 channel->dev = dev; 3471 channel->dev_channel = dev->create_channel(dev); 3472 3473 if (!channel->dev_channel) { 3474 SPDK_ERRLOG("Failed to create device channel.\n"); 3475 free(channel->req_mem); 3476 return -1; 3477 } 3478 3479 channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, 3480 SPDK_MALLOC_DMA); 3481 if (!channel->new_cluster_page) { 3482 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3483 free(channel->req_mem); 3484 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3485 return -1; 3486 } 3487 3488 TAILQ_INIT(&channel->need_cluster_alloc); 3489 TAILQ_INIT(&channel->queued_io); 3490 RB_INIT(&channel->esnap_channels); 3491 3492 return 0; 3493 } 3494 3495 static void 3496 bs_channel_destroy(void *io_device, void *ctx_buf) 3497 { 3498 struct spdk_bs_channel *channel = ctx_buf; 3499 spdk_bs_user_op_t *op; 3500 3501 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3502 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3503 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3504 bs_user_op_abort(op, -EIO); 3505 } 3506 3507 while (!TAILQ_EMPTY(&channel->queued_io)) { 3508 op = TAILQ_FIRST(&channel->queued_io); 3509 TAILQ_REMOVE(&channel->queued_io, op, link); 3510 bs_user_op_abort(op, -EIO); 3511 } 3512 3513 blob_esnap_destroy_bs_channel(channel); 3514 3515 free(channel->req_mem); 3516 spdk_free(channel->new_cluster_page); 3517 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3518 } 3519 3520 static void 3521 bs_dev_destroy(void *io_device) 3522 { 3523 struct spdk_blob_store *bs = io_device; 3524 struct spdk_blob *blob, *blob_tmp; 3525 3526 bs->dev->destroy(bs->dev); 3527 3528 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3529 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3530 spdk_bit_array_clear(bs->open_blobids, blob->id); 3531 blob_free(blob); 3532 } 3533 3534 spdk_spin_destroy(&bs->used_lock); 3535 3536 spdk_bit_array_free(&bs->open_blobids); 3537 spdk_bit_array_free(&bs->used_blobids); 3538 spdk_bit_array_free(&bs->used_md_pages); 3539 spdk_bit_pool_free(&bs->used_clusters); 3540 /* 3541 * If this function is called for any reason except a successful unload, 3542 * the unload_cpl type will be NONE and this will be a nop. 
3543 */ 3544 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3545 3546 free(bs); 3547 } 3548 3549 static int 3550 bs_blob_list_add(struct spdk_blob *blob) 3551 { 3552 spdk_blob_id snapshot_id; 3553 struct spdk_blob_list *snapshot_entry = NULL; 3554 struct spdk_blob_list *clone_entry = NULL; 3555 3556 assert(blob != NULL); 3557 3558 snapshot_id = blob->parent_id; 3559 if (snapshot_id == SPDK_BLOBID_INVALID || 3560 snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 3561 return 0; 3562 } 3563 3564 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3565 if (snapshot_entry == NULL) { 3566 /* Snapshot not found */ 3567 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3568 if (snapshot_entry == NULL) { 3569 return -ENOMEM; 3570 } 3571 snapshot_entry->id = snapshot_id; 3572 TAILQ_INIT(&snapshot_entry->clones); 3573 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3574 } else { 3575 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3576 if (clone_entry->id == blob->id) { 3577 break; 3578 } 3579 } 3580 } 3581 3582 if (clone_entry == NULL) { 3583 /* Clone not found */ 3584 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3585 if (clone_entry == NULL) { 3586 return -ENOMEM; 3587 } 3588 clone_entry->id = blob->id; 3589 TAILQ_INIT(&clone_entry->clones); 3590 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3591 snapshot_entry->clone_count++; 3592 } 3593 3594 return 0; 3595 } 3596 3597 static void 3598 bs_blob_list_remove(struct spdk_blob *blob) 3599 { 3600 struct spdk_blob_list *snapshot_entry = NULL; 3601 struct spdk_blob_list *clone_entry = NULL; 3602 3603 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3604 3605 if (snapshot_entry == NULL) { 3606 return; 3607 } 3608 3609 blob->parent_id = SPDK_BLOBID_INVALID; 3610 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3611 free(clone_entry); 3612 3613 snapshot_entry->clone_count--; 3614 } 3615 3616 static int 3617 bs_blob_list_free(struct spdk_blob_store *bs) 3618 { 3619 struct spdk_blob_list *snapshot_entry; 3620 struct spdk_blob_list *snapshot_entry_tmp; 3621 struct spdk_blob_list *clone_entry; 3622 struct spdk_blob_list *clone_entry_tmp; 3623 3624 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3625 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3626 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3627 free(clone_entry); 3628 } 3629 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3630 free(snapshot_entry); 3631 } 3632 3633 return 0; 3634 } 3635 3636 static void 3637 bs_free(struct spdk_blob_store *bs) 3638 { 3639 bs_blob_list_free(bs); 3640 3641 bs_unregister_md_thread(bs); 3642 spdk_io_device_unregister(bs, bs_dev_destroy); 3643 } 3644 3645 void 3646 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3647 { 3648 3649 if (!opts) { 3650 SPDK_ERRLOG("opts should not be NULL\n"); 3651 return; 3652 } 3653 3654 if (!opts_size) { 3655 SPDK_ERRLOG("opts_size should not be zero value\n"); 3656 return; 3657 } 3658 3659 memset(opts, 0, opts_size); 3660 opts->opts_size = opts_size; 3661 3662 #define FIELD_OK(field) \ 3663 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3664 3665 #define SET_FIELD(field, value) \ 3666 if (FIELD_OK(field)) { \ 3667 opts->field = value; \ 3668 } \ 3669 3670 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3671 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3672 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3673 
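	/*
	 * FIELD_OK/SET_FIELD above only touch fields that lie entirely within
	 * the caller-declared opts_size, which keeps this initializer safe for
	 * callers built against an older, smaller struct spdk_bs_opts.
	 * Typical usage sketch (illustrative override value):
	 *
	 *	struct spdk_bs_opts opts;
	 *
	 *	spdk_bs_opts_init(&opts, sizeof(opts));
	 *	opts.cluster_sz = 4 * 1024 * 1024;
	 */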
SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3674 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3675 3676 if (FIELD_OK(bstype)) { 3677 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3678 } 3679 3680 SET_FIELD(iter_cb_fn, NULL); 3681 SET_FIELD(iter_cb_arg, NULL); 3682 SET_FIELD(force_recover, false); 3683 SET_FIELD(esnap_bs_dev_create, NULL); 3684 SET_FIELD(esnap_ctx, NULL); 3685 3686 #undef FIELD_OK 3687 #undef SET_FIELD 3688 } 3689 3690 static int 3691 bs_opts_verify(struct spdk_bs_opts *opts) 3692 { 3693 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3694 opts->max_channel_ops == 0) { 3695 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3696 return -1; 3697 } 3698 3699 return 0; 3700 } 3701 3702 /* START spdk_bs_load */ 3703 3704 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3705 3706 struct spdk_bs_load_ctx { 3707 struct spdk_blob_store *bs; 3708 struct spdk_bs_super_block *super; 3709 3710 struct spdk_bs_md_mask *mask; 3711 bool in_page_chain; 3712 uint32_t page_index; 3713 uint32_t cur_page; 3714 struct spdk_blob_md_page *page; 3715 3716 uint64_t num_extent_pages; 3717 uint32_t *extent_page_num; 3718 struct spdk_blob_md_page *extent_pages; 3719 struct spdk_bit_array *used_clusters; 3720 3721 spdk_bs_sequence_t *seq; 3722 spdk_blob_op_with_handle_complete iter_cb_fn; 3723 void *iter_cb_arg; 3724 struct spdk_blob *blob; 3725 spdk_blob_id blobid; 3726 3727 bool force_recover; 3728 3729 /* These fields are used in the spdk_bs_dump path. */ 3730 bool dumping; 3731 FILE *fp; 3732 spdk_bs_dump_print_xattr print_xattr_fn; 3733 char xattr_name[4096]; 3734 }; 3735 3736 static int 3737 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3738 struct spdk_bs_load_ctx **_ctx) 3739 { 3740 struct spdk_blob_store *bs; 3741 struct spdk_bs_load_ctx *ctx; 3742 uint64_t dev_size; 3743 int rc; 3744 3745 dev_size = dev->blocklen * dev->blockcnt; 3746 if (dev_size < opts->cluster_sz) { 3747 /* Device size cannot be smaller than cluster size of blobstore */ 3748 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3749 dev_size, opts->cluster_sz); 3750 return -ENOSPC; 3751 } 3752 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 3753 /* Cluster size cannot be smaller than page size */ 3754 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3755 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3756 return -EINVAL; 3757 } 3758 bs = calloc(1, sizeof(struct spdk_blob_store)); 3759 if (!bs) { 3760 return -ENOMEM; 3761 } 3762 3763 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3764 if (!ctx) { 3765 free(bs); 3766 return -ENOMEM; 3767 } 3768 3769 ctx->bs = bs; 3770 ctx->iter_cb_fn = opts->iter_cb_fn; 3771 ctx->iter_cb_arg = opts->iter_cb_arg; 3772 ctx->force_recover = opts->force_recover; 3773 3774 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3775 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3776 if (!ctx->super) { 3777 free(ctx); 3778 free(bs); 3779 return -ENOMEM; 3780 } 3781 3782 RB_INIT(&bs->open_blobs); 3783 TAILQ_INIT(&bs->snapshots); 3784 bs->dev = dev; 3785 bs->md_thread = spdk_get_thread(); 3786 assert(bs->md_thread != NULL); 3787 3788 /* 3789 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3790 * even multiple of the cluster size. 
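 * Example (illustrative): with cluster_sz = 1 MiB and a 512-byte
 * blocklen there are 2048 blocks per cluster, so a device of 10000
 * blocks yields total_clusters = 10000 / 2048 = 4 and the 1808
 * trailing blocks are left unused.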
3791 */ 3792 bs->cluster_sz = opts->cluster_sz; 3793 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3794 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3795 if (!ctx->used_clusters) { 3796 spdk_free(ctx->super); 3797 free(ctx); 3798 free(bs); 3799 return -ENOMEM; 3800 } 3801 3802 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3803 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3804 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3805 } 3806 bs->num_free_clusters = bs->total_clusters; 3807 bs->io_unit_size = dev->blocklen; 3808 3809 bs->max_channel_ops = opts->max_channel_ops; 3810 bs->super_blob = SPDK_BLOBID_INVALID; 3811 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3812 bs->esnap_bs_dev_create = opts->esnap_bs_dev_create; 3813 bs->esnap_ctx = opts->esnap_ctx; 3814 3815 /* The metadata is assumed to be at least 1 page */ 3816 bs->used_md_pages = spdk_bit_array_create(1); 3817 bs->used_blobids = spdk_bit_array_create(0); 3818 bs->open_blobids = spdk_bit_array_create(0); 3819 3820 spdk_spin_init(&bs->used_lock); 3821 3822 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3823 sizeof(struct spdk_bs_channel), "blobstore"); 3824 rc = bs_register_md_thread(bs); 3825 if (rc == -1) { 3826 spdk_io_device_unregister(bs, NULL); 3827 spdk_spin_destroy(&bs->used_lock); 3828 spdk_bit_array_free(&bs->open_blobids); 3829 spdk_bit_array_free(&bs->used_blobids); 3830 spdk_bit_array_free(&bs->used_md_pages); 3831 spdk_bit_array_free(&ctx->used_clusters); 3832 spdk_free(ctx->super); 3833 free(ctx); 3834 free(bs); 3835 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3836 return -ENOMEM; 3837 } 3838 3839 *_ctx = ctx; 3840 *_bs = bs; 3841 return 0; 3842 } 3843 3844 static void 3845 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3846 { 3847 assert(bserrno != 0); 3848 3849 spdk_free(ctx->super); 3850 bs_sequence_finish(ctx->seq, bserrno); 3851 bs_free(ctx->bs); 3852 spdk_bit_array_free(&ctx->used_clusters); 3853 free(ctx); 3854 } 3855 3856 static void 3857 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3858 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3859 { 3860 /* Update the values in the super block */ 3861 super->super_blob = bs->super_blob; 3862 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 3863 super->crc = blob_md_page_calc_crc(super); 3864 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 3865 bs_byte_to_lba(bs, sizeof(*super)), 3866 cb_fn, cb_arg); 3867 } 3868 3869 static void 3870 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3871 { 3872 struct spdk_bs_load_ctx *ctx = arg; 3873 uint64_t mask_size, lba, lba_count; 3874 3875 /* Write out the used clusters mask */ 3876 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3877 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3878 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3879 if (!ctx->mask) { 3880 bs_load_ctx_fail(ctx, -ENOMEM); 3881 return; 3882 } 3883 3884 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 3885 ctx->mask->length = ctx->bs->total_clusters; 3886 /* We could get here through the normal unload path, or through dirty 3887 * shutdown recovery. For the normal unload path, we use the mask from 3888 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 3889 * only the bit array from the load ctx. 
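* Both paths serialize the same bitmap layout into ctx->mask->mask, so the on-disk format is identical either way.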
3890 */ 3891 if (ctx->bs->used_clusters) { 3892 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 3893 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 3894 } else { 3895 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 3896 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 3897 } 3898 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3899 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3900 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3901 } 3902 3903 static void 3904 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3905 { 3906 struct spdk_bs_load_ctx *ctx = arg; 3907 uint64_t mask_size, lba, lba_count; 3908 3909 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3910 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3911 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3912 if (!ctx->mask) { 3913 bs_load_ctx_fail(ctx, -ENOMEM); 3914 return; 3915 } 3916 3917 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 3918 ctx->mask->length = ctx->super->md_len; 3919 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 3920 3921 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 3922 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3923 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3924 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3925 } 3926 3927 static void 3928 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3929 { 3930 struct spdk_bs_load_ctx *ctx = arg; 3931 uint64_t mask_size, lba, lba_count; 3932 3933 if (ctx->super->used_blobid_mask_len == 0) { 3934 /* 3935 * This is a pre-v3 on-disk format where the blobid mask does not get 3936 * written to disk. 
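* There is nothing to write back in that case; on the next load the used_blobids bit array is rebuilt from the raw metadata pages instead (bs_load_super_cpl falls back to bs_recover() when used_blobid_mask_len is 0).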
*/ 3938 cb_fn(seq, arg, 0); 3939 return; 3940 } 3941 3942 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3943 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3944 SPDK_MALLOC_DMA); 3945 if (!ctx->mask) { 3946 bs_load_ctx_fail(ctx, -ENOMEM); 3947 return; 3948 } 3949 3950 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 3951 ctx->mask->length = ctx->super->md_len; 3952 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 3953 3954 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 3955 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3956 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3957 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3958 } 3959 3960 static void 3961 blob_set_thin_provision(struct spdk_blob *blob) 3962 { 3963 blob_verify_md_op(blob); 3964 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 3965 blob->state = SPDK_BLOB_STATE_DIRTY; 3966 } 3967 3968 static void 3969 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 3970 { 3971 blob_verify_md_op(blob); 3972 blob->clear_method = clear_method; 3973 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 3974 blob->state = SPDK_BLOB_STATE_DIRTY; 3975 } 3976 3977 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 3978 3979 static void 3980 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 3981 { 3982 struct spdk_bs_load_ctx *ctx = cb_arg; 3983 spdk_blob_id id; 3984 int64_t page_num; 3985 3986 /* Iterate to the next blob (we can't use the spdk_bs_iter_next function because our 3987 * last blob has been removed) */ 3988 page_num = bs_blobid_to_page(ctx->blobid); 3989 page_num++; 3990 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 3991 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 3992 bs_load_iter(ctx, NULL, -ENOENT); 3993 return; 3994 } 3995 3996 id = bs_page_to_blobid(page_num); 3997 3998 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 3999 } 4000 4001 static void 4002 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 4003 { 4004 struct spdk_bs_load_ctx *ctx = cb_arg; 4005 4006 if (bserrno != 0) { 4007 SPDK_ERRLOG("Failed to close corrupted blob\n"); 4008 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4009 return; 4010 } 4011 4012 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 4013 } 4014 4015 static void 4016 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 4017 { 4018 struct spdk_bs_load_ctx *ctx = cb_arg; 4019 uint64_t i; 4020 4021 if (bserrno != 0) { 4022 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4023 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4024 return; 4025 } 4026 4027 /* Snapshot and clone have the same copy of the cluster map and extent pages 4028 * at this point. Let's clear both for the snapshot now, 4029 * so that they won't be cleared for the clone later when we remove the snapshot.
4030 * Also set thin provision to pass data corruption check */ 4031 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 4032 ctx->blob->active.clusters[i] = 0; 4033 } 4034 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 4035 ctx->blob->active.extent_pages[i] = 0; 4036 } 4037 4038 ctx->blob->md_ro = false; 4039 4040 blob_set_thin_provision(ctx->blob); 4041 4042 ctx->blobid = ctx->blob->id; 4043 4044 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 4045 } 4046 4047 static void 4048 bs_update_corrupted_blob(void *cb_arg, int bserrno) 4049 { 4050 struct spdk_bs_load_ctx *ctx = cb_arg; 4051 4052 if (bserrno != 0) { 4053 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 4054 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4055 return; 4056 } 4057 4058 ctx->blob->md_ro = false; 4059 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 4060 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 4061 spdk_blob_set_read_only(ctx->blob); 4062 4063 if (ctx->iter_cb_fn) { 4064 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 4065 } 4066 bs_blob_list_add(ctx->blob); 4067 4068 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4069 } 4070 4071 static void 4072 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 4073 { 4074 struct spdk_bs_load_ctx *ctx = cb_arg; 4075 4076 if (bserrno != 0) { 4077 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 4078 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 4079 return; 4080 } 4081 4082 if (blob->parent_id == ctx->blob->id) { 4083 /* Power failure occurred before updating clone (snapshot delete case) 4084 * or after updating clone (creating snapshot case) - keep snapshot */ 4085 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 4086 } else { 4087 /* Power failure occurred after updating clone (snapshot delete case) 4088 * or before updating clone (creating snapshot case) - remove snapshot */ 4089 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 4090 } 4091 } 4092 4093 static void 4094 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 4095 { 4096 struct spdk_bs_load_ctx *ctx = arg; 4097 const void *value; 4098 size_t len; 4099 int rc = 0; 4100 4101 if (bserrno == 0) { 4102 /* Examine blob if it is corrupted after power failure. Fix 4103 * the ones that can be fixed and remove any other corrupted 4104 * ones. If it is not corrupted just process it */ 4105 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 4106 if (rc != 0) { 4107 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 4108 if (rc != 0) { 4109 /* Not corrupted - process it and continue with iterating through blobs */ 4110 if (ctx->iter_cb_fn) { 4111 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 4112 } 4113 bs_blob_list_add(blob); 4114 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 4115 return; 4116 } 4117 4118 } 4119 4120 assert(len == sizeof(spdk_blob_id)); 4121 4122 ctx->blob = blob; 4123 4124 /* Open clone to check if we are able to fix this blob or should we remove it */ 4125 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 4126 return; 4127 } else if (bserrno == -ENOENT) { 4128 bserrno = 0; 4129 } else { 4130 /* 4131 * This case needs to be looked at further. Same problem 4132 * exists with applications that rely on explicit blob 4133 * iteration. We should just skip the blob that failed 4134 * to load and continue on to the next one. 
*/ 4136 SPDK_ERRLOG("Error in iterating blobs\n"); 4137 } 4138 4139 ctx->iter_cb_fn = NULL; 4140 4141 spdk_free(ctx->super); 4142 spdk_free(ctx->mask); 4143 bs_sequence_finish(ctx->seq, bserrno); 4144 free(ctx); 4145 } 4146 4147 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 4148 4149 static void 4150 bs_load_complete(struct spdk_bs_load_ctx *ctx) 4151 { 4152 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters); 4153 if (ctx->dumping) { 4154 bs_dump_read_md_page(ctx->seq, ctx); 4155 return; 4156 } 4157 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx); 4158 } 4159 4160 static void 4161 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4162 { 4163 struct spdk_bs_load_ctx *ctx = cb_arg; 4164 int rc; 4165 4166 /* The type must be correct */ 4167 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 4168 4169 /* The length of the mask (in bits) must not be greater than 4170 * the length of the buffer (converted to bits) */ 4171 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 4172 4173 /* The length of the mask must be exactly equal to the size 4174 * (in pages) of the metadata region */ 4175 assert(ctx->mask->length == ctx->super->md_len); 4176 4177 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length); 4178 if (rc < 0) { 4179 spdk_free(ctx->mask); 4180 bs_load_ctx_fail(ctx, rc); 4181 return; 4182 } 4183 4184 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask); 4185 bs_load_complete(ctx); 4186 } 4187 4188 static void 4189 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4190 { 4191 struct spdk_bs_load_ctx *ctx = cb_arg; 4192 uint64_t lba, lba_count, mask_size; 4193 int rc; 4194 4195 if (bserrno != 0) { 4196 bs_load_ctx_fail(ctx, bserrno); 4197 return; 4198 } 4199 4200 /* The type must be correct */ 4201 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 4202 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4203 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 4204 struct spdk_blob_md_page) * 8)); 4205 /* 4206 * The length of the mask must be equal to or larger than the total number of clusters. It may be 4207 * larger than the total number of clusters due to a failed spdk_bs_grow.
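* The extra bits refer to clusters beyond the current end of the blobstore, so it is safe to shrink the mask back to total_clusters below.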
*/ 4209 assert(ctx->mask->length >= ctx->bs->total_clusters); 4210 if (ctx->mask->length > ctx->bs->total_clusters) { 4211 SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n"); 4212 ctx->mask->length = ctx->bs->total_clusters; 4213 } 4214 4215 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length); 4216 if (rc < 0) { 4217 spdk_free(ctx->mask); 4218 bs_load_ctx_fail(ctx, rc); 4219 return; 4220 } 4221 4222 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask); 4223 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters); 4224 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 4225 4226 spdk_free(ctx->mask); 4227 4228 /* Read the used blobids mask */ 4229 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 4230 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 4231 SPDK_MALLOC_DMA); 4232 if (!ctx->mask) { 4233 bs_load_ctx_fail(ctx, -ENOMEM); 4234 return; 4235 } 4236 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 4237 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 4238 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4239 bs_load_used_blobids_cpl, ctx); 4240 } 4241 4242 static void 4243 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4244 { 4245 struct spdk_bs_load_ctx *ctx = cb_arg; 4246 uint64_t lba, lba_count, mask_size; 4247 int rc; 4248 4249 if (bserrno != 0) { 4250 bs_load_ctx_fail(ctx, bserrno); 4251 return; 4252 } 4253 4254 /* The type must be correct */ 4255 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 4256 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 4257 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 4258 8)); 4259 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 4260 if (ctx->mask->length != ctx->super->md_len) { 4261 SPDK_ERRLOG("mismatched md_len in used_pages mask: " 4262 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n", 4263 ctx->mask->length, ctx->super->md_len); 4264 assert(false); 4265 } 4266 4267 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length); 4268 if (rc < 0) { 4269 spdk_free(ctx->mask); 4270 bs_load_ctx_fail(ctx, rc); 4271 return; 4272 } 4273 4274 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask); 4275 spdk_free(ctx->mask); 4276 4277 /* Read the used clusters mask */ 4278 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 4279 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 4280 SPDK_MALLOC_DMA); 4281 if (!ctx->mask) { 4282 bs_load_ctx_fail(ctx, -ENOMEM); 4283 return; 4284 } 4285 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 4286 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 4287 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 4288 bs_load_used_clusters_cpl, ctx); 4289 } 4290 4291 static void 4292 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx) 4293 { 4294 uint64_t lba, lba_count, mask_size; 4295 4296 /* Read the used pages mask */ 4297 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 4298 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 4299 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4300 if (!ctx->mask) { 4301 bs_load_ctx_fail(ctx, -ENOMEM); 4302 return; 4303 } 4304 4305 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 4306 lba_count =
bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 4307 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 4308 bs_load_used_pages_cpl, ctx); 4309 } 4310 4311 static int 4312 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 4313 { 4314 struct spdk_blob_store *bs = ctx->bs; 4315 struct spdk_blob_md_descriptor *desc; 4316 size_t cur_desc = 0; 4317 4318 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4319 while (cur_desc < sizeof(page->descriptors)) { 4320 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4321 if (desc->length == 0) { 4322 /* If padding and length are 0, this terminates the page */ 4323 break; 4324 } 4325 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4326 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4327 unsigned int i, j; 4328 unsigned int cluster_count = 0; 4329 uint32_t cluster_idx; 4330 4331 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4332 4333 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4334 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 4335 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 4336 /* 4337 * cluster_idx = 0 means an unallocated cluster - don't mark that 4338 * in the used cluster map. 4339 */ 4340 if (cluster_idx != 0) { 4341 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 4342 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 4343 if (bs->num_free_clusters == 0) { 4344 return -ENOSPC; 4345 } 4346 bs->num_free_clusters--; 4347 } 4348 cluster_count++; 4349 } 4350 } 4351 if (cluster_count == 0) { 4352 return -EINVAL; 4353 } 4354 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4355 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4356 uint32_t i; 4357 uint32_t cluster_count = 0; 4358 uint32_t cluster_idx; 4359 size_t cluster_idx_length; 4360 4361 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4362 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4363 4364 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4365 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4366 return -EINVAL; 4367 } 4368 4369 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4370 cluster_idx = desc_extent->cluster_idx[i]; 4371 /* 4372 * cluster_idx = 0 means an unallocated cluster - don't mark that 4373 * in the used cluster map. 
4374 */ 4375 if (cluster_idx != 0) { 4376 if (cluster_idx < desc_extent->start_cluster_idx && 4377 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4378 return -EINVAL; 4379 } 4380 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4381 if (bs->num_free_clusters == 0) { 4382 return -ENOSPC; 4383 } 4384 bs->num_free_clusters--; 4385 } 4386 cluster_count++; 4387 } 4388 4389 if (cluster_count == 0) { 4390 return -EINVAL; 4391 } 4392 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4393 /* Skip this item */ 4394 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4395 /* Skip this item */ 4396 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4397 /* Skip this item */ 4398 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4399 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4400 uint32_t num_extent_pages = ctx->num_extent_pages; 4401 uint32_t i; 4402 size_t extent_pages_length; 4403 void *tmp; 4404 4405 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4406 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4407 4408 if (desc_extent_table->length == 0 || 4409 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4410 return -EINVAL; 4411 } 4412 4413 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4414 if (desc_extent_table->extent_page[i].page_idx != 0) { 4415 if (desc_extent_table->extent_page[i].num_pages != 1) { 4416 return -EINVAL; 4417 } 4418 num_extent_pages += 1; 4419 } 4420 } 4421 4422 if (num_extent_pages > 0) { 4423 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4424 if (tmp == NULL) { 4425 return -ENOMEM; 4426 } 4427 ctx->extent_page_num = tmp; 4428 4429 /* Extent table entries contain md page numbers for extent pages. 4430 * Zeroes represent unallocated extent pages, those are run-length-encoded. 4431 */ 4432 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4433 if (desc_extent_table->extent_page[i].page_idx != 0) { 4434 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4435 ctx->num_extent_pages += 1; 4436 } 4437 } 4438 } 4439 } else { 4440 /* Error */ 4441 return -EINVAL; 4442 } 4443 /* Advance to the next descriptor */ 4444 cur_desc += sizeof(*desc) + desc->length; 4445 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4446 break; 4447 } 4448 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4449 } 4450 return 0; 4451 } 4452 4453 static bool 4454 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4455 { 4456 uint32_t crc; 4457 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4458 size_t desc_len; 4459 4460 crc = blob_md_page_calc_crc(page); 4461 if (crc != page->crc) { 4462 return false; 4463 } 4464 4465 /* Extent page should always be of sequence num 0. */ 4466 if (page->sequence_num != 0) { 4467 return false; 4468 } 4469 4470 /* Descriptor type must be EXTENT_PAGE. */ 4471 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4472 return false; 4473 } 4474 4475 /* Descriptor length cannot exceed the page. */ 4476 desc_len = sizeof(*desc) + desc->length; 4477 if (desc_len > sizeof(page->descriptors)) { 4478 return false; 4479 } 4480 4481 /* It has to be the only descriptor in the page. 
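* If another descriptor header would still fit after this one, the next slot must be empty (length 0), which is what the check below verifies.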
*/ 4482 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 4483 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 4484 if (desc->length != 0) { 4485 return false; 4486 } 4487 } 4488 4489 return true; 4490 } 4491 4492 static bool 4493 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 4494 { 4495 uint32_t crc; 4496 struct spdk_blob_md_page *page = ctx->page; 4497 4498 crc = blob_md_page_calc_crc(page); 4499 if (crc != page->crc) { 4500 return false; 4501 } 4502 4503 /* First page of a sequence should match the blobid. */ 4504 if (page->sequence_num == 0 && 4505 bs_page_to_blobid(ctx->cur_page) != page->id) { 4506 return false; 4507 } 4508 assert(bs_load_cur_extent_page_valid(page) == false); 4509 4510 return true; 4511 } 4512 4513 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx); 4514 4515 static void 4516 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4517 { 4518 struct spdk_bs_load_ctx *ctx = cb_arg; 4519 4520 if (bserrno != 0) { 4521 bs_load_ctx_fail(ctx, bserrno); 4522 return; 4523 } 4524 4525 bs_load_complete(ctx); 4526 } 4527 4528 static void 4529 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4530 { 4531 struct spdk_bs_load_ctx *ctx = cb_arg; 4532 4533 spdk_free(ctx->mask); 4534 ctx->mask = NULL; 4535 4536 if (bserrno != 0) { 4537 bs_load_ctx_fail(ctx, bserrno); 4538 return; 4539 } 4540 4541 bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl); 4542 } 4543 4544 static void 4545 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4546 { 4547 struct spdk_bs_load_ctx *ctx = cb_arg; 4548 4549 spdk_free(ctx->mask); 4550 ctx->mask = NULL; 4551 4552 if (bserrno != 0) { 4553 bs_load_ctx_fail(ctx, bserrno); 4554 return; 4555 } 4556 4557 bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl); 4558 } 4559 4560 static void 4561 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx) 4562 { 4563 bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl); 4564 } 4565 4566 static void 4567 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx) 4568 { 4569 uint64_t num_md_clusters; 4570 uint64_t i; 4571 4572 ctx->in_page_chain = false; 4573 4574 do { 4575 ctx->page_index++; 4576 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 4577 4578 if (ctx->page_index < ctx->super->md_len) { 4579 ctx->cur_page = ctx->page_index; 4580 bs_load_replay_cur_md_page(ctx); 4581 } else { 4582 /* Claim all of the clusters used by the metadata */ 4583 num_md_clusters = spdk_divide_round_up( 4584 ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster); 4585 for (i = 0; i < num_md_clusters; i++) { 4586 spdk_bit_array_set(ctx->used_clusters, i); 4587 } 4588 ctx->bs->num_free_clusters -= num_md_clusters; 4589 spdk_free(ctx->page); 4590 bs_load_write_used_md(ctx); 4591 } 4592 } 4593 4594 static void 4595 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4596 { 4597 struct spdk_bs_load_ctx *ctx = cb_arg; 4598 uint32_t page_num; 4599 uint64_t i; 4600 4601 if (bserrno != 0) { 4602 spdk_free(ctx->extent_pages); 4603 bs_load_ctx_fail(ctx, bserrno); 4604 return; 4605 } 4606 4607 for (i = 0; i < ctx->num_extent_pages; i++) { 4608 /* Extent pages are only read when referenced from within the md chain. 4609 * Metadata integrity is broken if such a page is not a valid extent page.
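* Treat an invalid extent page as corrupted metadata and fail the whole load with -EILSEQ, since the rest of the replayed chain cannot be trusted.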
*/ 4610 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4611 spdk_free(ctx->extent_pages); 4612 bs_load_ctx_fail(ctx, -EILSEQ); 4613 return; 4614 } 4615 4616 page_num = ctx->extent_page_num[i]; 4617 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4618 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4619 spdk_free(ctx->extent_pages); 4620 bs_load_ctx_fail(ctx, -EILSEQ); 4621 return; 4622 } 4623 } 4624 4625 spdk_free(ctx->extent_pages); 4626 free(ctx->extent_page_num); 4627 ctx->extent_page_num = NULL; 4628 ctx->num_extent_pages = 0; 4629 4630 bs_load_replay_md_chain_cpl(ctx); 4631 } 4632 4633 static void 4634 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4635 { 4636 spdk_bs_batch_t *batch; 4637 uint32_t page; 4638 uint64_t lba; 4639 uint64_t i; 4640 4641 ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0, 4642 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4643 if (!ctx->extent_pages) { 4644 bs_load_ctx_fail(ctx, -ENOMEM); 4645 return; 4646 } 4647 4648 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4649 4650 for (i = 0; i < ctx->num_extent_pages; i++) { 4651 page = ctx->extent_page_num[i]; 4652 assert(page < ctx->super->md_len); 4653 lba = bs_md_page_to_lba(ctx->bs, page); 4654 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4655 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE)); 4656 } 4657 4658 bs_batch_close(batch); 4659 } 4660 4661 static void 4662 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4663 { 4664 struct spdk_bs_load_ctx *ctx = cb_arg; 4665 uint32_t page_num; 4666 struct spdk_blob_md_page *page; 4667 4668 if (bserrno != 0) { 4669 bs_load_ctx_fail(ctx, bserrno); 4670 return; 4671 } 4672 4673 page_num = ctx->cur_page; 4674 page = ctx->page; 4675 if (bs_load_cur_md_page_valid(ctx) == true) { 4676 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4677 spdk_spin_lock(&ctx->bs->used_lock); 4678 bs_claim_md_page(ctx->bs, page_num); 4679 spdk_spin_unlock(&ctx->bs->used_lock); 4680 if (page->sequence_num == 0) { 4681 SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num); 4682 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4683 } 4684 if (bs_load_replay_md_parse_page(ctx, page)) { 4685 bs_load_ctx_fail(ctx, -EILSEQ); 4686 return; 4687 } 4688 if (page->next != SPDK_INVALID_MD_PAGE) { 4689 ctx->in_page_chain = true; 4690 ctx->cur_page = page->next; 4691 bs_load_replay_cur_md_page(ctx); 4692 return; 4693 } 4694 if (ctx->num_extent_pages != 0) { 4695 bs_load_replay_extent_pages(ctx); 4696 return; 4697 } 4698 } 4699 } 4700 bs_load_replay_md_chain_cpl(ctx); 4701 } 4702 4703 static void 4704 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4705 { 4706 uint64_t lba; 4707 4708 assert(ctx->cur_page < ctx->super->md_len); 4709 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4710 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4711 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4712 bs_load_replay_md_cpl, ctx); 4713 } 4714 4715 static void 4716 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4717 { 4718 ctx->page_index = 0; 4719 ctx->cur_page = 0; 4720 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4721 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4722 if (!ctx->page) { 4723 bs_load_ctx_fail(ctx, -ENOMEM); 4724 return; 4725 } 4726 bs_load_replay_cur_md_page(ctx); 4727 } 4728 4729 static void 4730 bs_recover(struct spdk_bs_load_ctx *ctx) 4731 { 4732 int rc; 4733 4734 SPDK_NOTICELOG("Performing recovery on blobstore\n"); 4735 rc = 
spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4736 if (rc < 0) { 4737 bs_load_ctx_fail(ctx, -ENOMEM); 4738 return; 4739 } 4740 4741 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4742 if (rc < 0) { 4743 bs_load_ctx_fail(ctx, -ENOMEM); 4744 return; 4745 } 4746 4747 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4748 if (rc < 0) { 4749 bs_load_ctx_fail(ctx, -ENOMEM); 4750 return; 4751 } 4752 4753 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4754 if (rc < 0) { 4755 bs_load_ctx_fail(ctx, -ENOMEM); 4756 return; 4757 } 4758 4759 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4760 bs_load_replay_md(ctx); 4761 } 4762 4763 static int 4764 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4765 { 4766 int rc; 4767 4768 if (ctx->super->size == 0) { 4769 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4770 } 4771 4772 if (ctx->super->io_unit_size == 0) { 4773 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4774 } 4775 4776 ctx->bs->clean = 1; 4777 ctx->bs->cluster_sz = ctx->super->cluster_size; 4778 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4779 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 4780 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) { 4781 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster); 4782 } 4783 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4784 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4785 if (rc < 0) { 4786 return -ENOMEM; 4787 } 4788 ctx->bs->md_start = ctx->super->md_start; 4789 ctx->bs->md_len = ctx->super->md_len; 4790 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4791 if (rc < 0) { 4792 return -ENOMEM; 4793 } 4794 4795 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4796 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4797 ctx->bs->super_blob = ctx->super->super_blob; 4798 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4799 4800 return 0; 4801 } 4802 4803 static void 4804 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4805 { 4806 struct spdk_bs_load_ctx *ctx = cb_arg; 4807 int rc; 4808 4809 rc = bs_super_validate(ctx->super, ctx->bs); 4810 if (rc != 0) { 4811 bs_load_ctx_fail(ctx, rc); 4812 return; 4813 } 4814 4815 rc = bs_parse_super(ctx); 4816 if (rc < 0) { 4817 bs_load_ctx_fail(ctx, rc); 4818 return; 4819 } 4820 4821 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4822 bs_recover(ctx); 4823 } else { 4824 bs_load_read_used_pages(ctx); 4825 } 4826 } 4827 4828 static inline int 4829 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4830 { 4831 4832 if (!src->opts_size) { 4833 SPDK_ERRLOG("opts_size must not be zero\n"); 4834 return -1; 4835 } 4836 4837 #define FIELD_OK(field) \ 4838 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4839 4840 #define SET_FIELD(field) \ 4841 if (FIELD_OK(field)) { \ 4842 dst->field = src->field; \ 4843 } \ 4844 4845 SET_FIELD(cluster_sz); 4846 SET_FIELD(num_md_pages); 4847 SET_FIELD(max_md_ops); 4848 SET_FIELD(max_channel_ops); 4849 SET_FIELD(clear_method); 4850 4851 if (FIELD_OK(bstype)) { 4852 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 4853 } 4854 SET_FIELD(iter_cb_fn); 4855 SET_FIELD(iter_cb_arg); 4856 SET_FIELD(force_recover); 4857 SET_FIELD(esnap_bs_dev_create); 4858
SET_FIELD(esnap_ctx); 4859 4860 dst->opts_size = src->opts_size; 4861 4862 /* Do not remove this statement. If you add a new field, update the assert 4863 * below and add a corresponding SET_FIELD statement above. */ 4864 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size"); 4865 4866 #undef FIELD_OK 4867 #undef SET_FIELD 4868 4869 return 0; 4870 } 4871 4872 void 4873 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4874 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4875 { 4876 struct spdk_blob_store *bs; 4877 struct spdk_bs_cpl cpl; 4878 struct spdk_bs_load_ctx *ctx; 4879 struct spdk_bs_opts opts = {}; 4880 int err; 4881 4882 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 4883 4884 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 4885 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 4886 dev->destroy(dev); 4887 cb_fn(cb_arg, NULL, -EINVAL); 4888 return; 4889 } 4890 4891 spdk_bs_opts_init(&opts, sizeof(opts)); 4892 if (o) { 4893 if (bs_opts_copy(o, &opts)) { 4894 return; 4895 } 4896 } 4897 4898 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 4899 dev->destroy(dev); 4900 cb_fn(cb_arg, NULL, -EINVAL); 4901 return; 4902 } 4903 4904 err = bs_alloc(dev, &opts, &bs, &ctx); 4905 if (err) { 4906 dev->destroy(dev); 4907 cb_fn(cb_arg, NULL, err); 4908 return; 4909 } 4910 4911 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 4912 cpl.u.bs_handle.cb_fn = cb_fn; 4913 cpl.u.bs_handle.cb_arg = cb_arg; 4914 cpl.u.bs_handle.bs = bs; 4915 4916 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 4917 if (!ctx->seq) { 4918 spdk_free(ctx->super); 4919 free(ctx); 4920 bs_free(bs); 4921 cb_fn(cb_arg, NULL, -ENOMEM); 4922 return; 4923 } 4924 4925 /* Read the super block */ 4926 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 4927 bs_byte_to_lba(bs, sizeof(*ctx->super)), 4928 bs_load_super_cpl, ctx); 4929 } 4930 4931 /* END spdk_bs_load */ 4932 4933 /* START spdk_bs_dump */ 4934 4935 static void 4936 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 4937 { 4938 spdk_free(ctx->super); 4939 4940 /* 4941 * We need to defer calling bs_call_cpl() until after 4942 * dev destruction, so tuck these away for later use.
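* bs_free() unregisters the blobstore io_device; the stashed completion is finally invoked from bs_dev_destroy() once that unregistration completes.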
4943 */ 4944 ctx->bs->unload_err = bserrno; 4945 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 4946 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 4947 4948 bs_sequence_finish(seq, 0); 4949 bs_free(ctx->bs); 4950 free(ctx); 4951 } 4952 4953 static void 4954 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4955 { 4956 struct spdk_blob_md_descriptor_xattr *desc_xattr; 4957 uint32_t i; 4958 const char *type; 4959 4960 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 4961 4962 if (desc_xattr->length != 4963 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 4964 desc_xattr->name_length + desc_xattr->value_length) { 4965 /* A length mismatch indicates corrupted metadata; report it in the dump output rather than silently ignoring it. */ fprintf(ctx->fp, "Error: xattr descriptor length mismatch\n"); } 4966 4967 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 4968 ctx->xattr_name[desc_xattr->name_length] = '\0'; 4969 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4970 type = "XATTR"; 4971 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4972 type = "XATTR_INTERNAL"; 4973 } else { 4974 assert(false); 4975 type = "XATTR_?"; 4976 } 4977 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name); 4978 fprintf(ctx->fp, " value = \""); 4979 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 4980 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 4981 desc_xattr->value_length); 4982 fprintf(ctx->fp, "\"\n"); 4983 for (i = 0; i < desc_xattr->value_length; i++) { 4984 if (i % 16 == 0) { 4985 fprintf(ctx->fp, " "); 4986 } 4987 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 4988 if ((i + 1) % 16 == 0) { 4989 fprintf(ctx->fp, "\n"); 4990 } 4991 } 4992 if (i % 16 != 0) { 4993 fprintf(ctx->fp, "\n"); 4994 } 4995 } 4996 4997 struct type_flag_desc { 4998 uint64_t mask; 4999 uint64_t val; 5000 const char *name; 5001 }; 5002 5003 static void 5004 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags, 5005 struct type_flag_desc *desc, size_t numflags) 5006 { 5007 uint64_t covered = 0; 5008 size_t i; 5009 5010 for (i = 0; i < numflags; i++) { 5011 if ((desc[i].mask & flags) != desc[i].val) { 5012 continue; 5013 } 5014 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name); 5015 if (desc[i].mask != desc[i].val) { 5016 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")", 5017 desc[i].mask, desc[i].val); 5018 } 5019 fprintf(ctx->fp, "\n"); 5020 covered |= desc[i].mask; 5021 } 5022 if ((flags & ~covered) != 0) { 5023 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered); 5024 } 5025 } 5026 5027 static void 5028 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5029 { 5030 struct spdk_blob_md_descriptor_flags *type_desc; 5031 #define ADD_FLAG(f) { f, f, #f } 5032 #define ADD_MASK_VAL(m, v) { m, v, #v } 5033 static struct type_flag_desc invalid[] = { 5034 ADD_FLAG(SPDK_BLOB_THIN_PROV), 5035 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR), 5036 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE), 5037 }; 5038 static struct type_flag_desc data_ro[] = { 5039 ADD_FLAG(SPDK_BLOB_READ_ONLY), 5040 }; 5041 static struct type_flag_desc md_ro[] = { 5042 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT), 5043 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE), 5044 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP), 5045 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES), 5046 }; 5047 #undef ADD_FLAG 5048 #undef ADD_MASK_VAL 5049 5050 type_desc = (struct
spdk_blob_md_descriptor_flags *)desc; 5051 fprintf(ctx->fp, "Flags:\n"); 5052 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 5053 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 5054 SPDK_COUNTOF(invalid)); 5055 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 5056 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 5057 SPDK_COUNTOF(data_ro)); 5058 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 5059 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 5060 SPDK_COUNTOF(md_ro)); 5061 } 5062 5063 static void 5064 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 5065 { 5066 struct spdk_blob_md_descriptor_extent_table *et_desc; 5067 uint64_t num_extent_pages; 5068 uint32_t et_idx; 5069 5070 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 5071 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 5072 sizeof(et_desc->extent_page[0]); 5073 5074 fprintf(ctx->fp, "Extent table:\n"); 5075 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 5076 if (et_desc->extent_page[et_idx].page_idx == 0) { 5077 /* Zeroes represent unallocated extent pages. */ 5078 continue; 5079 } 5080 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 5081 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 5082 et_desc->extent_page[et_idx].num_pages, 5083 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 5084 } 5085 } 5086 5087 static void 5088 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 5089 { 5090 uint32_t page_idx = ctx->cur_page; 5091 struct spdk_blob_md_page *page = ctx->page; 5092 struct spdk_blob_md_descriptor *desc; 5093 size_t cur_desc = 0; 5094 uint32_t crc; 5095 5096 fprintf(ctx->fp, "=========\n"); 5097 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 5098 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 5099 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 5100 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 5101 if (page->next == SPDK_INVALID_MD_PAGE) { 5102 fprintf(ctx->fp, "Next: None\n"); 5103 } else { 5104 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 5105 } 5106 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 5107 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 5108 fprintf(ctx->fp, " md"); 5109 } 5110 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 5111 fprintf(ctx->fp, " blob"); 5112 } 5113 fprintf(ctx->fp, "\n"); 5114 5115 crc = blob_md_page_calc_crc(page); 5116 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 5117 5118 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 5119 while (cur_desc < sizeof(page->descriptors)) { 5120 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 5121 if (desc->length == 0) { 5122 /* If padding and length are 0, this terminates the page */ 5123 break; 5124 } 5125 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 5126 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 5127 unsigned int i; 5128 5129 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 5130 5131 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 5132 if (desc_extent_rle->extents[i].cluster_idx != 0) { 5133 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5134 desc_extent_rle->extents[i].cluster_idx); 5135 } else { 5136 fprintf(ctx->fp, "Unallocated Extent - "); 5137 } 5138 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 5139 fprintf(ctx->fp, "\n"); 5140 } 5141 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 5142 struct spdk_blob_md_descriptor_extent_page *desc_extent; 5143 unsigned int i; 5144 5145 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 5146 5147 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 5148 if (desc_extent->cluster_idx[i] != 0) { 5149 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 5150 desc_extent->cluster_idx[i]); 5151 } else { 5152 fprintf(ctx->fp, "Unallocated Extent"); 5153 } 5154 fprintf(ctx->fp, "\n"); 5155 } 5156 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 5157 bs_dump_print_xattr(ctx, desc); 5158 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 5159 bs_dump_print_xattr(ctx, desc); 5160 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 5161 bs_dump_print_type_flags(ctx, desc); 5162 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 5163 bs_dump_print_extent_table(ctx, desc); 5164 } else { 5165 /* Error */ 5166 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 5167 } 5168 /* Advance to the next descriptor */ 5169 cur_desc += sizeof(*desc) + desc->length; 5170 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 5171 break; 5172 } 5173 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 5174 } 5175 } 5176 5177 static void 5178 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5179 { 5180 struct spdk_bs_load_ctx *ctx = cb_arg; 5181 5182 if (bserrno != 0) { 5183 bs_dump_finish(seq, ctx, bserrno); 5184 return; 5185 } 5186 5187 if (ctx->page->id != 0) { 5188 bs_dump_print_md_page(ctx); 5189 } 5190 5191 ctx->cur_page++; 5192 5193 if (ctx->cur_page < ctx->super->md_len) { 5194 bs_dump_read_md_page(seq, ctx); 5195 } else { 5196 spdk_free(ctx->page); 5197 bs_dump_finish(seq, ctx, 0); 5198 } 5199 } 5200 5201 static void 5202 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 5203 { 5204 struct spdk_bs_load_ctx *ctx = cb_arg; 5205 uint64_t lba; 5206 5207 assert(ctx->cur_page < ctx->super->md_len); 5208 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 5209 bs_sequence_read_dev(seq, ctx->page, lba, 5210 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 5211 bs_dump_read_md_page_cpl, ctx); 5212 } 5213 5214 static void 5215 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5216 { 5217 struct spdk_bs_load_ctx *ctx = cb_arg; 5218 int rc; 5219 5220 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 5221 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5222 sizeof(ctx->super->signature)) != 0) { 5223 fprintf(ctx->fp, "(Mismatch)\n"); 5224 bs_dump_finish(seq, ctx, bserrno); 5225 return; 5226 } else { 5227 fprintf(ctx->fp, "(OK)\n"); 5228 } 5229 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 5230 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 5231 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 5232 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 5233 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 5234 fprintf(ctx->fp, "Super Blob ID: "); 5235 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 5236 fprintf(ctx->fp, "(None)\n"); 5237 } else { 5238 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 5239 } 5240 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 5241 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 5242 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 5243 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 5244 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 5245 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 5246 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 5247 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 5248 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 5249 5250 ctx->cur_page = 0; 5251 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 5252 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5253 if (!ctx->page) { 5254 bs_dump_finish(seq, ctx, -ENOMEM); 5255 return; 5256 } 5257 5258 rc = bs_parse_super(ctx); 5259 if (rc < 0) { 5260 bs_load_ctx_fail(ctx, rc); 5261 return; 5262 } 5263 5264 bs_load_read_used_pages(ctx); 5265 } 5266 5267 void 5268 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 5269 spdk_bs_op_complete cb_fn, void *cb_arg) 5270 { 5271 struct spdk_blob_store *bs; 5272 struct spdk_bs_cpl cpl; 5273 struct spdk_bs_load_ctx *ctx; 5274 struct spdk_bs_opts opts = {}; 5275 int err; 5276 5277 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 5278 5279 spdk_bs_opts_init(&opts, sizeof(opts)); 5280 5281 err = bs_alloc(dev, &opts, &bs, &ctx); 5282 if (err) { 5283 dev->destroy(dev); 5284 cb_fn(cb_arg, err); 5285 return; 5286 } 5287 5288 ctx->dumping = true; 5289 ctx->fp = fp; 5290 ctx->print_xattr_fn = print_xattr_fn; 5291 5292 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5293 cpl.u.bs_basic.cb_fn = cb_fn; 5294 cpl.u.bs_basic.cb_arg = cb_arg; 5295 5296 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5297 if (!ctx->seq) { 5298 spdk_free(ctx->super); 5299 free(ctx); 5300 bs_free(bs); 5301 cb_fn(cb_arg, -ENOMEM); 5302 return; 5303 } 5304 5305 /* Read the super block */ 5306 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5307 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5308 bs_dump_super_cpl, ctx); 5309 } 5310 5311 /* END spdk_bs_dump */ 5312 5313 /* START spdk_bs_init */ 5314 5315 static void 5316 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5317 { 5318 struct spdk_bs_load_ctx *ctx = cb_arg; 5319 5320 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 5321 spdk_free(ctx->super); 5322 free(ctx); 5323 5324 bs_sequence_finish(seq, bserrno); 5325 } 5326 5327 static void 5328 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5329 { 5330 struct spdk_bs_load_ctx *ctx = cb_arg; 5331 5332 /* Write super block */ 5333 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5334 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5335 bs_init_persist_super_cpl, ctx); 5336 } 5337 5338 void 5339 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5340 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5341 { 5342 struct spdk_bs_load_ctx *ctx; 5343 struct spdk_blob_store *bs; 5344 struct spdk_bs_cpl cpl; 5345 spdk_bs_sequence_t *seq; 5346 spdk_bs_batch_t *batch; 5347 uint64_t num_md_lba; 5348 uint64_t num_md_pages; 5349 uint64_t num_md_clusters; 5350 uint64_t max_used_cluster_mask_len; 5351 uint32_t i; 5352 struct spdk_bs_opts opts = {}; 5353 int rc; 5354 uint64_t lba, lba_count; 5355 5356 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5357 5358 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5359 SPDK_ERRLOG("unsupported dev block length of %d\n", 5360 dev->blocklen); 5361 dev->destroy(dev); 5362 cb_fn(cb_arg, NULL, -EINVAL); 5363 return; 5364 } 5365 5366 spdk_bs_opts_init(&opts, sizeof(opts)); 5367 if (o) { 5368 if (bs_opts_copy(o, &opts)) { 5369 return; 5370 } 5371 } 5372 5373 if (bs_opts_verify(&opts) != 0) { 5374 dev->destroy(dev); 5375 cb_fn(cb_arg, NULL, -EINVAL); 5376 return; 5377 } 5378 5379 rc = bs_alloc(dev, &opts, &bs, &ctx); 5380 if (rc) { 5381 dev->destroy(dev); 5382 cb_fn(cb_arg, NULL, rc); 5383 return; 5384 } 5385 5386 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5387 /* By default, allocate 1 page per cluster. 5388 * Technically, this over-allocates metadata 5389 * because more metadata will reduce the number 5390 * of usable clusters. This can be addressed with 5391 * more complex math in the future. 5392 */ 5393 bs->md_len = bs->total_clusters; 5394 } else { 5395 bs->md_len = opts.num_md_pages; 5396 } 5397 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5398 if (rc < 0) { 5399 spdk_free(ctx->super); 5400 free(ctx); 5401 bs_free(bs); 5402 cb_fn(cb_arg, NULL, -ENOMEM); 5403 return; 5404 } 5405 5406 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5407 if (rc < 0) { 5408 spdk_free(ctx->super); 5409 free(ctx); 5410 bs_free(bs); 5411 cb_fn(cb_arg, NULL, -ENOMEM); 5412 return; 5413 } 5414 5415 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5416 if (rc < 0) { 5417 spdk_free(ctx->super); 5418 free(ctx); 5419 bs_free(bs); 5420 cb_fn(cb_arg, NULL, -ENOMEM); 5421 return; 5422 } 5423 5424 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5425 sizeof(ctx->super->signature)); 5426 ctx->super->version = SPDK_BS_VERSION; 5427 ctx->super->length = sizeof(*ctx->super); 5428 ctx->super->super_blob = bs->super_blob; 5429 ctx->super->clean = 0; 5430 ctx->super->cluster_size = bs->cluster_sz; 5431 ctx->super->io_unit_size = bs->io_unit_size; 5432 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5433 5434 /* Calculate how many pages the metadata consumes at the front 5435 * of the disk. 5436 */ 5437 5438 /* The super block uses 1 page */ 5439 num_md_pages = 1; 5440 5441 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5442 * up to the nearest page, plus a header. 
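* For example (illustrative numbers, assuming 4 KiB metadata pages): with md_len = 32768 the mask needs 32768 bits = 4096 bytes, plus the small spdk_bs_md_mask header, which rounds up to used_page_mask_len = 2 pages.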
5443 */ 5444 ctx->super->used_page_mask_start = num_md_pages; 5445 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5446 spdk_divide_round_up(bs->md_len, 8), 5447 SPDK_BS_PAGE_SIZE); 5448 num_md_pages += ctx->super->used_page_mask_len; 5449 5450 /* The used_clusters mask requires 1 bit per cluster, rounded 5451 * up to the nearest page, plus a header. 5452 */ 5453 ctx->super->used_cluster_mask_start = num_md_pages; 5454 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5455 spdk_divide_round_up(bs->total_clusters, 8), 5456 SPDK_BS_PAGE_SIZE); 5457 /* The blobstore might be extended, then the used_cluster bitmap will need more space. 5458 * Here we calculate the max clusters we can support according to the 5459 * num_md_pages (bs->md_len). 5460 */ 5461 max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5462 spdk_divide_round_up(bs->md_len, 8), 5463 SPDK_BS_PAGE_SIZE); 5464 max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len, 5465 ctx->super->used_cluster_mask_len); 5466 num_md_pages += max_used_cluster_mask_len; 5467 5468 /* The used_blobids mask requires 1 bit per metadata page, rounded 5469 * up to the nearest page, plus a header. 5470 */ 5471 ctx->super->used_blobid_mask_start = num_md_pages; 5472 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5473 spdk_divide_round_up(bs->md_len, 8), 5474 SPDK_BS_PAGE_SIZE); 5475 num_md_pages += ctx->super->used_blobid_mask_len; 5476 5477 /* The metadata region size was chosen above */ 5478 ctx->super->md_start = bs->md_start = num_md_pages; 5479 ctx->super->md_len = bs->md_len; 5480 num_md_pages += bs->md_len; 5481 5482 num_md_lba = bs_page_to_lba(bs, num_md_pages); 5483 5484 ctx->super->size = dev->blockcnt * dev->blocklen; 5485 5486 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 5487 5488 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 5489 if (num_md_clusters > bs->total_clusters) { 5490 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 5491 "please decrease number of pages reserved for metadata " 5492 "or increase cluster size.\n"); 5493 spdk_free(ctx->super); 5494 spdk_bit_array_free(&ctx->used_clusters); 5495 free(ctx); 5496 bs_free(bs); 5497 cb_fn(cb_arg, NULL, -ENOMEM); 5498 return; 5499 } 5500 /* Claim all of the clusters used by the metadata */ 5501 for (i = 0; i < num_md_clusters; i++) { 5502 spdk_bit_array_set(ctx->used_clusters, i); 5503 } 5504 5505 bs->num_free_clusters -= num_md_clusters; 5506 bs->total_data_clusters = bs->num_free_clusters; 5507 5508 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5509 cpl.u.bs_handle.cb_fn = cb_fn; 5510 cpl.u.bs_handle.cb_arg = cb_arg; 5511 cpl.u.bs_handle.bs = bs; 5512 5513 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5514 if (!seq) { 5515 spdk_free(ctx->super); 5516 free(ctx); 5517 bs_free(bs); 5518 cb_fn(cb_arg, NULL, -ENOMEM); 5519 return; 5520 } 5521 5522 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx); 5523 5524 /* Clear metadata space */ 5525 bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 5526 5527 lba = num_md_lba; 5528 lba_count = ctx->bs->dev->blockcnt - lba; 5529 switch (opts.clear_method) { 5530 case BS_CLEAR_WITH_UNMAP: 5531 /* Trim data clusters */ 5532 bs_batch_unmap_dev(batch, lba, lba_count); 5533 break; 5534 case BS_CLEAR_WITH_WRITE_ZEROES: 5535 /* Write_zeroes to data clusters */ 5536 bs_batch_write_zeroes_dev(batch, lba, lba_count); 5537 break; 5538 case 
BS_CLEAR_WITH_NONE: 5539 default: 5540 break; 5541 } 5542 5543 bs_batch_close(batch); 5544 } 5545 5546 /* END spdk_bs_init */ 5547 5548 /* START spdk_bs_destroy */ 5549 5550 static void 5551 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5552 { 5553 struct spdk_bs_load_ctx *ctx = cb_arg; 5554 struct spdk_blob_store *bs = ctx->bs; 5555 5556 /* 5557 * We need to defer calling bs_call_cpl() until after 5558 * dev destruction, so tuck these away for later use. 5559 */ 5560 bs->unload_err = bserrno; 5561 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5562 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5563 5564 bs_sequence_finish(seq, bserrno); 5565 5566 bs_free(bs); 5567 free(ctx); 5568 } 5569 5570 void 5571 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5572 void *cb_arg) 5573 { 5574 struct spdk_bs_cpl cpl; 5575 spdk_bs_sequence_t *seq; 5576 struct spdk_bs_load_ctx *ctx; 5577 5578 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5579 5580 if (!RB_EMPTY(&bs->open_blobs)) { 5581 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5582 cb_fn(cb_arg, -EBUSY); 5583 return; 5584 } 5585 5586 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5587 cpl.u.bs_basic.cb_fn = cb_fn; 5588 cpl.u.bs_basic.cb_arg = cb_arg; 5589 5590 ctx = calloc(1, sizeof(*ctx)); 5591 if (!ctx) { 5592 cb_fn(cb_arg, -ENOMEM); 5593 return; 5594 } 5595 5596 ctx->bs = bs; 5597 5598 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5599 if (!seq) { 5600 free(ctx); 5601 cb_fn(cb_arg, -ENOMEM); 5602 return; 5603 } 5604 5605 /* Write zeroes to the super block */ 5606 bs_sequence_write_zeroes_dev(seq, 5607 bs_page_to_lba(bs, 0), 5608 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5609 bs_destroy_trim_cpl, ctx); 5610 } 5611 5612 /* END spdk_bs_destroy */ 5613 5614 /* START spdk_bs_unload */ 5615 5616 static void 5617 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5618 { 5619 spdk_bs_sequence_t *seq = ctx->seq; 5620 5621 spdk_free(ctx->super); 5622 5623 /* 5624 * We need to defer calling bs_call_cpl() until after 5625 * dev destruction, so tuck these away for later use. 
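 * (They are picked up again once bs_free() below has finished tearing down
 * the device, at which point the stored completion is invoked with the
 * stored error code.)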
5626 */ 5627 ctx->bs->unload_err = bserrno; 5628 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5629 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5630 5631 bs_sequence_finish(seq, bserrno); 5632 5633 bs_free(ctx->bs); 5634 free(ctx); 5635 } 5636 5637 static void 5638 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5639 { 5640 struct spdk_bs_load_ctx *ctx = cb_arg; 5641 5642 bs_unload_finish(ctx, bserrno); 5643 } 5644 5645 static void 5646 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5647 { 5648 struct spdk_bs_load_ctx *ctx = cb_arg; 5649 5650 spdk_free(ctx->mask); 5651 5652 if (bserrno != 0) { 5653 bs_unload_finish(ctx, bserrno); 5654 return; 5655 } 5656 5657 ctx->super->clean = 1; 5658 5659 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5660 } 5661 5662 static void 5663 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5664 { 5665 struct spdk_bs_load_ctx *ctx = cb_arg; 5666 5667 spdk_free(ctx->mask); 5668 ctx->mask = NULL; 5669 5670 if (bserrno != 0) { 5671 bs_unload_finish(ctx, bserrno); 5672 return; 5673 } 5674 5675 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5676 } 5677 5678 static void 5679 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5680 { 5681 struct spdk_bs_load_ctx *ctx = cb_arg; 5682 5683 spdk_free(ctx->mask); 5684 ctx->mask = NULL; 5685 5686 if (bserrno != 0) { 5687 bs_unload_finish(ctx, bserrno); 5688 return; 5689 } 5690 5691 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5692 } 5693 5694 static void 5695 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5696 { 5697 struct spdk_bs_load_ctx *ctx = cb_arg; 5698 int rc; 5699 5700 if (bserrno != 0) { 5701 bs_unload_finish(ctx, bserrno); 5702 return; 5703 } 5704 5705 rc = bs_super_validate(ctx->super, ctx->bs); 5706 if (rc != 0) { 5707 bs_unload_finish(ctx, rc); 5708 return; 5709 } 5710 5711 bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5712 } 5713 5714 void 5715 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5716 { 5717 struct spdk_bs_cpl cpl; 5718 struct spdk_bs_load_ctx *ctx; 5719 5720 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5721 5722 /* 5723 * If external snapshot channels are being destroyed while the blobstore is unloaded, the 5724 * unload is deferred until after the channel destruction completes. 
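 *
 * For illustration, a minimal caller-side sketch (the callback name is
 * hypothetical; the blobstore must have no open blobs):
 *
 *     static void
 *     unload_done(void *cb_arg, int bserrno)
 *     {
 *             if (bserrno != 0) {
 *                     SPDK_ERRLOG("Blobstore unload failed: %d\n", bserrno);
 *             }
 *     }
 *
 *     spdk_bs_unload(bs, unload_done, NULL);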
5725 */ 5726 if (bs->esnap_channels_unloading != 0) { 5727 if (bs->esnap_unload_cb_fn != NULL) { 5728 SPDK_ERRLOG("Blobstore unload in progress\n"); 5729 cb_fn(cb_arg, -EBUSY); 5730 return; 5731 } 5732 SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32 5733 " esnap clones are unloading\n", bs->esnap_channels_unloading); 5734 bs->esnap_unload_cb_fn = cb_fn; 5735 bs->esnap_unload_cb_arg = cb_arg; 5736 return; 5737 } 5738 if (bs->esnap_unload_cb_fn != NULL) { 5739 SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n"); 5740 assert(bs->esnap_unload_cb_fn == cb_fn); 5741 assert(bs->esnap_unload_cb_arg == cb_arg); 5742 bs->esnap_unload_cb_fn = NULL; 5743 bs->esnap_unload_cb_arg = NULL; 5744 } 5745 5746 if (!RB_EMPTY(&bs->open_blobs)) { 5747 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5748 cb_fn(cb_arg, -EBUSY); 5749 return; 5750 } 5751 5752 ctx = calloc(1, sizeof(*ctx)); 5753 if (!ctx) { 5754 cb_fn(cb_arg, -ENOMEM); 5755 return; 5756 } 5757 5758 ctx->bs = bs; 5759 5760 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5761 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5762 if (!ctx->super) { 5763 free(ctx); 5764 cb_fn(cb_arg, -ENOMEM); 5765 return; 5766 } 5767 5768 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5769 cpl.u.bs_basic.cb_fn = cb_fn; 5770 cpl.u.bs_basic.cb_arg = cb_arg; 5771 5772 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5773 if (!ctx->seq) { 5774 spdk_free(ctx->super); 5775 free(ctx); 5776 cb_fn(cb_arg, -ENOMEM); 5777 return; 5778 } 5779 5780 /* Read super block */ 5781 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5782 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5783 bs_unload_read_super_cpl, ctx); 5784 } 5785 5786 /* END spdk_bs_unload */ 5787 5788 /* START spdk_bs_set_super */ 5789 5790 struct spdk_bs_set_super_ctx { 5791 struct spdk_blob_store *bs; 5792 struct spdk_bs_super_block *super; 5793 }; 5794 5795 static void 5796 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5797 { 5798 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5799 5800 if (bserrno != 0) { 5801 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5802 } 5803 5804 spdk_free(ctx->super); 5805 5806 bs_sequence_finish(seq, bserrno); 5807 5808 free(ctx); 5809 } 5810 5811 static void 5812 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5813 { 5814 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5815 int rc; 5816 5817 if (bserrno != 0) { 5818 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5819 spdk_free(ctx->super); 5820 bs_sequence_finish(seq, bserrno); 5821 free(ctx); 5822 return; 5823 } 5824 5825 rc = bs_super_validate(ctx->super, ctx->bs); 5826 if (rc != 0) { 5827 SPDK_ERRLOG("Not a valid super block\n"); 5828 spdk_free(ctx->super); 5829 bs_sequence_finish(seq, rc); 5830 free(ctx); 5831 return; 5832 } 5833 5834 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 5835 } 5836 5837 void 5838 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 5839 spdk_bs_op_complete cb_fn, void *cb_arg) 5840 { 5841 struct spdk_bs_cpl cpl; 5842 spdk_bs_sequence_t *seq; 5843 struct spdk_bs_set_super_ctx *ctx; 5844 5845 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 5846 5847 ctx = calloc(1, sizeof(*ctx)); 5848 if (!ctx) { 5849 cb_fn(cb_arg, -ENOMEM); 5850 return; 5851 } 5852 5853 ctx->bs = bs; 5854 5855 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5856 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5857 if (!ctx->super) { 5858 free(ctx); 5859 
cb_fn(cb_arg, -ENOMEM); 5860 return; 5861 } 5862 5863 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5864 cpl.u.bs_basic.cb_fn = cb_fn; 5865 cpl.u.bs_basic.cb_arg = cb_arg; 5866 5867 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 5868 if (!seq) { 5869 spdk_free(ctx->super); 5870 free(ctx); 5871 cb_fn(cb_arg, -ENOMEM); 5872 return; 5873 } 5874 5875 bs->super_blob = blobid; 5876 5877 /* Read super block */ 5878 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 5879 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5880 bs_set_super_read_cpl, ctx); 5881 } 5882 5883 /* END spdk_bs_set_super */ 5884 5885 void 5886 spdk_bs_get_super(struct spdk_blob_store *bs, 5887 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5888 { 5889 if (bs->super_blob == SPDK_BLOBID_INVALID) { 5890 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 5891 } else { 5892 cb_fn(cb_arg, bs->super_blob, 0); 5893 } 5894 } 5895 5896 uint64_t 5897 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 5898 { 5899 return bs->cluster_sz; 5900 } 5901 5902 uint64_t 5903 spdk_bs_get_page_size(struct spdk_blob_store *bs) 5904 { 5905 return SPDK_BS_PAGE_SIZE; 5906 } 5907 5908 uint64_t 5909 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 5910 { 5911 return bs->io_unit_size; 5912 } 5913 5914 uint64_t 5915 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 5916 { 5917 return bs->num_free_clusters; 5918 } 5919 5920 uint64_t 5921 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 5922 { 5923 return bs->total_data_clusters; 5924 } 5925 5926 static int 5927 bs_register_md_thread(struct spdk_blob_store *bs) 5928 { 5929 bs->md_channel = spdk_get_io_channel(bs); 5930 if (!bs->md_channel) { 5931 SPDK_ERRLOG("Failed to get IO channel.\n"); 5932 return -1; 5933 } 5934 5935 return 0; 5936 } 5937 5938 static int 5939 bs_unregister_md_thread(struct spdk_blob_store *bs) 5940 { 5941 spdk_put_io_channel(bs->md_channel); 5942 5943 return 0; 5944 } 5945 5946 spdk_blob_id 5947 spdk_blob_get_id(struct spdk_blob *blob) 5948 { 5949 assert(blob != NULL); 5950 5951 return blob->id; 5952 } 5953 5954 uint64_t 5955 spdk_blob_get_num_pages(struct spdk_blob *blob) 5956 { 5957 assert(blob != NULL); 5958 5959 return bs_cluster_to_page(blob->bs, blob->active.num_clusters); 5960 } 5961 5962 uint64_t 5963 spdk_blob_get_num_io_units(struct spdk_blob *blob) 5964 { 5965 assert(blob != NULL); 5966 5967 return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs); 5968 } 5969 5970 uint64_t 5971 spdk_blob_get_num_clusters(struct spdk_blob *blob) 5972 { 5973 assert(blob != NULL); 5974 5975 return blob->active.num_clusters; 5976 } 5977 5978 static uint64_t 5979 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated) 5980 { 5981 uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob); 5982 5983 while (offset < blob_io_unit_num) { 5984 if (bs_io_unit_is_allocated(blob, offset) == is_allocated) { 5985 return offset; 5986 } 5987 5988 offset += bs_num_io_units_to_cluster_boundary(blob, offset); 5989 } 5990 5991 return UINT64_MAX; 5992 } 5993 5994 uint64_t 5995 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset) 5996 { 5997 return blob_find_io_unit(blob, offset, true); 5998 } 5999 6000 uint64_t 6001 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset) 6002 { 6003 return blob_find_io_unit(blob, offset, false); 6004 } 6005 6006 /* START spdk_bs_create_blob */ 6007 6008 static void 6009 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6010 { 6011 struct spdk_blob *blob = 
cb_arg; 6012 uint32_t page_idx = bs_blobid_to_page(blob->id); 6013 6014 if (bserrno != 0) { 6015 spdk_spin_lock(&blob->bs->used_lock); 6016 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 6017 bs_release_md_page(blob->bs, page_idx); 6018 spdk_spin_unlock(&blob->bs->used_lock); 6019 } 6020 6021 blob_free(blob); 6022 6023 bs_sequence_finish(seq, bserrno); 6024 } 6025 6026 static int 6027 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 6028 bool internal) 6029 { 6030 uint64_t i; 6031 size_t value_len = 0; 6032 int rc; 6033 const void *value = NULL; 6034 if (xattrs->count > 0 && xattrs->get_value == NULL) { 6035 return -EINVAL; 6036 } 6037 for (i = 0; i < xattrs->count; i++) { 6038 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 6039 if (value == NULL || value_len == 0) { 6040 return -EINVAL; 6041 } 6042 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 6043 if (rc < 0) { 6044 return rc; 6045 } 6046 } 6047 return 0; 6048 } 6049 6050 static void 6051 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 6052 { 6053 #define FIELD_OK(field) \ 6054 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 6055 6056 #define SET_FIELD(field) \ 6057 if (FIELD_OK(field)) { \ 6058 dst->field = src->field; \ 6059 } \ 6060 6061 SET_FIELD(num_clusters); 6062 SET_FIELD(thin_provision); 6063 SET_FIELD(clear_method); 6064 6065 if (FIELD_OK(xattrs)) { 6066 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 6067 } 6068 6069 SET_FIELD(use_extent_table); 6070 SET_FIELD(esnap_id); 6071 SET_FIELD(esnap_id_len); 6072 6073 dst->opts_size = src->opts_size; 6074 6075 /* You should not remove this statement, but need to update the assert statement 6076 * if you add a new field, and also add a corresponding SET_FIELD statement */ 6077 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size"); 6078 6079 #undef FIELD_OK 6080 #undef SET_FIELD 6081 } 6082 6083 static void 6084 bs_create_blob(struct spdk_blob_store *bs, 6085 const struct spdk_blob_opts *opts, 6086 const struct spdk_blob_xattr_opts *internal_xattrs, 6087 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6088 { 6089 struct spdk_blob *blob; 6090 uint32_t page_idx; 6091 struct spdk_bs_cpl cpl; 6092 struct spdk_blob_opts opts_local; 6093 struct spdk_blob_xattr_opts internal_xattrs_default; 6094 spdk_bs_sequence_t *seq; 6095 spdk_blob_id id; 6096 int rc; 6097 6098 assert(spdk_get_thread() == bs->md_thread); 6099 6100 spdk_spin_lock(&bs->used_lock); 6101 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 6102 if (page_idx == UINT32_MAX) { 6103 spdk_spin_unlock(&bs->used_lock); 6104 cb_fn(cb_arg, 0, -ENOMEM); 6105 return; 6106 } 6107 spdk_bit_array_set(bs->used_blobids, page_idx); 6108 bs_claim_md_page(bs, page_idx); 6109 spdk_spin_unlock(&bs->used_lock); 6110 6111 id = bs_page_to_blobid(page_idx); 6112 6113 SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx); 6114 6115 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 6116 if (opts) { 6117 blob_opts_copy(opts, &opts_local); 6118 } 6119 6120 blob = blob_alloc(bs, id); 6121 if (!blob) { 6122 rc = -ENOMEM; 6123 goto error; 6124 } 6125 6126 blob->use_extent_table = opts_local.use_extent_table; 6127 if (blob->use_extent_table) { 6128 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 6129 } 6130 6131 if (!internal_xattrs) { 6132 blob_xattrs_init(&internal_xattrs_default); 6133 internal_xattrs = &internal_xattrs_default; 
6134 } 6135 6136 rc = blob_set_xattrs(blob, &opts_local.xattrs, false); 6137 if (rc < 0) { 6138 goto error; 6139 } 6140 6141 rc = blob_set_xattrs(blob, internal_xattrs, true); 6142 if (rc < 0) { 6143 goto error; 6144 } 6145 6146 if (opts_local.thin_provision) { 6147 blob_set_thin_provision(blob); 6148 } 6149 6150 blob_set_clear_method(blob, opts_local.clear_method); 6151 6152 if (opts_local.esnap_id != NULL) { 6153 if (opts_local.esnap_id_len > UINT16_MAX) { 6154 SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n", 6155 opts_local.esnap_id_len); 6156 rc = -EINVAL; 6157 goto error; 6158 6159 } 6160 blob_set_thin_provision(blob); 6161 blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 6162 rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID, 6163 opts_local.esnap_id, opts_local.esnap_id_len, true); 6164 if (rc != 0) { 6165 goto error; 6166 } 6167 } 6168 6169 rc = blob_resize(blob, opts_local.num_clusters); 6170 if (rc < 0) { 6171 goto error; 6172 } 6173 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6174 cpl.u.blobid.cb_fn = cb_fn; 6175 cpl.u.blobid.cb_arg = cb_arg; 6176 cpl.u.blobid.blobid = blob->id; 6177 6178 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 6179 if (!seq) { 6180 rc = -ENOMEM; 6181 goto error; 6182 } 6183 6184 blob_persist(seq, blob, bs_create_blob_cpl, blob); 6185 return; 6186 6187 error: 6188 SPDK_ERRLOG("Failed to create blob: %s, size in clusters: %lu\n", 6189 spdk_strerror(rc), opts_local.num_clusters); 6190 if (blob != NULL) { 6191 blob_free(blob); 6192 } 6193 spdk_spin_lock(&bs->used_lock); 6194 spdk_bit_array_clear(bs->used_blobids, page_idx); 6195 bs_release_md_page(bs, page_idx); 6196 spdk_spin_unlock(&bs->used_lock); 6197 cb_fn(cb_arg, 0, rc); 6198 } 6199 6200 void 6201 spdk_bs_create_blob(struct spdk_blob_store *bs, 6202 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6203 { 6204 bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 6205 } 6206 6207 void 6208 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts, 6209 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6210 { 6211 bs_create_blob(bs, opts, NULL, cb_fn, cb_arg); 6212 } 6213 6214 /* END spdk_bs_create_blob */ 6215 6216 /* START blob_cleanup */ 6217 6218 struct spdk_clone_snapshot_ctx { 6219 struct spdk_bs_cpl cpl; 6220 int bserrno; 6221 bool frozen; 6222 6223 struct spdk_io_channel *channel; 6224 6225 /* Current cluster for inflate operation */ 6226 uint64_t cluster; 6227 6228 /* For inflation, force allocation of all unallocated clusters and remove 6229 * thin-provisioning. Otherwise only decouple the parent and keep the clone thin. */ 6230 bool allocate_all; 6231 6232 struct { 6233 spdk_blob_id id; 6234 struct spdk_blob *blob; 6235 bool md_ro; 6236 } original; 6237 struct { 6238 spdk_blob_id id; 6239 struct spdk_blob *blob; 6240 } new; 6241 6242 /* xattrs specified for snapshot/clones only. They have no impact on 6243 * the original blob's xattrs.
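 *
 * Sketch of how a caller supplies such xattrs (illustrative only; the
 * callback and attribute names here are hypothetical):
 *
 *     static void
 *     snap_xattr_get_value(void *ctx, const char *name,
 *                          const void **value, size_t *value_len)
 *     {
 *             *value = "manual-backup";
 *             *value_len = sizeof("manual-backup");
 *     }
 *
 *     char *names[] = { "created_by" };
 *     struct spdk_blob_xattr_opts xattrs = {
 *             .count = 1,
 *             .names = names,
 *             .ctx = NULL,
 *             .get_value = snap_xattr_get_value,
 *     };
 *
 *     spdk_bs_create_snapshot(bs, blobid, &xattrs, snapshot_done, NULL);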
*/ 6244 const struct spdk_blob_xattr_opts *xattrs; 6245 }; 6246 6247 static void 6248 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 6249 { 6250 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 6251 struct spdk_bs_cpl *cpl = &ctx->cpl; 6252 6253 if (bserrno != 0) { 6254 if (ctx->bserrno != 0) { 6255 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6256 } else { 6257 ctx->bserrno = bserrno; 6258 } 6259 } 6260 6261 switch (cpl->type) { 6262 case SPDK_BS_CPL_TYPE_BLOBID: 6263 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 6264 break; 6265 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 6266 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 6267 break; 6268 default: 6269 SPDK_UNREACHABLE(); 6270 break; 6271 } 6272 6273 free(ctx); 6274 } 6275 6276 static void 6277 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6278 { 6279 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6280 struct spdk_blob *origblob = ctx->original.blob; 6281 6282 if (bserrno != 0) { 6283 if (ctx->bserrno != 0) { 6284 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 6285 } else { 6286 ctx->bserrno = bserrno; 6287 } 6288 } 6289 6290 ctx->original.id = origblob->id; 6291 origblob->locked_operation_in_progress = false; 6292 6293 /* Revert md_ro to original state */ 6294 origblob->md_ro = ctx->original.md_ro; 6295 6296 spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx); 6297 } 6298 6299 static void 6300 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 6301 { 6302 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6303 struct spdk_blob *origblob = ctx->original.blob; 6304 6305 if (bserrno != 0) { 6306 if (ctx->bserrno != 0) { 6307 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6308 } else { 6309 ctx->bserrno = bserrno; 6310 } 6311 } 6312 6313 if (ctx->frozen) { 6314 /* Unfreeze any outstanding I/O */ 6315 blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx); 6316 } else { 6317 bs_snapshot_unfreeze_cpl(ctx, 0); 6318 } 6319 6320 } 6321 6322 static void 6323 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno) 6324 { 6325 struct spdk_blob *newblob = ctx->new.blob; 6326 6327 if (bserrno != 0) { 6328 if (ctx->bserrno != 0) { 6329 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 6330 } else { 6331 ctx->bserrno = bserrno; 6332 } 6333 } 6334 6335 ctx->new.id = newblob->id; 6336 spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6337 } 6338 6339 /* END blob_cleanup */ 6340 6341 /* START spdk_bs_create_snapshot */ 6342 6343 static void 6344 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 6345 { 6346 uint64_t *cluster_temp; 6347 uint32_t *extent_page_temp; 6348 6349 cluster_temp = blob1->active.clusters; 6350 blob1->active.clusters = blob2->active.clusters; 6351 blob2->active.clusters = cluster_temp; 6352 6353 extent_page_temp = blob1->active.extent_pages; 6354 blob1->active.extent_pages = blob2->active.extent_pages; 6355 blob2->active.extent_pages = extent_page_temp; 6356 } 6357 6358 /* Copies an internal xattr */ 6359 static int 6360 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name) 6361 { 6362 const void *val = NULL; 6363 size_t len; 6364 int bserrno; 6365 6366 bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true); 6367 if (bserrno != 0) { 6368 SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name); 6369 return bserrno; 6370 } 6371 6372 bserrno = 
blob_set_xattr(toblob, name, val, len, true); 6373 if (bserrno != 0) { 6374 SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n", 6375 name, toblob->id); 6376 return bserrno; 6377 } 6378 return 0; 6379 } 6380 6381 static void 6382 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno) 6383 { 6384 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6385 struct spdk_blob *origblob = ctx->original.blob; 6386 struct spdk_blob *newblob = ctx->new.blob; 6387 6388 if (bserrno != 0) { 6389 bs_snapshot_swap_cluster_maps(newblob, origblob); 6390 if (blob_is_esnap_clone(newblob)) { 6391 bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6392 origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 6393 } 6394 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6395 return; 6396 } 6397 6398 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */ 6399 bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true); 6400 if (bserrno != 0) { 6401 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6402 return; 6403 } 6404 6405 bs_blob_list_add(ctx->original.blob); 6406 6407 spdk_blob_set_read_only(newblob); 6408 6409 /* sync snapshot metadata */ 6410 spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 6411 } 6412 6413 static void 6414 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno) 6415 { 6416 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6417 struct spdk_blob *origblob = ctx->original.blob; 6418 struct spdk_blob *newblob = ctx->new.blob; 6419 6420 if (bserrno != 0) { 6421 /* return cluster map back to original */ 6422 bs_snapshot_swap_cluster_maps(newblob, origblob); 6423 6424 /* Newblob md sync failed. Valid clusters are only present in origblob. 6425 * Since I/O is frozen on origblob, no changes to the zeroed out cluster map should have occurred. 6426 * Newblob needs to be reverted to the thin_provisioned state it had at creation to properly close.
*/ 6427 blob_set_thin_provision(newblob); 6428 assert(spdk_mem_all_zero(newblob->active.clusters, 6429 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6430 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6431 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6432 6433 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6434 return; 6435 } 6436 6437 /* Set internal xattr for snapshot id */ 6438 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 6439 if (bserrno != 0) { 6440 /* return cluster map back to original */ 6441 bs_snapshot_swap_cluster_maps(newblob, origblob); 6442 blob_set_thin_provision(newblob); 6443 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6444 return; 6445 } 6446 6447 /* Create new back_bs_dev for snapshot */ 6448 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 6449 if (origblob->back_bs_dev == NULL) { 6450 /* return cluster map back to original */ 6451 bs_snapshot_swap_cluster_maps(newblob, origblob); 6452 blob_set_thin_provision(newblob); 6453 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 6454 return; 6455 } 6456 6457 /* Remove the xattr that references an external snapshot */ 6458 if (blob_is_esnap_clone(origblob)) { 6459 origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6460 bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6461 if (bserrno != 0) { 6462 if (bserrno == -ENOENT) { 6463 SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID 6464 " xattr to remove\n", origblob->id); 6465 assert(false); 6466 } else { 6467 /* return cluster map back to original */ 6468 bs_snapshot_swap_cluster_maps(newblob, origblob); 6469 blob_set_thin_provision(newblob); 6470 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6471 return; 6472 } 6473 } 6474 } 6475 6476 bs_blob_list_remove(origblob); 6477 origblob->parent_id = newblob->id; 6478 /* set clone blob as thin provisioned */ 6479 blob_set_thin_provision(origblob); 6480 6481 bs_blob_list_add(newblob); 6482 6483 /* sync clone metadata */ 6484 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 6485 } 6486 6487 static void 6488 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 6489 { 6490 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6491 struct spdk_blob *origblob = ctx->original.blob; 6492 struct spdk_blob *newblob = ctx->new.blob; 6493 int bserrno; 6494 6495 if (rc != 0) { 6496 bs_clone_snapshot_newblob_cleanup(ctx, rc); 6497 return; 6498 } 6499 6500 ctx->frozen = true; 6501 6502 if (blob_is_esnap_clone(origblob)) { 6503 /* Clean up any channels associated with the original blob id because future I/O will 6504 * be performed using the snapshot blob_id.
6505 */ 6506 blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL); 6507 } 6508 if (newblob->back_bs_dev) { 6509 blob_back_bs_destroy(newblob); 6510 } 6511 /* set new back_bs_dev for snapshot */ 6512 newblob->back_bs_dev = origblob->back_bs_dev; 6513 /* Set invalid flags from origblob */ 6514 newblob->invalid_flags = origblob->invalid_flags; 6515 6516 /* inherit parent from original blob if set */ 6517 newblob->parent_id = origblob->parent_id; 6518 switch (origblob->parent_id) { 6519 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 6520 bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID); 6521 if (bserrno != 0) { 6522 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6523 return; 6524 } 6525 break; 6526 case SPDK_BLOBID_INVALID: 6527 break; 6528 default: 6529 /* Set internal xattr for snapshot id */ 6530 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6531 &origblob->parent_id, sizeof(spdk_blob_id), true); 6532 if (bserrno != 0) { 6533 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6534 return; 6535 } 6536 } 6537 6538 /* swap cluster maps */ 6539 bs_snapshot_swap_cluster_maps(newblob, origblob); 6540 6541 /* Set the clear method on the new blob to match the original. */ 6542 blob_set_clear_method(newblob, origblob->clear_method); 6543 6544 /* sync snapshot metadata */ 6545 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6546 } 6547 6548 static void 6549 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6550 { 6551 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6552 struct spdk_blob *origblob = ctx->original.blob; 6553 struct spdk_blob *newblob = _blob; 6554 6555 if (bserrno != 0) { 6556 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6557 return; 6558 } 6559 6560 ctx->new.blob = newblob; 6561 assert(spdk_blob_is_thin_provisioned(newblob)); 6562 assert(spdk_mem_all_zero(newblob->active.clusters, 6563 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6564 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6565 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6566 6567 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6568 } 6569 6570 static void 6571 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6572 { 6573 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6574 struct spdk_blob *origblob = ctx->original.blob; 6575 6576 if (bserrno != 0) { 6577 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6578 return; 6579 } 6580 6581 ctx->new.id = blobid; 6582 ctx->cpl.u.blobid.blobid = blobid; 6583 6584 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6585 } 6586 6587 6588 static void 6589 bs_xattr_snapshot(void *arg, const char *name, 6590 const void **value, size_t *value_len) 6591 { 6592 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6593 6594 struct spdk_blob *blob = (struct spdk_blob *)arg; 6595 *value = &blob->id; 6596 *value_len = sizeof(blob->id); 6597 } 6598 6599 static void 6600 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6601 { 6602 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6603 struct spdk_blob_opts opts; 6604 struct spdk_blob_xattr_opts internal_xattrs; 6605 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6606 6607 if (bserrno != 0) { 6608 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6609 return; 6610 } 6611 6612 
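/*
 * Road map for the callbacks that implement snapshot creation below
 * (a sketch of the existing flow, not additional behavior):
 *   1. bs_create_blob() creates a thin-provisioned blob of equal size with
 *      the internal SNAPSHOT_IN_PROGRESS xattr set to the original's id,
 *   2. I/O on the original blob is frozen,
 *   3. the cluster maps are swapped so the new blob owns the data,
 *   4. the new blob is synced, the original is re-pointed at it as a thin
 *      clone and synced, and I/O is unfrozen.
 */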
ctx->original.blob = _blob; 6613 6614 if (_blob->data_ro || _blob->md_ro) { 6615 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read-only blob with id 0x%" 6616 PRIx64 "\n", _blob->id); 6617 ctx->bserrno = -EINVAL; 6618 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6619 return; 6620 } 6621 6622 if (_blob->locked_operation_in_progress) { 6623 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6624 ctx->bserrno = -EBUSY; 6625 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6626 return; 6627 } 6628 6629 _blob->locked_operation_in_progress = true; 6630 6631 spdk_blob_opts_init(&opts, sizeof(opts)); 6632 blob_xattrs_init(&internal_xattrs); 6633 6634 /* Make the new blob the same size as the original blob, 6635 * but do not allocate clusters */ 6636 opts.thin_provision = true; 6637 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6638 opts.use_extent_table = _blob->use_extent_table; 6639 6640 /* If there are any xattrs specified for the snapshot, set them now */ 6641 if (ctx->xattrs) { 6642 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6643 } 6644 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6645 internal_xattrs.count = 1; 6646 internal_xattrs.ctx = _blob; 6647 internal_xattrs.names = xattrs_names; 6648 internal_xattrs.get_value = bs_xattr_snapshot; 6649 6650 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6651 bs_snapshot_newblob_create_cpl, ctx); 6652 } 6653 6654 void 6655 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 6656 const struct spdk_blob_xattr_opts *snapshot_xattrs, 6657 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6658 { 6659 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6660 6661 if (!ctx) { 6662 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6663 return; 6664 } 6665 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6666 ctx->cpl.u.blobid.cb_fn = cb_fn; 6667 ctx->cpl.u.blobid.cb_arg = cb_arg; 6668 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6669 ctx->bserrno = 0; 6670 ctx->frozen = false; 6671 ctx->original.id = blobid; 6672 ctx->xattrs = snapshot_xattrs; 6673 6674 spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx); 6675 } 6676 /* END spdk_bs_create_snapshot */ 6677 6678 /* START spdk_bs_create_clone */ 6679 6680 static void 6681 bs_xattr_clone(void *arg, const char *name, 6682 const void **value, size_t *value_len) 6683 { 6684 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 6685 6686 struct spdk_blob *blob = (struct spdk_blob *)arg; 6687 *value = &blob->id; 6688 *value_len = sizeof(blob->id); 6689 } 6690 6691 static void 6692 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6693 { 6694 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6695 struct spdk_blob *clone = _blob; 6696 /* Bail out before touching the clone if the open failed; mirrors bs_snapshot_newblob_open_cpl() */ if (bserrno != 0) { bs_clone_snapshot_origblob_cleanup(ctx, bserrno); return; } 6697 ctx->new.blob = clone; 6698 bs_blob_list_add(clone); 6699 6700 spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx); 6701 } 6702 6703 static void 6704 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6705 { 6706 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6707 /* Propagate create failures instead of opening an invalid blobid; mirrors bs_snapshot_newblob_create_cpl() */ if (bserrno != 0) { bs_clone_snapshot_origblob_cleanup(ctx, bserrno); return; } 6708 ctx->cpl.u.blobid.blobid = blobid; 6709 spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx); 6710 } 6711 6712 static void 6713 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6714 { 6715 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6716 struct
spdk_blob_opts opts; 6717 struct spdk_blob_xattr_opts internal_xattrs; 6718 char *xattr_names[] = { BLOB_SNAPSHOT }; 6719 6720 if (bserrno != 0) { 6721 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6722 return; 6723 } 6724 6725 ctx->original.blob = _blob; 6726 ctx->original.md_ro = _blob->md_ro; 6727 6728 if (!_blob->data_ro || !_blob->md_ro) { 6729 SPDK_DEBUGLOG(blob, "Cannot create clone from a blob that is not read-only\n"); 6730 ctx->bserrno = -EINVAL; 6731 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6732 return; 6733 } 6734 6735 if (_blob->locked_operation_in_progress) { 6736 SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n"); 6737 ctx->bserrno = -EBUSY; 6738 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6739 return; 6740 } 6741 6742 _blob->locked_operation_in_progress = true; 6743 6744 spdk_blob_opts_init(&opts, sizeof(opts)); 6745 blob_xattrs_init(&internal_xattrs); 6746 6747 opts.thin_provision = true; 6748 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6749 opts.use_extent_table = _blob->use_extent_table; 6750 if (ctx->xattrs) { 6751 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6752 } 6753 6754 /* Set internal xattr BLOB_SNAPSHOT */ 6755 internal_xattrs.count = 1; 6756 internal_xattrs.ctx = _blob; 6757 internal_xattrs.names = xattr_names; 6758 internal_xattrs.get_value = bs_xattr_clone; 6759 6760 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6761 bs_clone_newblob_create_cpl, ctx); 6762 } 6763 6764 void 6765 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6766 const struct spdk_blob_xattr_opts *clone_xattrs, 6767 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6768 { 6769 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6770 6771 if (!ctx) { 6772 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6773 return; 6774 } 6775 6776 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6777 ctx->cpl.u.blobid.cb_fn = cb_fn; 6778 ctx->cpl.u.blobid.cb_arg = cb_arg; 6779 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6780 ctx->bserrno = 0; 6781 ctx->xattrs = clone_xattrs; 6782 ctx->original.id = blobid; 6783 6784 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6785 } 6786 6787 /* END spdk_bs_create_clone */ 6788 6789 /* START spdk_bs_inflate_blob */ 6790 6791 static void 6792 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6793 { 6794 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6795 struct spdk_blob *_blob = ctx->original.blob; 6796 6797 if (bserrno != 0) { 6798 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6799 return; 6800 } 6801 assert(_parent != NULL); 6802 /* Temporarily override md_ro flag for MD modification */ 6803 _blob->md_ro = false; 6804 6805 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6806 if (bserrno != 0) { 6807 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6808 return; 6809 } 6810 6811 6812 6813 bs_blob_list_remove(_blob); 6814 _blob->parent_id = _parent->id; 6815 6816 blob_back_bs_destroy(_blob); 6817 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6818 bs_blob_list_add(_blob); 6819 6820 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6821 } 6822 6823 static void 6824 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 6825 { 6826 struct spdk_blob *_blob = ctx->original.blob; 6827 struct spdk_blob *_parent; 6828 6829 if (ctx->allocate_all) { 6830 /* remove thin provisioning */ 6831 bs_blob_list_remove(_blob);
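/*
 * Full inflation (spdk_bs_inflate_blob) takes this branch: the blob keeps
 * the clusters it now owns, drops the thin-provision flag, and loses any
 * parent reference. The else branch below is the decouple-parent case,
 * which keeps the blob thin. Caller-side sketch (the channel, blobid and
 * callback names are assumed to come from the caller):
 *
 *     spdk_bs_inflate_blob(bs, ch, blobid, inflate_done, NULL);
 *     spdk_bs_blob_decouple_parent(bs, ch, blobid, decouple_done, NULL);
 */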
6832 if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 6833 blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true); 6834 _blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT; 6835 } else { 6836 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 6837 } 6838 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV; 6839 blob_back_bs_destroy(_blob); 6840 _blob->parent_id = SPDK_BLOBID_INVALID; 6841 } else { 6842 /* For now, esnap clones always have allocate_all set. */ 6843 assert(!blob_is_esnap_clone(_blob)); 6844 6845 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob; 6846 if (_parent->parent_id != SPDK_BLOBID_INVALID) { 6847 /* We must change the parent of the inflated blob */ 6848 spdk_bs_open_blob(_blob->bs, _parent->parent_id, 6849 bs_inflate_blob_set_parent_cpl, ctx); 6850 return; 6851 } 6852 6853 bs_blob_list_remove(_blob); 6854 _blob->parent_id = SPDK_BLOBID_INVALID; 6855 blob_back_bs_destroy(_blob); 6856 _blob->back_bs_dev = bs_create_zeroes_dev(); 6857 } 6858 6859 /* Temporarily override md_ro flag for MD modification */ 6860 _blob->md_ro = false; 6861 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 6862 _blob->state = SPDK_BLOB_STATE_DIRTY; 6863 6864 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6865 } 6866 6867 /* Check if cluster needs allocation */ 6868 static inline bool 6869 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all) 6870 { 6871 struct spdk_blob_bs_dev *b; 6872 6873 assert(blob != NULL); 6874 6875 if (blob->active.clusters[cluster] != 0) { 6876 /* Cluster is already allocated */ 6877 return false; 6878 } 6879 6880 if (blob->parent_id == SPDK_BLOBID_INVALID) { 6881 /* Blob has no parent blob */ 6882 return allocate_all; 6883 } 6884 6885 if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 6886 return true; 6887 } 6888 6889 b = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 6890 return (allocate_all || b->blob->active.clusters[cluster] != 0); 6891 } 6892 6893 static void 6894 bs_inflate_blob_touch_next(void *cb_arg, int bserrno) 6895 { 6896 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6897 struct spdk_blob *_blob = ctx->original.blob; 6898 struct spdk_bs_cpl cpl; 6899 spdk_bs_user_op_t *op; 6900 uint64_t offset; 6901 6902 if (bserrno != 0) { 6903 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6904 return; 6905 } 6906 6907 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) { 6908 if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) { 6909 break; 6910 } 6911 } 6912 6913 if (ctx->cluster < _blob->active.num_clusters) { 6914 offset = bs_cluster_to_lba(_blob->bs, ctx->cluster); 6915 6916 /* We may safely increment the cluster index before copying */ 6917 ctx->cluster++; 6918 6919 /* Use a dummy 0B read as a context for cluster copy */ 6920 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 6921 cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next; 6922 cpl.u.blob_basic.cb_arg = ctx; 6923 6924 op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob, 6925 NULL, 0, offset, 0); 6926 if (!op) { 6927 bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM); 6928 return; 6929 } 6930 6931 bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op); 6932 } else { 6933 bs_inflate_blob_done(ctx); 6934 } 6935 } 6936 6937 static void 6938 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6939 { 6940 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6941 uint64_t clusters_needed; 6942
uint64_t i; 6943 6944 if (bserrno != 0) { 6945 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6946 return; 6947 } 6948 6949 ctx->original.blob = _blob; 6950 ctx->original.md_ro = _blob->md_ro; 6951 6952 if (_blob->locked_operation_in_progress) { 6953 SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n"); 6954 ctx->bserrno = -EBUSY; 6955 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6956 return; 6957 } 6958 6959 _blob->locked_operation_in_progress = true; 6960 6961 switch (_blob->parent_id) { 6962 case SPDK_BLOBID_INVALID: 6963 if (!ctx->allocate_all) { 6964 /* This blob has no parent, so we cannot decouple it. */ 6965 SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n"); 6966 bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 6967 return; 6968 } 6969 break; 6970 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 6971 /* 6972 * It would be better to rely on back_bs_dev->is_zeroes() to determine which 6973 * clusters require allocation. Until there is a blobstore consumer that 6974 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes(), it is not 6975 * worth the effort. 6976 */ 6977 ctx->allocate_all = true; 6978 break; 6979 default: 6980 break; 6981 } 6982 6983 if (spdk_blob_is_thin_provisioned(_blob) == false) { 6984 /* This is not a thin-provisioned blob. No need to inflate. */ 6985 bs_clone_snapshot_origblob_cleanup(ctx, 0); 6986 return; 6987 } 6988 6989 /* Do two passes - one to verify that we can obtain enough clusters 6990 * and another to actually claim them. 6991 */ 6992 clusters_needed = 0; 6993 for (i = 0; i < _blob->active.num_clusters; i++) { 6994 if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) { 6995 clusters_needed++; 6996 } 6997 } 6998 6999 if (clusters_needed > _blob->bs->num_free_clusters) { 7000 /* Not enough free clusters. Cannot satisfy the request.
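 * This is the verification pass described above; nothing has been claimed
 * yet, so failing here with -ENOSPC leaves the blob untouched.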
*/ 7001 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 7002 return; 7003 } 7004 7005 ctx->cluster = 0; 7006 bs_inflate_blob_touch_next(ctx, 0); 7007 } 7008 7009 static void 7010 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7011 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 7012 { 7013 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 7014 7015 if (!ctx) { 7016 cb_fn(cb_arg, -ENOMEM); 7017 return; 7018 } 7019 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7020 ctx->cpl.u.blob_basic.cb_fn = cb_fn; 7021 ctx->cpl.u.blob_basic.cb_arg = cb_arg; 7022 ctx->bserrno = 0; 7023 ctx->original.id = blobid; 7024 ctx->channel = channel; 7025 ctx->allocate_all = allocate_all; 7026 7027 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 7028 } 7029 7030 void 7031 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7032 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7033 { 7034 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 7035 } 7036 7037 void 7038 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 7039 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 7040 { 7041 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 7042 } 7043 /* END spdk_bs_inflate_blob */ 7044 7045 /* START spdk_blob_resize */ 7046 struct spdk_bs_resize_ctx { 7047 spdk_blob_op_complete cb_fn; 7048 void *cb_arg; 7049 struct spdk_blob *blob; 7050 uint64_t sz; 7051 int rc; 7052 }; 7053 7054 static void 7055 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 7056 { 7057 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7058 7059 if (rc != 0) { 7060 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 7061 } 7062 7063 if (ctx->rc != 0) { 7064 SPDK_ERRLOG("Resize failed, rc=%d\n", ctx->rc); 7065 rc = ctx->rc; 7066 } 7067 7068 ctx->blob->locked_operation_in_progress = false; 7069 7070 ctx->cb_fn(ctx->cb_arg, rc); 7071 free(ctx); 7072 } 7073 7074 static void 7075 bs_resize_freeze_cpl(void *cb_arg, int rc) 7076 { 7077 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 7078 7079 if (rc != 0) { 7080 ctx->blob->locked_operation_in_progress = false; 7081 ctx->cb_fn(ctx->cb_arg, rc); 7082 free(ctx); 7083 return; 7084 } 7085 7086 ctx->rc = blob_resize(ctx->blob, ctx->sz); 7087 7088 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 7089 } 7090 7091 void 7092 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 7093 { 7094 struct spdk_bs_resize_ctx *ctx; 7095 7096 blob_verify_md_op(blob); 7097 7098 SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz); 7099 7100 if (blob->md_ro) { 7101 cb_fn(cb_arg, -EPERM); 7102 return; 7103 } 7104 7105 if (sz == blob->active.num_clusters) { 7106 cb_fn(cb_arg, 0); 7107 return; 7108 } 7109 7110 if (blob->locked_operation_in_progress) { 7111 cb_fn(cb_arg, -EBUSY); 7112 return; 7113 } 7114 7115 ctx = calloc(1, sizeof(*ctx)); 7116 if (!ctx) { 7117 cb_fn(cb_arg, -ENOMEM); 7118 return; 7119 } 7120 7121 blob->locked_operation_in_progress = true; 7122 ctx->cb_fn = cb_fn; 7123 ctx->cb_arg = cb_arg; 7124 ctx->blob = blob; 7125 ctx->sz = sz; 7126 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 7127 } 7128 7129 /* END spdk_blob_resize */ 7130 7131 7132 /* START spdk_bs_delete_blob */ 7133 7134 static void 7135 bs_delete_close_cpl(void *cb_arg, int bserrno) 7136 { 7137 spdk_bs_sequence_t *seq =
cb_arg; 7138 7139 bs_sequence_finish(seq, bserrno); 7140 } 7141 7142 static void 7143 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7144 { 7145 struct spdk_blob *blob = cb_arg; 7146 7147 if (bserrno != 0) { 7148 /* 7149 * We already removed this blob from the blobstore's open blob tree, so 7150 * we need to free it here since this is the last reference 7151 * to it. 7152 */ 7153 blob_free(blob); 7154 bs_delete_close_cpl(seq, bserrno); 7155 return; 7156 } 7157 7158 /* 7159 * This will immediately decrement the ref_count and call 7160 * the completion routine since the metadata state is clean. 7161 * By calling spdk_blob_close, we reduce the number of call 7162 * points into code that touches the blob->open_ref count 7163 * and the blobstore's blob list. 7164 */ 7165 spdk_blob_close(blob, bs_delete_close_cpl, seq); 7166 } 7167 7168 struct delete_snapshot_ctx { 7169 struct spdk_blob_list *parent_snapshot_entry; 7170 struct spdk_blob *snapshot; 7171 struct spdk_blob_md_page *page; 7172 bool snapshot_md_ro; 7173 struct spdk_blob *clone; 7174 bool clone_md_ro; 7175 spdk_blob_op_with_handle_complete cb_fn; 7176 void *cb_arg; 7177 int bserrno; 7178 uint32_t next_extent_page; 7179 }; 7180 7181 static void 7182 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 7183 { 7184 struct delete_snapshot_ctx *ctx = cb_arg; 7185 7186 if (bserrno != 0) { 7187 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 7188 } 7189 7190 assert(ctx != NULL); 7191 7192 if (bserrno != 0 && ctx->bserrno == 0) { 7193 ctx->bserrno = bserrno; 7194 } 7195 7196 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 7197 spdk_free(ctx->page); 7198 free(ctx); 7199 } 7200 7201 static void 7202 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 7203 { 7204 struct delete_snapshot_ctx *ctx = cb_arg; 7205 7206 if (bserrno != 0) { 7207 ctx->bserrno = bserrno; 7208 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 7209 } 7210 7211 if (ctx->bserrno != 0) { 7212 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 7213 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 7214 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 7215 } 7216 7217 ctx->snapshot->locked_operation_in_progress = false; 7218 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 7219 7220 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 7221 } 7222 7223 static void 7224 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 7225 { 7226 struct delete_snapshot_ctx *ctx = cb_arg; 7227 7228 ctx->clone->locked_operation_in_progress = false; 7229 ctx->clone->md_ro = ctx->clone_md_ro; 7230 7231 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 7232 } 7233 7234 static void 7235 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 7236 { 7237 struct delete_snapshot_ctx *ctx = cb_arg; 7238 7239 if (bserrno) { 7240 ctx->bserrno = bserrno; 7241 delete_snapshot_cleanup_clone(ctx, 0); 7242 return; 7243 } 7244 7245 ctx->clone->locked_operation_in_progress = false; 7246 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 7247 } 7248 7249 static void 7250 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 7251 { 7252 struct delete_snapshot_ctx *ctx = cb_arg; 7253 struct spdk_blob_list *parent_snapshot_entry = NULL; 7254 struct spdk_blob_list *snapshot_entry = NULL; 7255 struct spdk_blob_list *clone_entry = NULL; 7256 struct spdk_blob_list *snapshot_clone_entry = NULL; 7257 7258 if (bserrno) { 7259 SPDK_ERRLOG("Failed to sync MD on blob\n"); 7260
ctx->bserrno = bserrno; 7261 delete_snapshot_cleanup_clone(ctx, 0); 7262 return; 7263 } 7264 7265 /* Get snapshot entry for the snapshot we want to remove */ 7266 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 7267 7268 assert(snapshot_entry != NULL); 7269 7270 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 7271 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7272 assert(clone_entry != NULL); 7273 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 7274 snapshot_entry->clone_count--; 7275 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 7276 7277 switch (ctx->snapshot->parent_id) { 7278 case SPDK_BLOBID_INVALID: 7279 case SPDK_BLOBID_EXTERNAL_SNAPSHOT: 7280 /* No parent snapshot - just remove clone entry */ 7281 free(clone_entry); 7282 break; 7283 default: 7284 /* This snapshot is at the same time a clone of another snapshot - we need to 7285 * update parent snapshot (remove current clone, add new one inherited from 7286 * the snapshot that is being removed) */ 7287 7288 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 7289 * snapshot that we are removing */ 7290 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 7291 &snapshot_clone_entry); 7292 7293 /* Switch clone entry in parent snapshot */ 7294 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 7295 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 7296 free(snapshot_clone_entry); 7297 } 7298 7299 /* Restore md_ro flags */ 7300 ctx->clone->md_ro = ctx->clone_md_ro; 7301 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 7302 7303 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 7304 } 7305 7306 static void 7307 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 7308 { 7309 struct delete_snapshot_ctx *ctx = cb_arg; 7310 uint64_t i; 7311 7312 ctx->snapshot->md_ro = false; 7313 7314 if (bserrno) { 7315 SPDK_ERRLOG("Failed to sync MD on clone\n"); 7316 ctx->bserrno = bserrno; 7317 7318 /* Restore snapshot to previous state */ 7319 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 7320 if (bserrno != 0) { 7321 delete_snapshot_cleanup_clone(ctx, bserrno); 7322 return; 7323 } 7324 7325 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 7326 return; 7327 } 7328 7329 /* Clear cluster map entries for snapshot */ 7330 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 7331 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 7332 ctx->snapshot->active.clusters[i] = 0; 7333 } 7334 } 7335 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 7336 i < ctx->clone->active.num_extent_pages; i++) { 7337 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 7338 ctx->snapshot->active.extent_pages[i] = 0; 7339 } 7340 } 7341 7342 blob_set_thin_provision(ctx->snapshot); 7343 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 7344 7345 if (ctx->parent_snapshot_entry != NULL) { 7346 ctx->snapshot->back_bs_dev = NULL; 7347 } 7348 7349 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 7350 } 7351 7352 static void 7353 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 7354 { 7355 int bserrno; 7356 7357 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 7358 blob_back_bs_destroy(ctx->clone); 7359 7360 /* Set/remove snapshot xattr and switch parent ID 
and backing bs_dev on clone... */ 7361 if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 7362 bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot, 7363 BLOB_EXTERNAL_SNAPSHOT_ID); 7364 if (bserrno != 0) { 7365 ctx->bserrno = bserrno; 7366 7367 /* Restore snapshot to previous state */ 7368 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 7369 if (bserrno != 0) { 7370 delete_snapshot_cleanup_clone(ctx, bserrno); 7371 return; 7372 } 7373 7374 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 7375 return; 7376 } 7377 ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT; 7378 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 7379 /* Do not delete the external snapshot along with this snapshot */ 7380 ctx->snapshot->back_bs_dev = NULL; 7381 ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT; 7382 } else if (ctx->parent_snapshot_entry != NULL) { 7383 /* ...to parent snapshot */ 7384 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 7385 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 7386 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 7387 sizeof(spdk_blob_id), 7388 true); 7389 } else { 7390 /* ...to blobid invalid and zeroes dev */ 7391 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 7392 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 7393 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 7394 } 7395 7396 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 7397 } 7398 7399 static void 7400 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 7401 { 7402 struct delete_snapshot_ctx *ctx = cb_arg; 7403 uint32_t *extent_page; 7404 uint64_t i; 7405 7406 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 7407 i < ctx->clone->active.num_extent_pages; i++) { 7408 if (ctx->snapshot->active.extent_pages[i] == 0) { 7409 /* No extent page to use from snapshot */ 7410 continue; 7411 } 7412 7413 extent_page = &ctx->clone->active.extent_pages[i]; 7414 if (*extent_page == 0) { 7415 /* Copy extent page from snapshot when clone did not have a matching one */ 7416 *extent_page = ctx->snapshot->active.extent_pages[i]; 7417 continue; 7418 } 7419 7420 /* Clone and snapshot both contain partially filled matching extent pages. 7421 * Update the clone extent page in place with cluster map containing the mix of both. 
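 * For example, if the merged in-memory map for this page now reads
 * {0 -> X, 1 -> Y}, with X inherited from the snapshot and Y already
 * owned by the clone, the rewritten page persists exactly that mix.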
*/ 7422 ctx->next_extent_page = i + 1; 7423 memset(ctx->page, 0, SPDK_BS_PAGE_SIZE); 7424 7425 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page, 7426 delete_snapshot_update_extent_pages, ctx); 7427 return; 7428 } 7429 delete_snapshot_update_extent_pages_cpl(ctx); 7430 } 7431 7432 static void 7433 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 7434 { 7435 struct delete_snapshot_ctx *ctx = cb_arg; 7436 uint64_t i; 7437 7438 /* Temporarily override md_ro flag for clone for MD modification */ 7439 ctx->clone_md_ro = ctx->clone->md_ro; 7440 ctx->clone->md_ro = false; 7441 7442 if (bserrno) { 7443 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 7444 ctx->bserrno = bserrno; 7445 delete_snapshot_cleanup_clone(ctx, 0); 7446 return; 7447 } 7448 7449 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 7450 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 7451 if (ctx->clone->active.clusters[i] == 0) { 7452 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 7453 } 7454 } 7455 ctx->next_extent_page = 0; 7456 delete_snapshot_update_extent_pages(ctx, 0); 7457 } 7458 7459 static void 7460 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno) 7461 { 7462 struct delete_snapshot_ctx *ctx = cb_arg; 7463 7464 if (bserrno != 0) { 7465 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n", 7466 blob->id, bserrno); 7467 /* That error should not stop us from syncing metadata. */ 7468 } 7469 7470 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 7471 } 7472 7473 static void 7474 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 7475 { 7476 struct delete_snapshot_ctx *ctx = cb_arg; 7477 7478 if (bserrno) { 7479 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 7480 ctx->bserrno = bserrno; 7481 delete_snapshot_cleanup_clone(ctx, 0); 7482 return; 7483 } 7484 7485 /* Temporarily override md_ro flag for snapshot for MD modification */ 7486 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 7487 ctx->snapshot->md_ro = false; 7488 7489 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 7490 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 7491 sizeof(spdk_blob_id), true); 7492 if (ctx->bserrno != 0) { 7493 delete_snapshot_cleanup_clone(ctx, 0); 7494 return; 7495 } 7496 7497 if (blob_is_esnap_clone(ctx->snapshot)) { 7498 blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false, 7499 delete_snapshot_esnap_channels_destroyed_cb, 7500 ctx); 7501 return; 7502 } 7503 7504 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 7505 } 7506 7507 static void 7508 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 7509 { 7510 struct delete_snapshot_ctx *ctx = cb_arg; 7511 7512 if (bserrno) { 7513 SPDK_ERRLOG("Failed to open clone\n"); 7514 ctx->bserrno = bserrno; 7515 delete_snapshot_cleanup_snapshot(ctx, 0); 7516 return; 7517 } 7518 7519 ctx->clone = clone; 7520 7521 if (clone->locked_operation_in_progress) { 7522 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 7523 ctx->bserrno = -EBUSY; 7524 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 7525 return; 7526 } 7527 7528 clone->locked_operation_in_progress = true; 7529 7530 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 7531 } 7532 7533 static 
void 7534 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 7535 { 7536 struct spdk_blob_list *snapshot_entry = NULL; 7537 struct spdk_blob_list *clone_entry = NULL; 7538 struct spdk_blob_list *snapshot_clone_entry = NULL; 7539 7540 /* Get snapshot entry for the snapshot we want to remove */ 7541 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 7542 7543 assert(snapshot_entry != NULL); 7544 7545 /* Get clone of the snapshot (at this point there can be only one clone) */ 7546 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7547 assert(snapshot_entry->clone_count == 1); 7548 assert(clone_entry != NULL); 7549 7550 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 7551 * snapshot that we are removing */ 7552 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 7553 &snapshot_clone_entry); 7554 7555 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 7556 } 7557 7558 static void 7559 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 7560 { 7561 spdk_bs_sequence_t *seq = cb_arg; 7562 struct spdk_blob_list *snapshot_entry = NULL; 7563 uint32_t page_num; 7564 7565 if (bserrno) { 7566 SPDK_ERRLOG("Failed to remove blob\n"); 7567 bs_sequence_finish(seq, bserrno); 7568 return; 7569 } 7570 7571 /* Remove snapshot from the list */ 7572 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 7573 if (snapshot_entry != NULL) { 7574 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 7575 free(snapshot_entry); 7576 } 7577 7578 page_num = bs_blobid_to_page(blob->id); 7579 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 7580 blob->state = SPDK_BLOB_STATE_DIRTY; 7581 blob->active.num_pages = 0; 7582 blob_resize(blob, 0); 7583 7584 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 7585 } 7586 7587 static int 7588 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 7589 { 7590 struct spdk_blob_list *snapshot_entry = NULL; 7591 struct spdk_blob_list *clone_entry = NULL; 7592 struct spdk_blob *clone = NULL; 7593 bool has_one_clone = false; 7594 7595 /* Check if this is a snapshot with clones */ 7596 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 7597 if (snapshot_entry != NULL) { 7598 if (snapshot_entry->clone_count > 1) { 7599 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 7600 return -EBUSY; 7601 } else if (snapshot_entry->clone_count == 1) { 7602 has_one_clone = true; 7603 } 7604 } 7605 7606 /* Check if someone has this blob open (besides this delete context): 7607 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 7608 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 7609 * and that is ok, because we will update it accordingly */ 7610 if (blob->open_ref <= 2 && has_one_clone) { 7611 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7612 assert(clone_entry != NULL); 7613 clone = blob_lookup(blob->bs, clone_entry->id); 7614 7615 if (blob->open_ref == 2 && clone == NULL) { 7616 /* Clone is closed and someone else opened this blob */ 7617 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7618 return -EBUSY; 7619 } 7620 7621 *update_clone = true; 7622 return 0; 7623 } 7624 7625 if (blob->open_ref > 1) { 7626 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7627 return -EBUSY; 7628 } 7629 7630 assert(has_one_clone == false); 7631 *update_clone = false; 7632 return 0; 7633 } 7634 7635 static void 7636 
bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 7637 { 7638 spdk_bs_sequence_t *seq = cb_arg; 7639 7640 bs_sequence_finish(seq, -ENOMEM); 7641 } 7642 7643 static void 7644 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7645 { 7646 spdk_bs_sequence_t *seq = cb_arg; 7647 struct delete_snapshot_ctx *ctx; 7648 bool update_clone = false; 7649 7650 if (bserrno != 0) { 7651 bs_sequence_finish(seq, bserrno); 7652 return; 7653 } 7654 7655 blob_verify_md_op(blob); 7656 7657 ctx = calloc(1, sizeof(*ctx)); 7658 if (ctx == NULL) { 7659 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 7660 return; 7661 } 7662 7663 ctx->snapshot = blob; 7664 ctx->cb_fn = bs_delete_blob_finish; 7665 ctx->cb_arg = seq; 7666 7667 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 7668 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 7669 if (ctx->bserrno) { 7670 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7671 return; 7672 } 7673 7674 if (blob->locked_operation_in_progress) { 7675 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 7676 ctx->bserrno = -EBUSY; 7677 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7678 return; 7679 } 7680 7681 blob->locked_operation_in_progress = true; 7682 7683 /* 7684 * Remove the blob from the blob_store list now, to ensure it does not 7685 * get returned after this point by blob_lookup(). 7686 */ 7687 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 7688 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 7689 7690 if (update_clone) { 7691 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 7692 if (!ctx->page) { 7693 ctx->bserrno = -ENOMEM; 7694 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7695 return; 7696 } 7697 /* This blob is a snapshot with active clone - update clone first */ 7698 update_clone_on_snapshot_deletion(blob, ctx); 7699 } else { 7700 /* This blob does not have any clones - just remove it */ 7701 bs_blob_list_remove(blob); 7702 bs_delete_blob_finish(seq, blob, 0); 7703 free(ctx); 7704 } 7705 } 7706 7707 void 7708 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 7709 spdk_blob_op_complete cb_fn, void *cb_arg) 7710 { 7711 struct spdk_bs_cpl cpl; 7712 spdk_bs_sequence_t *seq; 7713 7714 SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid); 7715 7716 assert(spdk_get_thread() == bs->md_thread); 7717 7718 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7719 cpl.u.blob_basic.cb_fn = cb_fn; 7720 cpl.u.blob_basic.cb_arg = cb_arg; 7721 7722 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 7723 if (!seq) { 7724 cb_fn(cb_arg, -ENOMEM); 7725 return; 7726 } 7727 7728 spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq); 7729 } 7730 7731 /* END spdk_bs_delete_blob */ 7732 7733 /* START spdk_bs_open_blob */ 7734 7735 static void 7736 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7737 { 7738 struct spdk_blob *blob = cb_arg; 7739 struct spdk_blob *existing; 7740 7741 if (bserrno != 0) { 7742 blob_free(blob); 7743 seq->cpl.u.blob_handle.blob = NULL; 7744 bs_sequence_finish(seq, bserrno); 7745 return; 7746 } 7747 7748 existing = blob_lookup(blob->bs, blob->id); 7749 if (existing) { 7750 blob_free(blob); 7751 existing->open_ref++; 7752 seq->cpl.u.blob_handle.blob = existing; 7753 bs_sequence_finish(seq, 0); 7754 return; 7755 } 7756 7757 blob->open_ref++; 7758 7759 spdk_bit_array_set(blob->bs->open_blobids, blob->id); 7760 RB_INSERT(spdk_blob_tree, 
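/*
 * Illustrative sketch (not part of blobstore): driving spdk_bs_delete_blob()
 * above from the metadata thread. The callback name and error handling are
 * hypothetical; completion is asynchronous.
 *
 *	static void
 *	delete_done(void *cb_arg, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("blob deletion failed: %d\n", bserrno);
 *		}
 *	}
 *
 *	// Must run on the blobstore's md_thread (see the assert above).
 *	spdk_bs_delete_blob(bs, blobid, delete_done, NULL);
 */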
&blob->bs->open_blobs, blob); 7761 7762 bs_sequence_finish(seq, bserrno); 7763 } 7764 7765 static inline void 7766 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst) 7767 { 7768 #define FIELD_OK(field) \ 7769 offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size 7770 7771 #define SET_FIELD(field) \ 7772 if (FIELD_OK(field)) { \ 7773 dst->field = src->field; \ 7774 } \ 7775 7776 SET_FIELD(clear_method); 7777 SET_FIELD(esnap_ctx); 7778 7779 dst->opts_size = src->opts_size; 7780 7781 /* You should not remove this statement, but need to update the assert statement 7782 * if you add a new field, and also add a corresponding SET_FIELD statement */ 7783 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size"); 7784 7785 #undef FIELD_OK 7786 #undef SET_FIELD 7787 } 7788 7789 static void 7790 bs_open_blob(struct spdk_blob_store *bs, 7791 spdk_blob_id blobid, 7792 struct spdk_blob_open_opts *opts, 7793 spdk_blob_op_with_handle_complete cb_fn, 7794 void *cb_arg) 7795 { 7796 struct spdk_blob *blob; 7797 struct spdk_bs_cpl cpl; 7798 struct spdk_blob_open_opts opts_local; 7799 spdk_bs_sequence_t *seq; 7800 uint32_t page_num; 7801 7802 SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid); 7803 assert(spdk_get_thread() == bs->md_thread); 7804 7805 page_num = bs_blobid_to_page(blobid); 7806 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 7807 /* Invalid blobid */ 7808 cb_fn(cb_arg, NULL, -ENOENT); 7809 return; 7810 } 7811 7812 blob = blob_lookup(bs, blobid); 7813 if (blob) { 7814 blob->open_ref++; 7815 cb_fn(cb_arg, blob, 0); 7816 return; 7817 } 7818 7819 blob = blob_alloc(bs, blobid); 7820 if (!blob) { 7821 cb_fn(cb_arg, NULL, -ENOMEM); 7822 return; 7823 } 7824 7825 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 7826 if (opts) { 7827 blob_open_opts_copy(opts, &opts_local); 7828 } 7829 7830 blob->clear_method = opts_local.clear_method; 7831 7832 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 7833 cpl.u.blob_handle.cb_fn = cb_fn; 7834 cpl.u.blob_handle.cb_arg = cb_arg; 7835 cpl.u.blob_handle.blob = blob; 7836 cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx; 7837 7838 seq = bs_sequence_start_bs(bs->md_channel, &cpl); 7839 if (!seq) { 7840 blob_free(blob); 7841 cb_fn(cb_arg, NULL, -ENOMEM); 7842 return; 7843 } 7844 7845 blob_load(seq, blob, bs_open_blob_cpl, blob); 7846 } 7847 7848 void 7849 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 7850 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7851 { 7852 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 7853 } 7854 7855 void 7856 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 7857 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7858 { 7859 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 7860 } 7861 7862 /* END spdk_bs_open_blob */ 7863 7864 /* START spdk_blob_set_read_only */ 7865 int 7866 spdk_blob_set_read_only(struct spdk_blob *blob) 7867 { 7868 blob_verify_md_op(blob); 7869 7870 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 7871 7872 blob->state = SPDK_BLOB_STATE_DIRTY; 7873 return 0; 7874 } 7875 /* END spdk_blob_set_read_only */ 7876 7877 /* START spdk_blob_sync_md */ 7878 7879 static void 7880 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7881 { 7882 struct spdk_blob *blob = cb_arg; 7883 7884 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 7885 blob->data_ro = true; 7886 blob->md_ro = true; 7887 } 7888 7889 
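/*
 * Illustrative sketch: opening a blob with non-default open options via
 * spdk_bs_open_blob_ext() above. spdk_blob_open_opts_init() sets opts_size,
 * which FIELD_OK() uses for forward/backward compatibility of the opts
 * struct. The callback name open_done is hypothetical.
 *
 *	struct spdk_blob_open_opts opts;
 *
 *	spdk_blob_open_opts_init(&opts, sizeof(opts));
 *	opts.clear_method = BLOB_CLEAR_WITH_NONE;
 *	spdk_bs_open_blob_ext(bs, blobid, &opts, open_done, NULL);
 */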
bs_sequence_finish(seq, bserrno); 7890 } 7891 7892 static void 7893 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7894 { 7895 struct spdk_bs_cpl cpl; 7896 spdk_bs_sequence_t *seq; 7897 7898 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7899 cpl.u.blob_basic.cb_fn = cb_fn; 7900 cpl.u.blob_basic.cb_arg = cb_arg; 7901 7902 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 7903 if (!seq) { 7904 cb_fn(cb_arg, -ENOMEM); 7905 return; 7906 } 7907 7908 blob_persist(seq, blob, blob_sync_md_cpl, blob); 7909 } 7910 7911 void 7912 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7913 { 7914 blob_verify_md_op(blob); 7915 7916 SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id); 7917 7918 if (blob->md_ro) { 7919 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 7920 cb_fn(cb_arg, 0); 7921 return; 7922 } 7923 7924 blob_sync_md(blob, cb_fn, cb_arg); 7925 } 7926 7927 /* END spdk_blob_sync_md */ 7928 7929 struct spdk_blob_cluster_op_ctx { 7930 struct spdk_thread *thread; 7931 struct spdk_blob *blob; 7932 uint32_t cluster_num; /* cluster index in blob */ 7933 uint32_t cluster; /* cluster on disk */ 7934 uint32_t extent_page; /* extent page on disk */ 7935 struct spdk_blob_md_page *page; /* preallocated extent page */ 7936 int rc; 7937 spdk_blob_op_complete cb_fn; 7938 void *cb_arg; 7939 }; 7940 7941 static void 7942 blob_op_cluster_msg_cpl(void *arg) 7943 { 7944 struct spdk_blob_cluster_op_ctx *ctx = arg; 7945 7946 ctx->cb_fn(ctx->cb_arg, ctx->rc); 7947 free(ctx); 7948 } 7949 7950 static void 7951 blob_op_cluster_msg_cb(void *arg, int bserrno) 7952 { 7953 struct spdk_blob_cluster_op_ctx *ctx = arg; 7954 7955 ctx->rc = bserrno; 7956 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 7957 } 7958 7959 static void 7960 blob_insert_new_ep_cb(void *arg, int bserrno) 7961 { 7962 struct spdk_blob_cluster_op_ctx *ctx = arg; 7963 uint32_t *extent_page; 7964 7965 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 7966 *extent_page = ctx->extent_page; 7967 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 7968 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 7969 } 7970 7971 struct spdk_blob_write_extent_page_ctx { 7972 struct spdk_blob_store *bs; 7973 7974 uint32_t extent; 7975 struct spdk_blob_md_page *page; 7976 }; 7977 7978 static void 7979 blob_free_cluster_msg_cb(void *arg, int bserrno) 7980 { 7981 struct spdk_blob_cluster_op_ctx *ctx = arg; 7982 7983 spdk_spin_lock(&ctx->blob->bs->used_lock); 7984 bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster)); 7985 spdk_spin_unlock(&ctx->blob->bs->used_lock); 7986 7987 ctx->rc = bserrno; 7988 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 7989 } 7990 7991 static void 7992 blob_free_cluster_update_ep_cb(void *arg, int bserrno) 7993 { 7994 struct spdk_blob_cluster_op_ctx *ctx = arg; 7995 7996 if (bserrno != 0 || ctx->blob->bs->clean == 0) { 7997 blob_free_cluster_msg_cb(ctx, bserrno); 7998 return; 7999 } 8000 8001 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8002 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8003 } 8004 8005 static void 8006 blob_free_cluster_free_ep_cb(void *arg, int bserrno) 8007 { 8008 struct spdk_blob_cluster_op_ctx *ctx = arg; 8009 8010 spdk_spin_lock(&ctx->blob->bs->used_lock); 8011 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8012 bs_release_md_page(ctx->blob->bs, ctx->extent_page); 8013 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8014 ctx->blob->state = 
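/*
 * The cluster-op helpers in this area follow a common SPDK cross-thread
 * pattern: the originating thread records itself in ctx->thread, the work is
 * sent to the md_thread, and the result is bounced back with
 * spdk_thread_send_msg() (blob_op_cluster_msg_cb -> blob_op_cluster_msg_cpl)
 * so the user callback runs on the caller's thread. A minimal sketch of the
 * same pattern, with hypothetical names:
 *
 *	static void
 *	op_done_on_origin(void *arg)
 *	{
 *		struct my_op_ctx *ctx = arg;
 *
 *		ctx->cb_fn(ctx->cb_arg, ctx->rc);
 *		free(ctx);
 *	}
 *
 *	static void
 *	op_on_md_thread(void *arg)
 *	{
 *		struct my_op_ctx *ctx = arg;
 *
 *		ctx->rc = do_md_work(ctx);	// hypothetical helper
 *		spdk_thread_send_msg(ctx->orig_thread, op_done_on_origin, ctx);
 *	}
 */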
SPDK_BLOB_STATE_DIRTY; 8015 blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx); 8016 } 8017 8018 static void 8019 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8020 { 8021 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8022 8023 free(ctx); 8024 bs_sequence_finish(seq, bserrno); 8025 } 8026 8027 static void 8028 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8029 { 8030 struct spdk_blob_write_extent_page_ctx *ctx = cb_arg; 8031 8032 if (bserrno != 0) { 8033 blob_persist_extent_page_cpl(seq, ctx, bserrno); 8034 return; 8035 } 8036 bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent), 8037 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 8038 blob_persist_extent_page_cpl, ctx); 8039 } 8040 8041 static void 8042 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 8043 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 8044 { 8045 struct spdk_blob_write_extent_page_ctx *ctx; 8046 spdk_bs_sequence_t *seq; 8047 struct spdk_bs_cpl cpl; 8048 8049 ctx = calloc(1, sizeof(*ctx)); 8050 if (!ctx) { 8051 cb_fn(cb_arg, -ENOMEM); 8052 return; 8053 } 8054 ctx->bs = blob->bs; 8055 ctx->extent = extent; 8056 ctx->page = page; 8057 8058 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8059 cpl.u.blob_basic.cb_fn = cb_fn; 8060 cpl.u.blob_basic.cb_arg = cb_arg; 8061 8062 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8063 if (!seq) { 8064 free(ctx); 8065 cb_fn(cb_arg, -ENOMEM); 8066 return; 8067 } 8068 8069 assert(page); 8070 page->next = SPDK_INVALID_MD_PAGE; 8071 page->id = blob->id; 8072 page->sequence_num = 0; 8073 8074 blob_serialize_extent_page(blob, cluster_num, page); 8075 8076 page->crc = blob_md_page_calc_crc(page); 8077 8078 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true); 8079 8080 bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx); 8081 } 8082 8083 static void 8084 blob_insert_cluster_msg(void *arg) 8085 { 8086 struct spdk_blob_cluster_op_ctx *ctx = arg; 8087 uint32_t *extent_page; 8088 8089 ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 8090 if (ctx->rc != 0) { 8091 spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx); 8092 return; 8093 } 8094 8095 if (ctx->blob->use_extent_table == false) { 8096 /* Extent table is not used, proceed with sync of md that will only use extents_rle. */ 8097 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8098 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8099 return; 8100 } 8101 8102 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8103 if (*extent_page == 0) { 8104 /* Extent page requires allocation. 8105 * It was already claimed in the used_md_pages map and placed in ctx. */ 8106 assert(ctx->extent_page != 0); 8107 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8108 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 8109 blob_insert_new_ep_cb, ctx); 8110 } else { 8111 /* It is possible for the original thread to have allocated an extent page 8112 * for a different cluster that lands in the same extent page. In that case, 8113 * proceed with updating the existing extent page, but release the additional one.
*/ 8114 if (ctx->extent_page != 0) { 8115 spdk_spin_lock(&ctx->blob->bs->used_lock); 8116 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8117 bs_release_md_page(ctx->blob->bs, ctx->extent_page); 8118 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8119 ctx->extent_page = 0; 8120 } 8121 /* Extent page already allocated. 8122 * Every cluster allocation requires just an update of a single extent page. */ 8123 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 8124 blob_op_cluster_msg_cb, ctx); 8125 } 8126 } 8127 8128 static void 8129 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 8130 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page, 8131 spdk_blob_op_complete cb_fn, void *cb_arg) 8132 { 8133 struct spdk_blob_cluster_op_ctx *ctx; 8134 8135 ctx = calloc(1, sizeof(*ctx)); 8136 if (ctx == NULL) { 8137 cb_fn(cb_arg, -ENOMEM); 8138 return; 8139 } 8140 8141 ctx->thread = spdk_get_thread(); 8142 ctx->blob = blob; 8143 ctx->cluster_num = cluster_num; 8144 ctx->cluster = cluster; 8145 ctx->extent_page = extent_page; 8146 ctx->page = page; 8147 ctx->cb_fn = cb_fn; 8148 ctx->cb_arg = cb_arg; 8149 8150 spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx); 8151 } 8152 8153 static void 8154 blob_free_cluster_msg(void *arg) 8155 { 8156 struct spdk_blob_cluster_op_ctx *ctx = arg; 8157 uint32_t *extent_page; 8158 uint32_t start_cluster_idx; 8159 bool free_extent_page = true; 8160 size_t i; 8161 8162 ctx->cluster = ctx->blob->active.clusters[ctx->cluster_num]; 8163 ctx->blob->active.clusters[ctx->cluster_num] = 0; 8164 8165 if (ctx->blob->use_extent_table == false) { 8166 /* Extent table is not used, proceed with sync of md that will only use extents_rle.
*/ 8167 spdk_spin_lock(&ctx->blob->bs->used_lock); 8168 bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster)); 8169 spdk_spin_unlock(&ctx->blob->bs->used_lock); 8170 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 8171 blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx); 8172 return; 8173 } 8174 8175 extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 8176 8177 /* There shouldn't be parallel release operations on the same cluster */ 8178 assert(*extent_page == ctx->extent_page); 8179 8180 start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP; 8181 for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) { 8182 if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) { 8183 free_extent_page = false; 8184 break; 8185 } 8186 } 8187 8188 if (free_extent_page) { 8189 assert(ctx->extent_page != 0); 8190 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 8191 ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0; 8192 blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page, 8193 blob_free_cluster_free_ep_cb, ctx); 8194 } else { 8195 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 8196 blob_free_cluster_update_ep_cb, ctx); 8197 } 8198 } 8199 8200 8201 static void 8202 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page, 8203 struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg) 8204 { 8205 struct spdk_blob_cluster_op_ctx *ctx; 8206 8207 ctx = calloc(1, sizeof(*ctx)); 8208 if (ctx == NULL) { 8209 cb_fn(cb_arg, -ENOMEM); 8210 return; 8211 } 8212 8213 ctx->thread = spdk_get_thread(); 8214 ctx->blob = blob; 8215 ctx->cluster_num = cluster_num; 8216 ctx->extent_page = extent_page; 8217 ctx->page = page; 8218 ctx->cb_fn = cb_fn; 8219 ctx->cb_arg = cb_arg; 8220 8221 spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx); 8222 } 8223 8224 /* START spdk_blob_close */ 8225 8226 static void 8227 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8228 { 8229 struct spdk_blob *blob = cb_arg; 8230 8231 if (bserrno == 0) { 8232 blob->open_ref--; 8233 if (blob->open_ref == 0) { 8234 /* 8235 * Blobs with active.num_pages == 0 are deleted blobs. 8236 * These blobs are removed from the blob_store list 8237 * when the deletion process starts - so don't try to 8238 * remove them again.
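 * (bs_delete_open_cpl() above performs that removal when deletion starts, and
 * bs_delete_blob_finish() persists the blob with active.num_pages == 0, which
 * is why the num_pages check below is sufficient.)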
8239 */ 8240 if (blob->active.num_pages > 0) { 8241 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 8242 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 8243 } 8244 blob_free(blob); 8245 } 8246 } 8247 8248 bs_sequence_finish(seq, bserrno); 8249 } 8250 8251 static void 8252 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno) 8253 { 8254 spdk_bs_sequence_t *seq = cb_arg; 8255 8256 if (bserrno != 0) { 8257 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n", 8258 blob->id, bserrno); 8259 bs_sequence_finish(seq, bserrno); 8260 return; 8261 } 8262 8263 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n", 8264 blob->id, spdk_thread_get_name(spdk_get_thread())); 8265 8266 /* Sync metadata */ 8267 blob_persist(seq, blob, blob_close_cpl, blob); 8268 } 8269 8270 void 8271 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 8272 { 8273 struct spdk_bs_cpl cpl; 8274 spdk_bs_sequence_t *seq; 8275 8276 blob_verify_md_op(blob); 8277 8278 SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id); 8279 8280 if (blob->open_ref == 0) { 8281 cb_fn(cb_arg, -EBADF); 8282 return; 8283 } 8284 8285 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 8286 cpl.u.blob_basic.cb_fn = cb_fn; 8287 cpl.u.blob_basic.cb_arg = cb_arg; 8288 8289 seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl); 8290 if (!seq) { 8291 cb_fn(cb_arg, -ENOMEM); 8292 return; 8293 } 8294 8295 if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) { 8296 blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq); 8297 return; 8298 } 8299 8300 /* Sync metadata */ 8301 blob_persist(seq, blob, blob_close_cpl, blob); 8302 } 8303 8304 /* END spdk_blob_close */ 8305 8306 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 8307 { 8308 return spdk_get_io_channel(bs); 8309 } 8310 8311 void 8312 spdk_bs_free_io_channel(struct spdk_io_channel *channel) 8313 { 8314 blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel)); 8315 spdk_put_io_channel(channel); 8316 } 8317 8318 void 8319 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 8320 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 8321 { 8322 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 8323 SPDK_BLOB_UNMAP); 8324 } 8325 8326 void 8327 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 8328 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 8329 { 8330 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 8331 SPDK_BLOB_WRITE_ZEROES); 8332 } 8333 8334 void 8335 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 8336 void *payload, uint64_t offset, uint64_t length, 8337 spdk_blob_op_complete cb_fn, void *cb_arg) 8338 { 8339 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 8340 SPDK_BLOB_WRITE); 8341 } 8342 8343 void 8344 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 8345 void *payload, uint64_t offset, uint64_t length, 8346 spdk_blob_op_complete cb_fn, void *cb_arg) 8347 { 8348 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 8349 SPDK_BLOB_READ); 8350 } 8351 8352 void 8353 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 8354 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8355 spdk_blob_op_complete cb_fn, void *cb_arg) 
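/*
 * Illustrative sketch: a simple write through the channel-based I/O API
 * above. offset and length are expressed in io units; everything except the
 * spdk_* calls is hypothetical.
 *
 *	struct spdk_io_channel *ch = spdk_bs_alloc_io_channel(bs);
 *
 *	if (ch == NULL) {
 *		return -ENOMEM;
 *	}
 *	// payload must cover length * io_unit_size bytes.
 *	spdk_blob_io_write(blob, ch, payload, 0, 1, write_done, NULL);
 */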
8356 { 8357 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 8358 } 8359 8360 void 8361 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 8362 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8363 spdk_blob_op_complete cb_fn, void *cb_arg) 8364 { 8365 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 8366 } 8367 8368 void 8369 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 8370 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8371 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 8372 { 8373 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 8374 io_opts); 8375 } 8376 8377 void 8378 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 8379 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 8380 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 8381 { 8382 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 8383 io_opts); 8384 } 8385 8386 struct spdk_bs_iter_ctx { 8387 int64_t page_num; 8388 struct spdk_blob_store *bs; 8389 8390 spdk_blob_op_with_handle_complete cb_fn; 8391 void *cb_arg; 8392 }; 8393 8394 static void 8395 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 8396 { 8397 struct spdk_bs_iter_ctx *ctx = cb_arg; 8398 struct spdk_blob_store *bs = ctx->bs; 8399 spdk_blob_id id; 8400 8401 if (bserrno == 0) { 8402 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 8403 free(ctx); 8404 return; 8405 } 8406 8407 ctx->page_num++; 8408 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 8409 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 8410 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 8411 free(ctx); 8412 return; 8413 } 8414 8415 id = bs_page_to_blobid(ctx->page_num); 8416 8417 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 8418 } 8419 8420 void 8421 spdk_bs_iter_first(struct spdk_blob_store *bs, 8422 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8423 { 8424 struct spdk_bs_iter_ctx *ctx; 8425 8426 ctx = calloc(1, sizeof(*ctx)); 8427 if (!ctx) { 8428 cb_fn(cb_arg, NULL, -ENOMEM); 8429 return; 8430 } 8431 8432 ctx->page_num = -1; 8433 ctx->bs = bs; 8434 ctx->cb_fn = cb_fn; 8435 ctx->cb_arg = cb_arg; 8436 8437 bs_iter_cpl(ctx, NULL, -1); 8438 } 8439 8440 static void 8441 bs_iter_close_cpl(void *cb_arg, int bserrno) 8442 { 8443 struct spdk_bs_iter_ctx *ctx = cb_arg; 8444 8445 bs_iter_cpl(ctx, NULL, -1); 8446 } 8447 8448 void 8449 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 8450 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 8451 { 8452 struct spdk_bs_iter_ctx *ctx; 8453 8454 assert(blob != NULL); 8455 8456 ctx = calloc(1, sizeof(*ctx)); 8457 if (!ctx) { 8458 cb_fn(cb_arg, NULL, -ENOMEM); 8459 return; 8460 } 8461 8462 ctx->page_num = bs_blobid_to_page(blob->id); 8463 ctx->bs = bs; 8464 ctx->cb_fn = cb_fn; 8465 ctx->cb_arg = cb_arg; 8466 8467 /* Close the existing blob */ 8468 spdk_blob_close(blob, bs_iter_close_cpl, ctx); 8469 } 8470 8471 static int 8472 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 8473 uint16_t value_len, bool internal) 8474 { 8475 struct spdk_xattr_tailq *xattrs; 8476 struct spdk_xattr *xattr; 8477 size_t desc_size; 8478 void *tmp; 8479 8480 blob_verify_md_op(blob); 8481 8482 if 
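/*
 * Illustrative sketch: visiting every blob with the iterator above. Each
 * spdk_bs_iter_next() call closes the blob it was handed, and iteration
 * finishes with -ENOENT. g_bs and iter_done are hypothetical.
 *
 *	static void
 *	iter_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			// -ENOENT means every blob has been visited
 *			return;
 *		}
 *		// ... inspect blob here ...
 *		spdk_bs_iter_next(g_bs, blob, iter_done, cb_arg);
 *	}
 *
 *	spdk_bs_iter_first(g_bs, iter_done, NULL);
 */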
(blob->md_ro) { 8483 return -EPERM; 8484 } 8485 8486 desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len; 8487 if (desc_size > SPDK_BS_MAX_DESC_SIZE) { 8488 SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page %zu\n", name, 8489 desc_size, SPDK_BS_MAX_DESC_SIZE); 8490 return -ENOMEM; 8491 } 8492 8493 if (internal) { 8494 xattrs = &blob->xattrs_internal; 8495 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR; 8496 } else { 8497 xattrs = &blob->xattrs; 8498 } 8499 8500 TAILQ_FOREACH(xattr, xattrs, link) { 8501 if (!strcmp(name, xattr->name)) { 8502 tmp = malloc(value_len); 8503 if (!tmp) { 8504 return -ENOMEM; 8505 } 8506 8507 free(xattr->value); 8508 xattr->value_len = value_len; 8509 xattr->value = tmp; 8510 memcpy(xattr->value, value, value_len); 8511 8512 blob->state = SPDK_BLOB_STATE_DIRTY; 8513 8514 return 0; 8515 } 8516 } 8517 8518 xattr = calloc(1, sizeof(*xattr)); 8519 if (!xattr) { 8520 return -ENOMEM; 8521 } 8522 8523 xattr->name = strdup(name); 8524 if (!xattr->name) { 8525 free(xattr); 8526 return -ENOMEM; 8527 } 8528 8529 xattr->value_len = value_len; 8530 xattr->value = malloc(value_len); 8531 if (!xattr->value) { 8532 free(xattr->name); 8533 free(xattr); 8534 return -ENOMEM; 8535 } 8536 memcpy(xattr->value, value, value_len); 8537 TAILQ_INSERT_TAIL(xattrs, xattr, link); 8538 8539 blob->state = SPDK_BLOB_STATE_DIRTY; 8540 8541 return 0; 8542 } 8543 8544 int 8545 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 8546 uint16_t value_len) 8547 { 8548 return blob_set_xattr(blob, name, value, value_len, false); 8549 } 8550 8551 static int 8552 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal) 8553 { 8554 struct spdk_xattr_tailq *xattrs; 8555 struct spdk_xattr *xattr; 8556 8557 blob_verify_md_op(blob); 8558 8559 if (blob->md_ro) { 8560 return -EPERM; 8561 } 8562 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 8563 8564 TAILQ_FOREACH(xattr, xattrs, link) { 8565 if (!strcmp(name, xattr->name)) { 8566 TAILQ_REMOVE(xattrs, xattr, link); 8567 free(xattr->value); 8568 free(xattr->name); 8569 free(xattr); 8570 8571 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) { 8572 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR; 8573 } 8574 blob->state = SPDK_BLOB_STATE_DIRTY; 8575 8576 return 0; 8577 } 8578 } 8579 8580 return -ENOENT; 8581 } 8582 8583 int 8584 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name) 8585 { 8586 return blob_remove_xattr(blob, name, false); 8587 } 8588 8589 static int 8590 blob_get_xattr_value(struct spdk_blob *blob, const char *name, 8591 const void **value, size_t *value_len, bool internal) 8592 { 8593 struct spdk_xattr *xattr; 8594 struct spdk_xattr_tailq *xattrs; 8595 8596 xattrs = internal ?
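/*
 * Illustrative sketch: setting a user xattr with spdk_blob_set_xattr() above.
 * The update only marks the blob dirty in memory; it reaches the device on
 * the next spdk_blob_sync_md() (or when the blob is closed). sync_done is
 * hypothetical.
 *
 *	const char *val = "bar";
 *	int rc;
 *
 *	rc = spdk_blob_set_xattr(blob, "foo", val, strlen(val) + 1);
 *	if (rc == 0) {
 *		spdk_blob_sync_md(blob, sync_done, NULL);
 *	}
 */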
&blob->xattrs_internal : &blob->xattrs; 8597 8598 TAILQ_FOREACH(xattr, xattrs, link) { 8599 if (!strcmp(name, xattr->name)) { 8600 *value = xattr->value; 8601 *value_len = xattr->value_len; 8602 return 0; 8603 } 8604 } 8605 return -ENOENT; 8606 } 8607 8608 int 8609 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 8610 const void **value, size_t *value_len) 8611 { 8612 blob_verify_md_op(blob); 8613 8614 return blob_get_xattr_value(blob, name, value, value_len, false); 8615 } 8616 8617 struct spdk_xattr_names { 8618 uint32_t count; 8619 const char *names[0]; 8620 }; 8621 8622 static int 8623 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 8624 { 8625 struct spdk_xattr *xattr; 8626 int count = 0; 8627 8628 TAILQ_FOREACH(xattr, xattrs, link) { 8629 count++; 8630 } 8631 8632 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 8633 if (*names == NULL) { 8634 return -ENOMEM; 8635 } 8636 8637 TAILQ_FOREACH(xattr, xattrs, link) { 8638 (*names)->names[(*names)->count++] = xattr->name; 8639 } 8640 8641 return 0; 8642 } 8643 8644 int 8645 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 8646 { 8647 blob_verify_md_op(blob); 8648 8649 return blob_get_xattr_names(&blob->xattrs, names); 8650 } 8651 8652 uint32_t 8653 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 8654 { 8655 assert(names != NULL); 8656 8657 return names->count; 8658 } 8659 8660 const char * 8661 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 8662 { 8663 if (index >= names->count) { 8664 return NULL; 8665 } 8666 8667 return names->names[index]; 8668 } 8669 8670 void 8671 spdk_xattr_names_free(struct spdk_xattr_names *names) 8672 { 8673 free(names); 8674 } 8675 8676 struct spdk_bs_type 8677 spdk_bs_get_bstype(struct spdk_blob_store *bs) 8678 { 8679 return bs->bstype; 8680 } 8681 8682 void 8683 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 8684 { 8685 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 8686 } 8687 8688 bool 8689 spdk_blob_is_read_only(struct spdk_blob *blob) 8690 { 8691 assert(blob != NULL); 8692 return (blob->data_ro || blob->md_ro); 8693 } 8694 8695 bool 8696 spdk_blob_is_snapshot(struct spdk_blob *blob) 8697 { 8698 struct spdk_blob_list *snapshot_entry; 8699 8700 assert(blob != NULL); 8701 8702 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 8703 if (snapshot_entry == NULL) { 8704 return false; 8705 } 8706 8707 return true; 8708 } 8709 8710 bool 8711 spdk_blob_is_clone(struct spdk_blob *blob) 8712 { 8713 assert(blob != NULL); 8714 8715 if (blob->parent_id != SPDK_BLOBID_INVALID && 8716 blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) { 8717 assert(spdk_blob_is_thin_provisioned(blob)); 8718 return true; 8719 } 8720 8721 return false; 8722 } 8723 8724 bool 8725 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 8726 { 8727 assert(blob != NULL); 8728 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 8729 } 8730 8731 bool 8732 spdk_blob_is_esnap_clone(const struct spdk_blob *blob) 8733 { 8734 return blob_is_esnap_clone(blob); 8735 } 8736 8737 static void 8738 blob_update_clear_method(struct spdk_blob *blob) 8739 { 8740 enum blob_clear_method stored_cm; 8741 8742 assert(blob != NULL); 8743 8744 /* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored 8745 * in metadata previously. If something other than the default was 8746 * specified, ignore the stored value and use what was passed in.
8747 */ 8748 stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT); 8749 8750 if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) { 8751 blob->clear_method = stored_cm; 8752 } else if (blob->clear_method != stored_cm) { 8753 SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n", 8754 blob->clear_method, stored_cm); 8755 } 8756 } 8757 8758 spdk_blob_id 8759 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id) 8760 { 8761 struct spdk_blob_list *snapshot_entry = NULL; 8762 struct spdk_blob_list *clone_entry = NULL; 8763 8764 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 8765 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 8766 if (clone_entry->id == blob_id) { 8767 return snapshot_entry->id; 8768 } 8769 } 8770 } 8771 8772 return SPDK_BLOBID_INVALID; 8773 } 8774 8775 int 8776 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, 8777 size_t *count) 8778 { 8779 struct spdk_blob_list *snapshot_entry, *clone_entry; 8780 size_t n; 8781 8782 snapshot_entry = bs_get_snapshot_entry(bs, blobid); 8783 if (snapshot_entry == NULL) { 8784 *count = 0; 8785 return 0; 8786 } 8787 8788 if (ids == NULL || *count < snapshot_entry->clone_count) { 8789 *count = snapshot_entry->clone_count; 8790 return -ENOMEM; 8791 } 8792 *count = snapshot_entry->clone_count; 8793 8794 n = 0; 8795 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 8796 ids[n++] = clone_entry->id; 8797 } 8798 8799 return 0; 8800 } 8801 8802 static void 8803 bs_load_grow_continue(struct spdk_bs_load_ctx *ctx) 8804 { 8805 int rc; 8806 8807 if (ctx->super->size == 0) { 8808 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 8809 } 8810 8811 if (ctx->super->io_unit_size == 0) { 8812 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 8813 } 8814 8815 /* Parse the super block */ 8816 ctx->bs->clean = 1; 8817 ctx->bs->cluster_sz = ctx->super->cluster_size; 8818 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 8819 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 8820 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) { 8821 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster); 8822 } 8823 ctx->bs->io_unit_size = ctx->super->io_unit_size; 8824 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 8825 if (rc < 0) { 8826 bs_load_ctx_fail(ctx, -ENOMEM); 8827 return; 8828 } 8829 ctx->bs->md_start = ctx->super->md_start; 8830 ctx->bs->md_len = ctx->super->md_len; 8831 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 8832 if (rc < 0) { 8833 bs_load_ctx_fail(ctx, -ENOMEM); 8834 return; 8835 } 8836 8837 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 8838 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 8839 ctx->bs->super_blob = ctx->super->super_blob; 8840 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 8841 8842 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) { 8843 SPDK_ERRLOG("Can not grow an unclean blobstore, please load it normally to clean it.\n"); 8844 bs_load_ctx_fail(ctx, -EIO); 8845 return; 8846 } else { 8847 bs_load_read_used_pages(ctx); 8848 } 8849 } 8850 8851 static void 8852 bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8853 { 8854 struct spdk_bs_load_ctx *ctx = cb_arg; 8855 8856 if (bserrno != 0) { 8857 bs_load_ctx_fail(ctx, bserrno); 8858 return; 
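/*
 * Worked example (illustrative numbers) for the sizing math in
 * bs_load_grow_continue() above: a 1 GiB device with 1 MiB clusters and a
 * 4 KiB SPDK_BS_PAGE_SIZE gives total_clusters = 1024 and
 * pages_per_cluster = 256. If md_start + md_len = 512 pages, metadata
 * consumes ceil(512 / 256) = 2 clusters, so
 * total_data_clusters = 1024 - 2 = 1022.
 */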
8859 } 8860 bs_load_grow_continue(ctx); 8861 } 8862 8863 static void 8864 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8865 { 8866 struct spdk_bs_load_ctx *ctx = cb_arg; 8867 8868 if (bserrno != 0) { 8869 bs_load_ctx_fail(ctx, bserrno); 8870 return; 8871 } 8872 8873 spdk_free(ctx->mask); 8874 8875 bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 8876 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 8877 bs_load_grow_super_write_cpl, ctx); 8878 } 8879 8880 static void 8881 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8882 { 8883 struct spdk_bs_load_ctx *ctx = cb_arg; 8884 uint64_t lba, lba_count; 8885 uint64_t dev_size; 8886 uint64_t total_clusters; 8887 8888 if (bserrno != 0) { 8889 bs_load_ctx_fail(ctx, bserrno); 8890 return; 8891 } 8892 8893 /* The type must be correct */ 8894 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 8895 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 8896 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 8897 struct spdk_blob_md_page) * 8)); 8898 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 8899 total_clusters = dev_size / ctx->super->cluster_size; 8900 ctx->mask->length = total_clusters; 8901 8902 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 8903 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 8904 bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count, 8905 bs_load_grow_used_clusters_write_cpl, ctx); 8906 } 8907 8908 static void 8909 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx) 8910 { 8911 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 8912 uint64_t lba, lba_count, mask_size; 8913 8914 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 8915 total_clusters = dev_size / ctx->super->cluster_size; 8916 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 8917 spdk_divide_round_up(total_clusters, 8), 8918 SPDK_BS_PAGE_SIZE); 8919 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 8920 /* No need to grow, or no space to grow */ 8921 if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) { 8922 SPDK_DEBUGLOG(blob, "No grow\n"); 8923 bs_load_grow_continue(ctx); 8924 return; 8925 } 8926 8927 SPDK_DEBUGLOG(blob, "Resize blobstore\n"); 8928 8929 ctx->super->size = dev_size; 8930 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 8931 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 8932 8933 mask_size = used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 8934 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 8935 SPDK_MALLOC_DMA); 8936 if (!ctx->mask) { 8937 bs_load_ctx_fail(ctx, -ENOMEM); 8938 return; 8939 } 8940 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 8941 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 8942 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 8943 bs_load_grow_used_clusters_read_cpl, ctx); 8944 } 8945 8946 static void 8947 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8948 { 8949 struct spdk_bs_load_ctx *ctx = cb_arg; 8950 int rc; 8951 8952 rc = bs_super_validate(ctx->super, ctx->bs); 8953 if (rc != 0) { 8954 bs_load_ctx_fail(ctx, rc); 8955 return; 8956 } 8957 8958 bs_load_try_to_grow(ctx); 8959 } 8960 8961 struct
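/*
 * Worked example (illustrative numbers) for used_cluster_mask_len in
 * bs_load_try_to_grow() above: growing to total_clusters = 1,000,000 needs
 * ceil(1,000,000 / 8) = 125,000 bytes of bitmap plus the small
 * spdk_bs_md_mask header; with a 4 KiB SPDK_BS_PAGE_SIZE that rounds up to
 * 31 pages, which must still fit in front of the used_blobid mask.
 */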
spdk_bs_grow_ctx { 8962 struct spdk_blob_store *bs; 8963 struct spdk_bs_super_block *super; 8964 8965 struct spdk_bit_pool *new_used_clusters; 8966 struct spdk_bs_md_mask *new_used_clusters_mask; 8967 8968 spdk_bs_sequence_t *seq; 8969 }; 8970 8971 static void 8972 bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno) 8973 { 8974 if (bserrno != 0) { 8975 spdk_bit_pool_free(&ctx->new_used_clusters); 8976 } 8977 8978 bs_sequence_finish(ctx->seq, bserrno); 8979 free(ctx->new_used_clusters_mask); 8980 spdk_free(ctx->super); 8981 free(ctx); 8982 } 8983 8984 static void 8985 bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 8986 { 8987 struct spdk_bs_grow_ctx *ctx = cb_arg; 8988 struct spdk_blob_store *bs = ctx->bs; 8989 uint64_t total_clusters; 8990 8991 if (bserrno != 0) { 8992 bs_grow_live_done(ctx, bserrno); 8993 return; 8994 } 8995 8996 /* 8997 * Blobstore is not clean until unload; for now only the super block is up to date. 8998 * This is similar to the state right after blobstore init, when bs_write_used_md() has 8999 * not yet executed. 9000 * When cleanly unloaded, the used md pages will be written out. 9001 * In case of unclean shutdown, loading the blobstore will go through the recovery path, 9002 * correctly filling out the used_clusters with the new size and writing it out. 9003 */ 9004 bs->clean = 0; 9005 9006 /* Reverting the super->size past this point is complex, avoid any error paths 9007 * that would require doing so. */ 9008 spdk_spin_lock(&bs->used_lock); 9009 9010 total_clusters = ctx->super->size / ctx->super->cluster_size; 9011 9012 assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters)); 9013 spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask); 9014 9015 assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters)); 9016 spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask); 9017 9018 spdk_bit_pool_free(&bs->used_clusters); 9019 bs->used_clusters = ctx->new_used_clusters; 9020 9021 bs->total_clusters = total_clusters; 9022 bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up( 9023 bs->md_start + bs->md_len, bs->pages_per_cluster); 9024 9025 bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters); 9026 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 9027 spdk_spin_unlock(&bs->used_lock); 9028 9029 bs_grow_live_done(ctx, 0); 9030 } 9031 9032 static void 9033 bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 9034 { 9035 struct spdk_bs_grow_ctx *ctx = cb_arg; 9036 uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask; 9037 int rc; 9038 9039 if (bserrno != 0) { 9040 bs_grow_live_done(ctx, bserrno); 9041 return; 9042 } 9043 9044 rc = bs_super_validate(ctx->super, ctx->bs); 9045 if (rc != 0) { 9046 bs_grow_live_done(ctx, rc); 9047 return; 9048 } 9049 9050 dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 9051 total_clusters = dev_size / ctx->super->cluster_size; 9052 used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 9053 spdk_divide_round_up(total_clusters, 8), 9054 SPDK_BS_PAGE_SIZE); 9055 max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start; 9056 /* Only check dev_size here, since it can change while total_clusters stays the same.
*/ 9057 if (dev_size == ctx->super->size) { 9058 SPDK_DEBUGLOG(blob, "No need to grow blobstore\n"); 9059 bs_grow_live_done(ctx, 0); 9060 return; 9061 } 9062 /* 9063 * Blobstore cannot be shrunk, so check before if: 9064 * - new size of the device is smaller than size in super_block 9065 * - new total number of clusters is smaller than used_clusters bit_pool 9066 * - there is enough space in metadata for used_cluster_mask to be written out 9067 */ 9068 if (dev_size < ctx->super->size || 9069 total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) || 9070 used_cluster_mask_len > max_used_cluster_mask) { 9071 SPDK_DEBUGLOG(blob, "No space to grow blobstore\n"); 9072 bs_grow_live_done(ctx, -ENOSPC); 9073 return; 9074 } 9075 9076 SPDK_DEBUGLOG(blob, "Resizing blobstore\n"); 9077 9078 ctx->new_used_clusters_mask = calloc(1, total_clusters); 9079 if (!ctx->new_used_clusters_mask) { 9080 bs_grow_live_done(ctx, -ENOMEM); 9081 return; 9082 } 9083 ctx->new_used_clusters = spdk_bit_pool_create(total_clusters); 9084 if (!ctx->new_used_clusters) { 9085 bs_grow_live_done(ctx, -ENOMEM); 9086 return; 9087 } 9088 9089 ctx->super->clean = 0; 9090 ctx->super->size = dev_size; 9091 ctx->super->used_cluster_mask_len = used_cluster_mask_len; 9092 bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx); 9093 } 9094 9095 void 9096 spdk_bs_grow_live(struct spdk_blob_store *bs, 9097 spdk_bs_op_complete cb_fn, void *cb_arg) 9098 { 9099 struct spdk_bs_cpl cpl; 9100 struct spdk_bs_grow_ctx *ctx; 9101 9102 assert(spdk_get_thread() == bs->md_thread); 9103 9104 SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev); 9105 9106 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 9107 cpl.u.bs_basic.cb_fn = cb_fn; 9108 cpl.u.bs_basic.cb_arg = cb_arg; 9109 9110 ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx)); 9111 if (!ctx) { 9112 cb_fn(cb_arg, -ENOMEM); 9113 return; 9114 } 9115 ctx->bs = bs; 9116 9117 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 9118 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 9119 if (!ctx->super) { 9120 free(ctx); 9121 cb_fn(cb_arg, -ENOMEM); 9122 return; 9123 } 9124 9125 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 9126 if (!ctx->seq) { 9127 spdk_free(ctx->super); 9128 free(ctx); 9129 cb_fn(cb_arg, -ENOMEM); 9130 return; 9131 } 9132 9133 /* Read the super block */ 9134 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 9135 bs_byte_to_lba(bs, sizeof(*ctx->super)), 9136 bs_grow_live_load_super_cpl, ctx); 9137 } 9138 9139 void 9140 spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 9141 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 9142 { 9143 struct spdk_blob_store *bs; 9144 struct spdk_bs_cpl cpl; 9145 struct spdk_bs_load_ctx *ctx; 9146 struct spdk_bs_opts opts = {}; 9147 int err; 9148 9149 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 9150 9151 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 9152 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 9153 dev->destroy(dev); 9154 cb_fn(cb_arg, NULL, -EINVAL); 9155 return; 9156 } 9157 9158 spdk_bs_opts_init(&opts, sizeof(opts)); 9159 if (o) { 9160 if (bs_opts_copy(o, &opts)) { 9161 return; 9162 } 9163 } 9164 9165 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 9166 dev->destroy(dev); 9167 cb_fn(cb_arg, NULL, -EINVAL); 9168 return; 9169 } 9170 9171 err = bs_alloc(dev, &opts, &bs, &ctx); 9172 if (err) { 9173 dev->destroy(dev); 9174 cb_fn(cb_arg, NULL, err); 9175 return; 9176 } 9177 9178 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 9179 
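/*
 * Illustrative sketch: growing a loaded blobstore with spdk_bs_grow_live()
 * above after the underlying device has been expanded. Must run on the
 * md_thread; grow_done is hypothetical.
 *
 *	static void
 *	grow_done(void *cb_arg, int bserrno)
 *	{
 *		if (bserrno == -ENOSPC) {
 *			// device did not actually grow, or the metadata region
 *			// has no room for the larger used_cluster mask
 *		}
 *	}
 *
 *	spdk_bs_grow_live(bs, grow_done, NULL);
 */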
cpl.u.bs_handle.cb_fn = cb_fn; 9180 cpl.u.bs_handle.cb_arg = cb_arg; 9181 cpl.u.bs_handle.bs = bs; 9182 9183 ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl); 9184 if (!ctx->seq) { 9185 spdk_free(ctx->super); 9186 free(ctx); 9187 bs_free(bs); 9188 cb_fn(cb_arg, NULL, -ENOMEM); 9189 return; 9190 } 9191 9192 /* Read the super block */ 9193 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 9194 bs_byte_to_lba(bs, sizeof(*ctx->super)), 9195 bs_grow_load_super_cpl, ctx); 9196 } 9197 9198 int 9199 spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len) 9200 { 9201 if (!blob_is_esnap_clone(blob)) { 9202 return -EINVAL; 9203 } 9204 9205 return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true); 9206 } 9207 9208 struct spdk_io_channel * 9209 blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob) 9210 { 9211 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(ch); 9212 struct spdk_bs_dev *bs_dev = blob->back_bs_dev; 9213 struct blob_esnap_channel find = {}; 9214 struct blob_esnap_channel *esnap_channel, *existing; 9215 9216 find.blob_id = blob->id; 9217 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 9218 if (spdk_likely(esnap_channel != NULL)) { 9219 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n", 9220 blob->id, spdk_thread_get_name(spdk_get_thread())); 9221 return esnap_channel->channel; 9222 } 9223 9224 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n", 9225 blob->id, spdk_thread_get_name(spdk_get_thread())); 9226 9227 esnap_channel = calloc(1, sizeof(*esnap_channel)); 9228 if (esnap_channel == NULL) { 9229 SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n", 9230 find.blob_id); 9231 return NULL; 9232 } 9233 esnap_channel->channel = bs_dev->create_channel(bs_dev); 9234 if (esnap_channel->channel == NULL) { 9235 SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id); 9236 free(esnap_channel); 9237 return NULL; 9238 } 9239 esnap_channel->blob_id = find.blob_id; 9240 existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 9241 if (spdk_unlikely(existing != NULL)) { 9242 /* 9243 * This should be unreachable: all modifications to this tree happen on this thread. 9244 */ 9245 SPDK_ERRLOG("blob 0x%" PRIx64 ": lost race to allocate a channel\n", find.blob_id); 9246 assert(false); 9247 9248 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 9249 free(esnap_channel); 9250 9251 return existing->channel; 9252 } 9253 9254 return esnap_channel->channel; 9255 } 9256 9257 static int 9258 blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2) 9259 { 9260 return (c1->blob_id < c2->blob_id ?
-1 : c1->blob_id > c2->blob_id); 9261 } 9262 9263 struct blob_esnap_destroy_ctx { 9264 spdk_blob_op_with_handle_complete cb_fn; 9265 void *cb_arg; 9266 struct spdk_blob *blob; 9267 struct spdk_bs_dev *back_bs_dev; 9268 bool abort_io; 9269 }; 9270 9271 static void 9272 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status) 9273 { 9274 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 9275 struct spdk_blob *blob = ctx->blob; 9276 struct spdk_blob_store *bs = blob->bs; 9277 9278 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n", 9279 blob->id); 9280 9281 if (ctx->cb_fn != NULL) { 9282 ctx->cb_fn(ctx->cb_arg, blob, status); 9283 } 9284 free(ctx); 9285 9286 bs->esnap_channels_unloading--; 9287 if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) { 9288 spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg); 9289 } 9290 } 9291 9292 static void 9293 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i) 9294 { 9295 struct blob_esnap_destroy_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 9296 struct spdk_blob *blob = ctx->blob; 9297 struct spdk_bs_dev *bs_dev = ctx->back_bs_dev; 9298 struct spdk_io_channel *channel = spdk_io_channel_iter_get_channel(i); 9299 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(channel); 9300 struct blob_esnap_channel *esnap_channel; 9301 struct blob_esnap_channel find = {}; 9302 9303 assert(spdk_get_thread() == spdk_io_channel_get_thread(channel)); 9304 9305 find.blob_id = blob->id; 9306 esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find); 9307 if (esnap_channel != NULL) { 9308 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n", 9309 blob->id, spdk_thread_get_name(spdk_get_thread())); 9310 RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel); 9311 9312 if (ctx->abort_io) { 9313 spdk_bs_user_op_t *op, *tmp; 9314 9315 TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) { 9316 if (op->back_channel == esnap_channel->channel) { 9317 TAILQ_REMOVE(&bs_channel->queued_io, op, link); 9318 bs_user_op_abort(op, -EIO); 9319 } 9320 } 9321 } 9322 9323 bs_dev->destroy_channel(bs_dev, esnap_channel->channel); 9324 free(esnap_channel); 9325 } 9326 9327 spdk_for_each_channel_continue(i, 0); 9328 } 9329 9330 /* 9331 * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be 9332 * used when closing an esnap clone blob and after decoupling from the parent. 
9333 */ 9334 static void 9335 blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io, 9336 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 9337 { 9338 struct blob_esnap_destroy_ctx *ctx; 9339 9340 if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) { 9341 if (cb_fn != NULL) { 9342 cb_fn(cb_arg, blob, 0); 9343 } 9344 return; 9345 } 9346 9347 ctx = calloc(1, sizeof(*ctx)); 9348 if (ctx == NULL) { 9349 if (cb_fn != NULL) { 9350 cb_fn(cb_arg, blob, -ENOMEM); 9351 } 9352 return; 9353 } 9354 ctx->cb_fn = cb_fn; 9355 ctx->cb_arg = cb_arg; 9356 ctx->blob = blob; 9357 ctx->back_bs_dev = blob->back_bs_dev; 9358 ctx->abort_io = abort_io; 9359 9360 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n", 9361 blob->id); 9362 9363 blob->bs->esnap_channels_unloading++; 9364 spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx, 9365 blob_esnap_destroy_channels_done); 9366 } 9367 9368 /* 9369 * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a 9370 * bs_channel is destroyed. 9371 */ 9372 static void 9373 blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch) 9374 { 9375 struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp; 9376 9377 assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch))); 9378 9379 SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n", 9380 spdk_thread_get_name(spdk_get_thread())); 9381 RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels, 9382 esnap_channel_tmp) { 9383 SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 9384 ": destroying one channel in thread %s\n", 9385 esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread())); 9386 RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel); 9387 spdk_put_io_channel(esnap_channel->channel); 9388 free(esnap_channel); 9389 } 9390 SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n", 9391 spdk_thread_get_name(spdk_get_thread())); 9392 } 9393 9394 struct set_bs_dev_ctx { 9395 struct spdk_blob *blob; 9396 struct spdk_bs_dev *back_bs_dev; 9397 spdk_blob_op_complete cb_fn; 9398 void *cb_arg; 9399 int bserrno; 9400 }; 9401 9402 static void 9403 blob_set_back_bs_dev_done(void *_ctx, int bserrno) 9404 { 9405 struct set_bs_dev_ctx *ctx = _ctx; 9406 9407 if (bserrno != 0) { 9408 /* Even though the unfreeze failed, the update may have succeeded.
*/ 9409 SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id, 9410 bserrno); 9411 } 9412 ctx->cb_fn(ctx->cb_arg, ctx->bserrno); 9413 free(ctx); 9414 } 9415 9416 static void 9417 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno) 9418 { 9419 struct set_bs_dev_ctx *ctx = _ctx; 9420 9421 if (bserrno != 0) { 9422 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n", 9423 blob->id, bserrno); 9424 ctx->bserrno = bserrno; 9425 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 9426 return; 9427 } 9428 9429 if (blob->back_bs_dev != NULL) { 9430 blob->back_bs_dev->destroy(blob->back_bs_dev); 9431 } 9432 9433 SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id); 9434 blob->back_bs_dev = ctx->back_bs_dev; 9435 ctx->bserrno = 0; 9436 9437 blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx); 9438 } 9439 9440 static void 9441 blob_frozen_destroy_esnap_channels(void *_ctx, int bserrno) 9442 { 9443 struct set_bs_dev_ctx *ctx = _ctx; 9444 struct spdk_blob *blob = ctx->blob; 9445 9446 if (bserrno != 0) { 9447 SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id, 9448 bserrno); 9449 ctx->cb_fn(ctx->cb_arg, bserrno); 9450 free(ctx); 9451 return; 9452 } 9453 9454 /* 9455 * This does not prevent future reads from the esnap device because any future IO will 9456 * lazily create a new esnap IO channel. 9457 */ 9458 blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx); 9459 } 9460 9461 void 9462 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev, 9463 spdk_blob_op_complete cb_fn, void *cb_arg) 9464 { 9465 struct set_bs_dev_ctx *ctx; 9466 9467 if (!blob_is_esnap_clone(blob)) { 9468 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 9469 cb_fn(cb_arg, -EINVAL); 9470 return; 9471 } 9472 9473 ctx = calloc(1, sizeof(*ctx)); 9474 if (ctx == NULL) { 9475 SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n", 9476 blob->id); 9477 cb_fn(cb_arg, -ENOMEM); 9478 return; 9479 } 9480 ctx->cb_fn = cb_fn; 9481 ctx->cb_arg = cb_arg; 9482 ctx->back_bs_dev = back_bs_dev; 9483 ctx->blob = blob; 9484 blob_freeze_io(blob, blob_frozen_destroy_esnap_channels, ctx); 9485 } 9486 9487 struct spdk_bs_dev * 9488 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob) 9489 { 9490 if (!blob_is_esnap_clone(blob)) { 9491 SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id); 9492 return NULL; 9493 } 9494 9495 return blob->back_bs_dev; 9496 } 9497 9498 bool 9499 spdk_blob_is_degraded(const struct spdk_blob *blob) 9500 { 9501 if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) { 9502 return true; 9503 } 9504 if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) { 9505 return false; 9506 } 9507 9508 return blob->back_bs_dev->is_degraded(blob->back_bs_dev); 9509 } 9510 9511 SPDK_LOG_REGISTER_COMPONENT(blob) 9512 SPDK_LOG_REGISTER_COMPONENT(blob_esnap) 9513
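/*
 * Illustrative sketch: hot-swapping the external snapshot device of an esnap
 * clone with spdk_blob_set_esnap_bs_dev() above. Blob I/O is frozen, the
 * per-thread esnap channels are destroyed, the old back_bs_dev is released,
 * and the new device is installed before I/O is unfrozen. new_dev and
 * set_done are hypothetical.
 *
 *	if (spdk_blob_is_esnap_clone(blob)) {
 *		spdk_blob_set_esnap_bs_dev(blob, new_dev, set_done, NULL);
 *	}
 */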