/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   spdk_blob_op_complete cb_fn, void *cb_arg);

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);
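/* Open blobs are tracked in an RB tree keyed by blob id. blob_id_cmp()
 * supplies the negative/zero/positive ordering that RB_GENERATE_STATIC()
 * expects; the (a < b ? -1 : a > b) idiom avoids the overflow that plain
 * subtraction of 64-bit ids could cause.
 */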
static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}
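/* Entry i of the active cluster map holds the starting LBA of cluster i on
 * the backing device; a value of 0 marks an unallocated (thin-provisioned)
 * cluster. That is why blob_insert_cluster() refuses to overwrite a non-zero
 * entry and returns -EEXIST instead.
 */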
static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = NULL;

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* An extent page shall never occupy md_page 0, so start the search from 1 */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob %" PRIu64 "\n", *cluster, blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}
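/* The FIELD_OK()/SET_FIELD() pattern makes the opts struct forward and
 * backward compatible: only fields that fit within the caller-provided
 * opts_size are written, so a binary built against an older (smaller) struct
 * definition still initializes correctly. An illustrative caller, assuming
 * the usual create path (create_cb/cb_arg are caller-provided):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 16;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_cb, cb_arg);
 */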
void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}

static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	if (blob->frozen_refcnt == 1) {
		spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	if (blob->frozen_refcnt == 0) {
		spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}
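/* Freeze/unfreeze is reference counted. The first freeze walks every I/O
 * channel (blob_io_sync) purely to drain operations already in flight;
 * while frozen_refcnt > 0, submission paths park new user ops on the
 * per-channel queued_io list instead of issuing them. The last unfreeze
 * walks the channels again and replays those queued ops via
 * bs_user_op_execute(). Nested freezes therefore complete immediately.
 */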
static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while it was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}
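/* blob_mark_clean() promotes the just-persisted "active" arrays to "clean"
 * and hands "active" fresh copies of the same contents. "clean" therefore
 * always mirrors what is on disk, which is the state a failed persist falls
 * back to, while "active" remains free to diverge again.
 */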
static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}
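/* On-disk layout of an xattr descriptor, as implied by the checks above:
 *
 *	+------+--------+-------------+--------------+-----...-----+----...----+
 *	| type | length | name_length | value_length | name, no NUL|   value   |
 *	+------+--------+-------------+--------------+-----...-----+----...----+
 *
 * "length" counts everything after the generic {type, length} header, so it
 * must equal sizeof(name_length) + sizeof(value_length) + name_length +
 * value_length. The name is stored without a terminating NUL; one is added
 * back on load.
 */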
static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* An Extent Table is already present in the md;
				 * both descriptors should never appear at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* Extent RLE is already present in the md;
				 * both descriptors should never appear at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* The number of clusters in this ET does not match the number
				 * from the previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages; those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			unsigned int i;
			unsigned int cluster_count = 0;
			size_t cluster_idx_length;

			if (blob->extent_rle_found) {
				/* Extent RLE is already present in the md;
				 * both descriptors should never appear at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should match
			 * the current size of the blob.
			 * If this is changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify whether
			 * this blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}
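/* A metadata page is a sequence of variable-length descriptors packed into
 * page->descriptors: a {type, length} header followed by "length" bytes of
 * payload. Parsing walks the array until it hits a zero-length PADDING
 * descriptor or until the next header would no longer fit, so a page never
 * needs an explicit descriptor count.
 */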
static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD; this can happen,
	 * for example, if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (%" PRIu64 ") doesn't match what's in metadata (%" PRIu64 ")\n",
			    blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}
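/* A blob's metadata is a singly linked chain of md pages: the root page
 * lives at the fixed index bs_blobid_to_page(blob->id), and every page
 * records the blob id, its position in the chain (sequence_num), and the md
 * page index of its successor in "next" (SPDK_INVALID_MD_PAGE terminates
 * the chain). blob_serialize_add_page() only grows the in-memory chain;
 * "next" is initialized to SPDK_INVALID_MD_PAGE here and patched once md
 * page locations for the rest of the chain are claimed during persist.
 */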
/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * required_sz is updated on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr *desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}
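/* Illustrative encoding, assuming extent pages [7, 0, 0, 0, 9]: allocated
 * pages are emitted as single entries {page_idx = 7, num_pages = 1} and
 * {page_idx = 9, num_pages = 1}, while the run of unallocated pages
 * collapses into one entry {page_idx = 0, num_pages = 3}. Only zero entries
 * are run-length-encoded; an allocated extent page always has
 * num_pages == 1, which the parser asserts on load.
 */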
static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted.
	 * Such a case occurs when num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}
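/* Illustrative run-length encoding, assuming lba_per_cluster == 8 and an
 * active cluster map holding LBAs [64, 72, 80, 0, 0, 256]: the three
 * contiguous LBAs collapse to {cluster_idx = 8, length = 3}, the
 * unallocated run to {cluster_idx = 0, length = 2}, and the final cluster
 * to {cluster_idx = 32, length = 1}. A run is extended only while LBAs stay
 * contiguous (or stay zero), so sparse thin-provisioned blobs serialize
 * compactly.
 */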
static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}
static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr *xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob *blob;

	struct spdk_blob_md_page *pages;
	uint32_t num_pages;
	uint32_t next_extent_page;
	spdk_bs_sequence_t *seq;

	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
};

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}
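/* The CRC field occupies the last 4 bytes of the metadata page, which is
 * why the checksum above covers SPDK_BS_PAGE_SIZE - 4 bytes: a page is
 * checksummed over everything except the stored CRC itself. Loading a page
 * recomputes this value and compares it against page->crc before parsing.
 */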
static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot fail\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);

static void
blob_load_backing_dev(void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	const void *value;
	size_t len;
	int rc;

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}

static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	uint64_t i;
	uint32_t crc;
	uint64_t lba;
	void *tmp;
	uint64_t sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case the blob size should be increased by up to the amount
			 * left in remaining_clusters_in_et. */
			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(ctx);
}
static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	int rc;
	uint32_t crc;
	uint32_t current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid %" PRIu64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid %" PRIu64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support
		 * for the extent table. No extent_* descriptors means that the blob
		 * has a length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in the metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}
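/* Load flow, step by step: read the root md page at bs_blobid_to_page(id),
 * verify its CRC, and keep following page->next until the chain terminates;
 * then parse all pages (blob_parse), read any extent pages one at a time
 * (blob_load_cpl_extents_cpl), and finally attach the backing device - the
 * parent snapshot or a zeroes device for thin-provisioned blobs
 * (blob_load_backing_dev).
 */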
struct spdk_blob_persist_ctx {
	struct spdk_blob *blob;

	struct spdk_bs_super_block *super;

	struct spdk_blob_md_page *pages;
	uint32_t next_extent_page;
	struct spdk_blob_md_page *extent_page;

	spdk_bs_sequence_t *seq;
	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

static void
bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (ctx->blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

static void blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx *next_persist, *tmp;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob_persist_check_dirty(next_persist);
}
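/* Persist requests are coalesced. Requests that arrive while a persist is
 * in flight sit on pending_persists; when the device round trip finishes,
 * every persist that was batched into it (persists_to_complete) completes,
 * and the two lists are swapped so the accumulated pending requests are
 * served by a single new metadata write. One disk write can thus complete
 * many queued sync requests.
 */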
static void
blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	/* Release all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to release if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			bs_release_md_page(bs, blob->active.extent_pages[i]);
		}
	}

	if (blob->active.num_extent_pages == 0) {
		free(blob->active.extent_pages);
		blob->active.extent_pages = NULL;
		blob->active.extent_pages_array_size = 0;
	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
		assert(tmp != NULL);
		blob->active.extent_pages = tmp;
#endif
		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
	}

	blob_persist_complete(seq, ctx, bserrno);
}

static void
blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;
	uint64_t lba;
	uint64_t lba_count;
	spdk_bs_batch_t *batch;

	batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx);
	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* Clear all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to clear if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]);
			bs_batch_write_zeroes_dev(batch, lba, lba_count);
		}
	}

	bs_batch_close(batch);
}
static void
blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	pthread_mutex_lock(&bs->used_clusters_mutex);
	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			bs_release_cluster(bs, cluster_num);
		}
	}
	pthread_mutex_unlock(&bs->used_clusters_mutex);

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;
#endif
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	/* Move on to clearing extent pages */
	blob_persist_clear_extents(seq, ctx);
}

static void
blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	spdk_bs_batch_t *batch;
	size_t i;
	uint64_t lba;
	uint64_t lba_count;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx);

	/* Clear all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint64_t next_lba_count = bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		} else if (next_lba == 0) {
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, clear them now */
		if (lba_count > 0) {
			bs_batch_clear_dev(ctx, batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, clear them now */
	if (lba_count > 0) {
		bs_batch_clear_dev(ctx, batch, lba, lba_count);
	}

	bs_batch_close(batch);
}
static void
blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint64_t lba_count;
	spdk_bs_batch_t *batch;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx);

	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place, so
	 * any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		lba = bs_md_page_to_lba(bs, blob->clean.pages[i]);

		bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	/* The first page will only be zeroed if this is a delete. */
	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		/* The first page in the metadata goes where the blobid indicates */
		page_num = bs_blobid_to_page(blob->id);
		lba = bs_md_page_to_lba(bs, page_num);

		bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	bs_batch_close(batch);
}

static void
blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_blob_md_page *page;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	if (blob->active.num_pages == 0) {
		/* Move on to the next step */
		blob_persist_zero_pages(seq, ctx, 0);
		return;
	}

	lba_count = bs_byte_to_lba(bs, sizeof(*page));

	page = &ctx->pages[0];
	/* The first page in the metadata goes where the blobid indicates */
	lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id));

	bs_sequence_write_dev(seq, page, lba, lba_count,
			      blob_persist_zero_pages, ctx);
}

static void
blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_blob_md_page *page;
	spdk_bs_batch_t *batch;
	size_t i;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	lba_count = bs_byte_to_lba(bs, sizeof(*page));

	batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx);

	/* This starts at 1. The root page is not written until
	 * all of the others are finished
	 */
	for (i = 1; i < blob->active.num_pages; i++) {
		page = &ctx->pages[i];
		assert(page->sequence_num == i);

		lba = bs_md_page_to_lba(bs, blob->active.pages[i]);

		bs_batch_write_dev(batch, page, lba, lba_count);
	}

	bs_batch_close(batch);
}
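/* A quick worked example for the extent-page math in blob_resize() below
 * (illustrative only; the real value of SPDK_EXTENTS_PER_EP comes from the
 * on-disk format headers): if SPDK_EXTENTS_PER_EP were 512, growing a blob
 * to sz = 1025 clusters would need new_num_ep = ceil(1025 / 512) = 3 extent
 * pages, so one new metadata page is claimed when growing from 1024.
 */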
static int
blob_resize(struct spdk_blob *blob, uint64_t sz)
{
	uint64_t i;
	uint64_t *tmp;
	uint64_t cluster;
	uint32_t lfmd; /* lowest free md page */
	uint64_t num_clusters;
	uint32_t *ep_tmp;
	uint64_t new_num_ep = 0, current_num_ep = 0;
	struct spdk_blob_store *bs;

	bs = blob->bs;

	blob_verify_md_op(blob);

	if (blob->active.num_clusters == sz) {
		return 0;
	}

	if (blob->active.num_clusters < blob->active.cluster_array_size) {
		/* If this blob was resized to be larger, then smaller, then
		 * larger without syncing, then the cluster array already
		 * contains spare assigned clusters we can use.
		 */
		num_clusters = spdk_min(blob->active.cluster_array_size, sz);
	} else {
		num_clusters = blob->active.num_clusters;
	}

	if (blob->use_extent_table) {
		/* Round up, since every cluster beyond the current extent table
		 * size requires a new extent page. */
		new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
		current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
	}

	/* Check first that we have enough clusters and md pages before we start claiming them. */
	if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) {
		if ((sz - num_clusters) > bs->num_free_clusters) {
			return -ENOSPC;
		}
		lfmd = 0;
		for (i = current_num_ep; i < new_num_ep; i++) {
			lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
			if (lfmd == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request */
				return -ENOSPC;
			}
		}
	}

	if (sz > num_clusters) {
		/* Expand the cluster array if necessary.
		 * We only shrink the array when persisting.
		 */
		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz);
		if (sz > 0 && tmp == NULL) {
			return -ENOMEM;
		}
		memset(tmp + blob->active.cluster_array_size, 0,
		       sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size));
		blob->active.clusters = tmp;
		blob->active.cluster_array_size = sz;

		/* Expand the extent table, only if enough clusters were added */
		if (new_num_ep > current_num_ep && blob->use_extent_table) {
			ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep);
			if (new_num_ep > 0 && ep_tmp == NULL) {
				return -ENOMEM;
			}
			memset(ep_tmp + blob->active.extent_pages_array_size, 0,
			       sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size));
			blob->active.extent_pages = ep_tmp;
			blob->active.extent_pages_array_size = new_num_ep;
		}
	}

	blob->state = SPDK_BLOB_STATE_DIRTY;

	if (spdk_blob_is_thin_provisioned(blob) == false) {
		cluster = 0;
		lfmd = 0;
		pthread_mutex_lock(&blob->bs->used_clusters_mutex);
		for (i = num_clusters; i < sz; i++) {
			bs_allocate_cluster(blob, i, &cluster, &lfmd, true);
			lfmd++;
		}
		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
	}

	blob->active.num_clusters = sz;
	blob->active.num_extent_pages = new_num_ep;

	return 0;
}
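/* blob_persist_generate_new_md() below assigns metadata pages in two passes:
 * a dry run over the used_md_pages bit array to confirm enough free pages
 * exist, then a second walk that actually claims them and links each page
 * to the next via page->next before computing its CRC. The dry run keeps a
 * failed persist from leaking partially claimed pages.
 */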
static void
blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx)
{
	spdk_bs_sequence_t *seq = ctx->seq;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t i;
	uint32_t page_num;
	void *tmp;
	int rc;

	/* Generate the new metadata */
	rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
	if (rc < 0) {
		blob_persist_complete(seq, ctx, rc);
		return;
	}

	assert(blob->active.num_pages >= 1);

	/* Resize the cache of page indices */
	tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	if (!tmp) {
		blob_persist_complete(seq, ctx, -ENOMEM);
		return;
	}
	blob->active.pages = tmp;

	/* Assign this metadata to pages. This requires two passes -
	 * one to verify that there are enough pages and a second
	 * to actually claim them. */
	page_num = 0;
	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
	for (i = 1; i < blob->active.num_pages; i++) {
		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
		if (page_num == UINT32_MAX) {
			blob_persist_complete(seq, ctx, -ENOMEM);
			return;
		}
		page_num++;
	}

	page_num = 0;
	blob->active.pages[0] = bs_blobid_to_page(blob->id);
	for (i = 1; i < blob->active.num_pages; i++) {
		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
		ctx->pages[i - 1].next = page_num;
		/* Now that previous metadata page is complete, calculate the crc for it. */
		ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]);
		blob->active.pages[i] = page_num;
		bs_claim_md_page(bs, page_num);
		SPDK_DEBUGLOG(blob, "Claiming page %u for blob %" PRIu64 "\n", page_num, blob->id);
		page_num++;
	}
	ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]);
	/* Start writing the metadata from last page to first */
	blob->state = SPDK_BLOB_STATE_CLEAN;
	blob_persist_write_page_chain(seq, ctx);
}
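/* blob_persist_write_extent_pages() below is self-continuing: it writes one
 * extent page per invocation and names itself as the completion callback,
 * so the loop effectively resumes at ctx->next_extent_page after each write
 * completes. When no further allocated extent pages remain, it falls
 * through to generating the new metadata chain.
 */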
static void
blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	size_t i;
	uint32_t extent_page_id;
	uint32_t page_count = 0;
	int rc;

	if (ctx->extent_page != NULL) {
		spdk_free(ctx->extent_page);
		ctx->extent_page = NULL;
	}

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	/* Only write out Extent Pages when blob was resized. */
	for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) {
		extent_page_id = blob->active.extent_pages[i];
		if (extent_page_id == 0) {
			/* No Extent Page to persist */
			assert(spdk_blob_is_thin_provisioned(blob));
			continue;
		}
		assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id));
		ctx->next_extent_page = i + 1;
		rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page);
		if (rc < 0) {
			blob_persist_complete(seq, ctx, rc);
			return;
		}

		blob->state = SPDK_BLOB_STATE_DIRTY;
		blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page);

		ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page);

		bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id),
				      bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
				      blob_persist_write_extent_pages, ctx);
		return;
	}

	blob_persist_generate_new_md(ctx);
}

static void
blob_persist_start(struct spdk_blob_persist_ctx *ctx)
{
	spdk_bs_sequence_t *seq = ctx->seq;
	struct spdk_blob *blob = ctx->blob;

	if (blob->active.num_pages == 0) {
		/* This is the signal that the blob should be deleted.
		 * Immediately jump to the clean up routine. */
		assert(blob->clean.num_pages > 0);
		blob->state = SPDK_BLOB_STATE_CLEAN;
		blob_persist_zero_pages(seq, ctx, 0);
		return;
	}

	if (blob->clean.num_clusters < blob->active.num_clusters) {
		/* Blob was resized up */
		assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages);
		ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1;
	} else if (blob->active.num_clusters < blob->active.cluster_array_size) {
		/* Blob was resized down */
		assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages);
		ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1;
	} else {
		/* No change in size occurred */
		blob_persist_generate_new_md(ctx);
		return;
	}

	blob_persist_write_extent_pages(seq, ctx, 0);
}
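/* First persist after a clean load: the superblock's "clean" flag is
 * cleared on disk before any blob metadata changes, so a crash from this
 * point on is detected at the next load and triggers recovery. The
 * in-memory bs->clean flag below mirrors the on-disk state.
 */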
static void
blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;

	spdk_free(ctx->super);

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	ctx->blob->bs->clean = 0;

	blob_persist_start(ctx);
}

static void
bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
	       struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		spdk_free(ctx->super);
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	ctx->super->clean = 0;
	if (ctx->super->size == 0) {
		ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen;
	}

	bs_write_super(seq, ctx->blob->bs, ctx->super, blob_persist_dirty_cpl, ctx);
}

static void
blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx)
{
	if (ctx->blob->bs->clean) {
		ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
					  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->super) {
			blob_persist_complete(ctx->seq, ctx, -ENOMEM);
			return;
		}

		bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->blob->bs, 0),
				     bs_byte_to_lba(ctx->blob->bs, sizeof(*ctx->super)),
				     blob_persist_dirty, ctx);
	} else {
		blob_persist_start(ctx);
	}
}

/* Write a blob to disk */
static void
blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	     spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_persist_ctx *ctx;

	blob_verify_md_op(blob);

	if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) {
		cb_fn(seq, cb_arg, 0);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->blob = blob;
	ctx->seq = seq;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Multiple blob persists can affect one another, via blob->state or
	 * blob mutable data changes. To prevent it, queue up the persists. */
	if (!TAILQ_EMPTY(&blob->persists_to_complete)) {
		TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link);
		return;
	}
	TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link);

	blob_persist_check_dirty(ctx);
}
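/* Copy-on-write path for thin-provisioned blobs: when a write lands on an
 * unallocated cluster, a new cluster is claimed, the backing data (if any)
 * is read and rewritten into it, and only then is the cluster inserted into
 * the blob's metadata on the md thread. While this is in flight, other
 * operations needing cluster allocation on the same channel are parked on
 * the need_cluster_alloc queue and re-executed on completion.
 */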
struct spdk_blob_copy_cluster_ctx {
	struct spdk_blob *blob;
	uint8_t *buf;
	uint64_t page;
	uint64_t new_cluster;
	uint32_t new_extent_page;
	spdk_bs_sequence_t *seq;
};

static void
blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
	TAILQ_HEAD(, spdk_bs_request_set) requests;
	spdk_bs_user_op_t *op;

	TAILQ_INIT(&requests);
	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);

	while (!TAILQ_EMPTY(&requests)) {
		op = TAILQ_FIRST(&requests);
		TAILQ_REMOVE(&requests, op, link);
		if (bserrno == 0) {
			bs_user_op_execute(op);
		} else {
			bs_user_op_abort(op, bserrno);
		}
	}

	spdk_free(ctx->buf);
	free(ctx);
}

static void
blob_insert_cluster_cpl(void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;

	if (bserrno) {
		if (bserrno == -EEXIST) {
			/* The metadata insert failed because another thread
			 * allocated the cluster first. Free our cluster
			 * but continue without error. */
			bserrno = 0;
		}
		pthread_mutex_lock(&ctx->blob->bs->used_clusters_mutex);
		bs_release_cluster(ctx->blob->bs, ctx->new_cluster);
		pthread_mutex_unlock(&ctx->blob->bs->used_clusters_mutex);
		if (ctx->new_extent_page != 0) {
			bs_release_md_page(ctx->blob->bs, ctx->new_extent_page);
		}
	}

	bs_sequence_finish(ctx->seq, bserrno);
}

static void
blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
	uint32_t cluster_number;

	if (bserrno) {
		/* The write failed, so jump to the final completion handler */
		bs_sequence_finish(seq, bserrno);
		return;
	}

	cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page);

	blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
					 ctx->new_extent_page, blob_insert_cluster_cpl, ctx);
}

static void
blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		/* The read failed, so jump to the final completion handler */
		bs_sequence_finish(seq, bserrno);
		return;
	}

	/* Write whole cluster */
	bs_sequence_write_dev(seq, ctx->buf,
			      bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
			      bs_cluster_to_lba(ctx->blob->bs, 1),
			      blob_write_copy_cpl, ctx);
}

static void
bs_allocate_and_copy_cluster(struct spdk_blob *blob,
			     struct spdk_io_channel *_ch,
			     uint64_t io_unit, spdk_bs_user_op_t *op)
{
	struct spdk_bs_cpl cpl;
	struct spdk_bs_channel *ch;
	struct spdk_blob_copy_cluster_ctx *ctx;
	uint32_t cluster_start_page;
	uint32_t cluster_number;
	int rc;

	ch = spdk_io_channel_get_ctx(_ch);

	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
		/* There are already operations pending. Queue this user op
		 * and return because it will be re-executed when the outstanding
		 * cluster allocation completes. */
		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
		return;
	}

	/* Round the io_unit offset down to the first page in the cluster */
	cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit);

	/* Calculate which index in the metadata cluster array the corresponding
	 * cluster is supposed to be at. */
	cluster_number = bs_io_unit_to_cluster_number(blob, io_unit);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		bs_user_op_abort(op, -ENOMEM);
		return;
	}

	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);

	ctx->blob = blob;
	ctx->page = cluster_start_page;

	if (blob->parent_id != SPDK_BLOBID_INVALID) {
		ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
				       NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->buf) {
			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
				    blob->bs->cluster_sz);
			free(ctx);
			bs_user_op_abort(op, -ENOMEM);
			return;
		}
	}

	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
	rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
				 false);
	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
	if (rc != 0) {
		spdk_free(ctx->buf);
		free(ctx);
		bs_user_op_abort(op, rc);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl;
	cpl.u.blob_basic.cb_arg = ctx;

	ctx->seq = bs_sequence_start(_ch, &cpl);
	if (!ctx->seq) {
		pthread_mutex_lock(&blob->bs->used_clusters_mutex);
		bs_release_cluster(blob->bs, ctx->new_cluster);
		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
		spdk_free(ctx->buf);
		free(ctx);
		bs_user_op_abort(op, -ENOMEM);
		return;
	}

	/* Queue the user op to block other incoming operations */
	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);

	if (blob->parent_id != SPDK_BLOBID_INVALID) {
		/* Read cluster from backing device */
		bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
					bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
					bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
					blob_write_copy, ctx);
	} else {
		blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
						 ctx->new_extent_page, blob_insert_cluster_cpl, ctx);
	}
}

static inline bool
blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
				 uint64_t *lba, uint64_t *lba_count)
{
	*lba_count = length;

	if (!bs_io_unit_is_allocated(blob, io_unit)) {
		assert(blob->back_bs_dev != NULL);
		*lba = bs_io_unit_to_back_dev_lba(blob, io_unit);
		*lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count);
		return false;
	} else {
		*lba = bs_blob_io_unit_to_lba(blob, io_unit);
		return true;
	}
}
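/* I/O that crosses a cluster boundary is split below, because the target
 * LBAs of adjacent clusters need not be contiguous on disk. As a sketch
 * with hypothetical geometry (4 io_units per cluster): a write at offset 6
 * of length 4 becomes two sub-operations, (offset 6, length 2) up to the
 * boundary and then (offset 8, length 2), submitted one at a time.
 */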
struct op_split_ctx {
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint64_t io_unit_offset;
	uint64_t io_units_remaining;
	void *curr_payload;
	enum spdk_blob_op_type op_type;
	spdk_bs_sequence_t *seq;
	bool in_submit_ctx;
	bool completed_in_submit_ctx;
	bool done;
};

static void
blob_request_submit_op_split_next(void *cb_arg, int bserrno)
{
	struct op_split_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_io_channel *ch = ctx->channel;
	enum spdk_blob_op_type op_type = ctx->op_type;
	uint8_t *buf;
	uint64_t offset;
	uint64_t length;
	uint64_t op_length;

	if (bserrno != 0 || ctx->io_units_remaining == 0) {
		bs_sequence_finish(ctx->seq, bserrno);
		if (ctx->in_submit_ctx) {
			/* Defer freeing of the ctx object, since it will be
			 * accessed when this unwinds back to the submission
			 * context.
			 */
			ctx->done = true;
		} else {
			free(ctx);
		}
		return;
	}

	if (ctx->in_submit_ctx) {
		/* If this split operation completed in the context
		 * of its submission, mark the flag and return immediately
		 * to avoid recursion.
		 */
		ctx->completed_in_submit_ctx = true;
		return;
	}

	while (true) {
		ctx->completed_in_submit_ctx = false;

		offset = ctx->io_unit_offset;
		length = ctx->io_units_remaining;
		buf = ctx->curr_payload;
		op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob,
				     offset));

		/* Update length and payload for next operation */
		ctx->io_units_remaining -= op_length;
		ctx->io_unit_offset += op_length;
		if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
			ctx->curr_payload += op_length * blob->bs->io_unit_size;
		}

		assert(!ctx->in_submit_ctx);
		ctx->in_submit_ctx = true;

		switch (op_type) {
		case SPDK_BLOB_READ:
			spdk_blob_io_read(blob, ch, buf, offset, op_length,
					  blob_request_submit_op_split_next, ctx);
			break;
		case SPDK_BLOB_WRITE:
			spdk_blob_io_write(blob, ch, buf, offset, op_length,
					   blob_request_submit_op_split_next, ctx);
			break;
		case SPDK_BLOB_UNMAP:
			spdk_blob_io_unmap(blob, ch, offset, op_length,
					   blob_request_submit_op_split_next, ctx);
			break;
		case SPDK_BLOB_WRITE_ZEROES:
			spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
						  blob_request_submit_op_split_next, ctx);
			break;
		case SPDK_BLOB_READV:
		case SPDK_BLOB_WRITEV:
			SPDK_ERRLOG("readv/writev not valid\n");
			bs_sequence_finish(ctx->seq, -EINVAL);
			free(ctx);
			return;
		}

#ifndef __clang_analyzer__
		/* scan-build reports a false positive around accessing the ctx here. It
		 * forms a path that recursively calls this function, but then says
		 * "assuming ctx->in_submit_ctx is false", when that isn't possible.
		 * This path does free(ctx), returns to here, and reports a use-after-free
		 * bug. Wrapping this bit of code so that scan-build doesn't see it
		 * works around the scan-build bug.
		 */
		assert(ctx->in_submit_ctx);
		ctx->in_submit_ctx = false;

		/* If the operation completed immediately, loop back and submit the
		 * next operation. Otherwise we can return and the next split
		 * operation will get submitted when this current operation is
		 * later completed asynchronously.
		 */
		if (ctx->completed_in_submit_ctx) {
			continue;
		} else if (ctx->done) {
			free(ctx);
		}
#endif
		break;
	}
}
static void
blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
			     void *payload, uint64_t offset, uint64_t length,
			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
	struct op_split_ctx *ctx;
	spdk_bs_sequence_t *seq;
	struct spdk_bs_cpl cpl;

	assert(blob != NULL);

	ctx = calloc(1, sizeof(struct op_split_ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start(ch, &cpl);
	if (!seq) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->channel = ch;
	ctx->curr_payload = payload;
	ctx->io_unit_offset = offset;
	ctx->io_units_remaining = length;
	ctx->op_type = op_type;
	ctx->seq = seq;

	blob_request_submit_op_split_next(ctx, 0);
}

static void
blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
			      void *payload, uint64_t offset, uint64_t length,
			      spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
	struct spdk_bs_cpl cpl;
	uint64_t lba;
	uint64_t lba_count;
	bool is_allocated;

	assert(blob != NULL);

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	if (blob->frozen_refcnt) {
		/* This blob I/O is frozen */
		spdk_bs_user_op_t *op;
		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);

		op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
		if (!op) {
			cb_fn(cb_arg, -ENOMEM);
			return;
		}

		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);

		return;
	}

	is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);

	switch (op_type) {
	case SPDK_BLOB_READ: {
		spdk_bs_batch_t *batch;

		batch = bs_batch_open(_ch, &cpl);
		if (!batch) {
			cb_fn(cb_arg, -ENOMEM);
			return;
		}

		if (is_allocated) {
			/* Read from the blob */
			bs_batch_read_dev(batch, payload, lba, lba_count);
		} else {
			/* Read from the backing block device */
			bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
		}

		bs_batch_close(batch);
		break;
	}
	case SPDK_BLOB_WRITE:
	case SPDK_BLOB_WRITE_ZEROES: {
		if (is_allocated) {
			/* Write to the blob */
			spdk_bs_batch_t *batch;

			if (lba_count == 0) {
				cb_fn(cb_arg, 0);
				return;
			}

			batch = bs_batch_open(_ch, &cpl);
			if (!batch) {
				cb_fn(cb_arg, -ENOMEM);
				return;
			}

			if (op_type == SPDK_BLOB_WRITE) {
				bs_batch_write_dev(batch, payload, lba, lba_count);
			} else {
				bs_batch_write_zeroes_dev(batch, lba, lba_count);
			}

			bs_batch_close(batch);
		} else {
			/* Queue this operation and allocate the cluster */
			spdk_bs_user_op_t *op;

			op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
			if (!op) {
				cb_fn(cb_arg, -ENOMEM);
				return;
			}

			bs_allocate_and_copy_cluster(blob, _ch, offset, op);
		}
		break;
	}
	case SPDK_BLOB_UNMAP: {
		spdk_bs_batch_t *batch;

		batch = bs_batch_open(_ch, &cpl);
		if (!batch) {
			cb_fn(cb_arg, -ENOMEM);
			return;
		}

		if (is_allocated) {
			bs_batch_unmap_dev(batch, lba, lba_count);
		}

		bs_batch_close(batch);
		break;
	}
	case SPDK_BLOB_READV:
	case SPDK_BLOB_WRITEV:
		SPDK_ERRLOG("readv/writev not valid\n");
		cb_fn(cb_arg, -EINVAL);
		break;
	}
}
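/* Dispatch rule used below: an operation that fits entirely within one
 * cluster goes through the single-op path; anything that touches a cluster
 * boundary takes the split path above. Both paths report completion through
 * the same spdk_blob_op_complete callback, so callers never see the split.
 */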
static void
blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
		       void *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
	assert(blob != NULL);

	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
		cb_fn(cb_arg, -EPERM);
		return;
	}

	if (length == 0) {
		cb_fn(cb_arg, 0);
		return;
	}

	if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
		cb_fn(cb_arg, -EINVAL);
		return;
	}
	if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) {
		blob_request_submit_op_single(_channel, blob, payload, offset, length,
					      cb_fn, cb_arg, op_type);
	} else {
		blob_request_submit_op_split(_channel, blob, payload, offset, length,
					     cb_fn, cb_arg, op_type);
	}
}

struct rw_iov_ctx {
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	spdk_blob_op_complete cb_fn;
	void *cb_arg;
	bool read;
	int iovcnt;
	struct iovec *orig_iov;
	uint64_t io_unit_offset;
	uint64_t io_units_remaining;
	uint64_t io_units_done;
	struct iovec iov[0];
};

static void
rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	assert(cb_arg == NULL);
	bs_sequence_finish(seq, bserrno);
}

static void
rw_iov_split_next(void *cb_arg, int bserrno)
{
	struct rw_iov_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct iovec *iov, *orig_iov;
	int iovcnt;
	size_t orig_iovoff;
	uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
	uint64_t byte_count;

	if (bserrno != 0 || ctx->io_units_remaining == 0) {
		ctx->cb_fn(ctx->cb_arg, bserrno);
		free(ctx);
		return;
	}

	io_unit_offset = ctx->io_unit_offset;
	io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
	io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
	/*
	 * Get the index and offset into the original iov array for our current position
	 * in the I/O sequence. byte_count keeps track of how many bytes remain until
	 * orig_iov and orig_iovoff point to the current position in the I/O sequence.
	 */
	byte_count = ctx->io_units_done * blob->bs->io_unit_size;
	orig_iov = &ctx->orig_iov[0];
	orig_iovoff = 0;
	while (byte_count > 0) {
		if (byte_count >= orig_iov->iov_len) {
			byte_count -= orig_iov->iov_len;
			orig_iov++;
		} else {
			orig_iovoff = byte_count;
			byte_count = 0;
		}
	}

	/*
	 * Build an iov array for the next I/O in the sequence. byte_count keeps track of how many
	 * bytes of this next I/O remain to be accounted for in the new iov array.
	 */
	byte_count = io_units_count * blob->bs->io_unit_size;
	iov = &ctx->iov[0];
	iovcnt = 0;
	while (byte_count > 0) {
		assert(iovcnt < ctx->iovcnt);
		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
		iov->iov_base = orig_iov->iov_base + orig_iovoff;
		byte_count -= iov->iov_len;
		orig_iovoff = 0;
		orig_iov++;
		iov++;
		iovcnt++;
	}

	ctx->io_unit_offset += io_units_count;
	ctx->io_units_remaining -= io_units_count;
	ctx->io_units_done += io_units_count;
	iov = &ctx->iov[0];

	if (ctx->read) {
		spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
				   io_units_count, rw_iov_split_next, ctx);
	} else {
		spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
				    io_units_count, rw_iov_split_next, ctx);
	}
}
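/* rw_iov_split_next() above re-slices the caller's iovec for each
 * cluster-sized piece. Hypothetical example: with two 8 KiB iovs and a
 * 4 KiB io_unit, resuming after 3 io_units (12 KiB) walks past iov[0] and
 * builds the next sub-I/O's iov array starting 4 KiB into iov[1].
 */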
static void
blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
			   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
			   spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
{
	struct spdk_bs_cpl cpl;

	assert(blob != NULL);

	if (!read && blob->data_ro) {
		cb_fn(cb_arg, -EPERM);
		return;
	}

	if (length == 0) {
		cb_fn(cb_arg, 0);
		return;
	}

	if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	/*
	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
	 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary,
	 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster
	 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
	 * to allocate a separate iov array and split the I/O such that none of the resulting
	 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel)
	 * but since this case happens very infrequently, any performance impact will be negligible.
	 *
	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
	 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
	 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called
	 * when the batch was completed, to allow for freeing the memory for the iov arrays.
	 */
	if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) {
		uint64_t lba_count;
		uint64_t lba;
		bool is_allocated;

		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
		cpl.u.blob_basic.cb_fn = cb_fn;
		cpl.u.blob_basic.cb_arg = cb_arg;

		if (blob->frozen_refcnt) {
			/* This blob I/O is frozen */
			enum spdk_blob_op_type op_type;
			spdk_bs_user_op_t *op;
			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel);

			op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV;
			op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length);
			if (!op) {
				cb_fn(cb_arg, -ENOMEM);
				return;
			}

			TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);

			return;
		}

		is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);

		if (read) {
			spdk_bs_sequence_t *seq;

			seq = bs_sequence_start(_channel, &cpl);
			if (!seq) {
				cb_fn(cb_arg, -ENOMEM);
				return;
			}

			if (is_allocated) {
				bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
			} else {
				bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
							 rw_iov_done, NULL);
			}
		} else {
			if (is_allocated) {
				spdk_bs_sequence_t *seq;

				seq = bs_sequence_start(_channel, &cpl);
				if (!seq) {
					cb_fn(cb_arg, -ENOMEM);
					return;
				}

				bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
			} else {
				/* Queue this operation and allocate the cluster */
				spdk_bs_user_op_t *op;

				op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset,
						      length);
				if (!op) {
					cb_fn(cb_arg, -ENOMEM);
					return;
				}

				bs_allocate_and_copy_cluster(blob, _channel, offset, op);
			}
		}
	} else {
		struct rw_iov_ctx *ctx;

		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
		if (ctx == NULL) {
			cb_fn(cb_arg, -ENOMEM);
			return;
		}

		ctx->blob = blob;
		ctx->channel = _channel;
		ctx->cb_fn = cb_fn;
		ctx->cb_arg = cb_arg;
		ctx->read = read;
		ctx->orig_iov = iov;
		ctx->iovcnt = iovcnt;
		ctx->io_unit_offset = offset;
		ctx->io_units_remaining = length;
		ctx->io_units_done = 0;

		rw_iov_split_next(ctx, 0);
	}
}
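/* blob_lookup() below consults the open_blobids bit array before touching
 * the RB tree: a clear bit proves the blob is not open, so the common miss
 * case costs a single bit test instead of a tree walk.
 */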
static struct spdk_blob *
blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob find;

	if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) {
		return NULL;
	}

	find.id = blobid;
	return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find);
}

static void
blob_get_snapshot_and_clone_entries(struct spdk_blob *blob,
				    struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry)
{
	assert(blob != NULL);
	*snapshot_entry = NULL;
	*clone_entry = NULL;

	if (blob->parent_id == SPDK_BLOBID_INVALID) {
		return;
	}

	TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) {
		if ((*snapshot_entry)->id == blob->parent_id) {
			break;
		}
	}

	if (*snapshot_entry != NULL) {
		TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) {
			if ((*clone_entry)->id == blob->id) {
				break;
			}
		}

		assert(*clone_entry != NULL);
	}
}

static int
bs_channel_create(void *io_device, void *ctx_buf)
{
	struct spdk_blob_store *bs = io_device;
	struct spdk_bs_channel *channel = ctx_buf;
	struct spdk_bs_dev *dev;
	uint32_t max_ops = bs->max_channel_ops;
	uint32_t i;

	dev = bs->dev;

	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
	if (!channel->req_mem) {
		return -1;
	}

	TAILQ_INIT(&channel->reqs);

	for (i = 0; i < max_ops; i++) {
		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
	}

	channel->bs = bs;
	channel->dev = dev;
	channel->dev_channel = dev->create_channel(dev);

	if (!channel->dev_channel) {
		SPDK_ERRLOG("Failed to create device channel.\n");
		free(channel->req_mem);
		return -1;
	}

	TAILQ_INIT(&channel->need_cluster_alloc);
	TAILQ_INIT(&channel->queued_io);

	return 0;
}
static void
bs_channel_destroy(void *io_device, void *ctx_buf)
{
	struct spdk_bs_channel *channel = ctx_buf;
	spdk_bs_user_op_t *op;

	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
		op = TAILQ_FIRST(&channel->need_cluster_alloc);
		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
		bs_user_op_abort(op, -EIO);
	}

	while (!TAILQ_EMPTY(&channel->queued_io)) {
		op = TAILQ_FIRST(&channel->queued_io);
		TAILQ_REMOVE(&channel->queued_io, op, link);
		bs_user_op_abort(op, -EIO);
	}

	free(channel->req_mem);
	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
}

static void
bs_dev_destroy(void *io_device)
{
	struct spdk_blob_store *bs = io_device;
	struct spdk_blob *blob, *blob_tmp;

	bs->dev->destroy(bs->dev);

	RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) {
		RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob);
		spdk_bit_array_clear(bs->open_blobids, blob->id);
		blob_free(blob);
	}

	pthread_mutex_destroy(&bs->used_clusters_mutex);

	spdk_bit_array_free(&bs->open_blobids);
	spdk_bit_array_free(&bs->used_blobids);
	spdk_bit_array_free(&bs->used_md_pages);
	spdk_bit_pool_free(&bs->used_clusters);

	/*
	 * If this function is called for any reason except a successful unload,
	 * the unload_cpl type will be NONE and this will be a nop.
	 */
	bs_call_cpl(&bs->unload_cpl, bs->unload_err);

	free(bs);
}
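/* The snapshot/clone relationship is tracked in a two-level list: one
 * spdk_blob_list entry per snapshot on bs->snapshots, each holding a list
 * of its clones. bs_blob_list_add() below registers a blob under its parent
 * snapshot, creating the snapshot entry on first use.
 */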
static int
bs_blob_list_add(struct spdk_blob *blob)
{
	spdk_blob_id snapshot_id;
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	assert(blob != NULL);

	snapshot_id = blob->parent_id;
	if (snapshot_id == SPDK_BLOBID_INVALID) {
		return 0;
	}

	snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id);
	if (snapshot_entry == NULL) {
		/* Snapshot not found */
		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
		if (snapshot_entry == NULL) {
			return -ENOMEM;
		}
		snapshot_entry->id = snapshot_id;
		TAILQ_INIT(&snapshot_entry->clones);
		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
	} else {
		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
			if (clone_entry->id == blob->id) {
				break;
			}
		}
	}

	if (clone_entry == NULL) {
		/* Clone not found */
		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
		if (clone_entry == NULL) {
			return -ENOMEM;
		}
		clone_entry->id = blob->id;
		TAILQ_INIT(&clone_entry->clones);
		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
		snapshot_entry->clone_count++;
	}

	return 0;
}

static void
bs_blob_list_remove(struct spdk_blob *blob)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry);

	if (snapshot_entry == NULL) {
		return;
	}

	blob->parent_id = SPDK_BLOBID_INVALID;
	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
	free(clone_entry);

	snapshot_entry->clone_count--;
}

static int
bs_blob_list_free(struct spdk_blob_store *bs)
{
	struct spdk_blob_list *snapshot_entry;
	struct spdk_blob_list *snapshot_entry_tmp;
	struct spdk_blob_list *clone_entry;
	struct spdk_blob_list *clone_entry_tmp;

	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
			free(clone_entry);
		}
		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
		free(snapshot_entry);
	}

	return 0;
}

static void
bs_free(struct spdk_blob_store *bs)
{
	bs_blob_list_free(bs);

	bs_unregister_md_thread(bs);
	spdk_io_device_unregister(bs, bs_dev_destroy);
}
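/* spdk_bs_opts_init() below uses the opts_size/FIELD_OK pattern for ABI
 * compatibility: only fields that fit within the caller-declared structure
 * size are written, so an application built against an older, smaller
 * struct spdk_bs_opts still gets every field it knows about initialized
 * and nothing beyond its allocation touched.
 */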
void
spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ);
	SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES);
	SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES);
	SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS);
	SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP);

	if (FIELD_OK(bstype)) {
		memset(&opts->bstype, 0, sizeof(opts->bstype));
	}

	SET_FIELD(iter_cb_fn, NULL);
	SET_FIELD(iter_cb_arg, NULL);
	SET_FIELD(force_recover, false);

#undef FIELD_OK
#undef SET_FIELD
}

static int
bs_opts_verify(struct spdk_bs_opts *opts)
{
	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
	    opts->max_channel_ops == 0) {
		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
		return -1;
	}

	return 0;
}

/* START spdk_bs_load */

/* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */
struct spdk_bs_load_ctx {
	struct spdk_blob_store *bs;
	struct spdk_bs_super_block *super;

	struct spdk_bs_md_mask *mask;
	bool in_page_chain;
	uint32_t page_index;
	uint32_t cur_page;
	struct spdk_blob_md_page *page;

	uint64_t num_extent_pages;
	uint32_t *extent_page_num;
	struct spdk_blob_md_page *extent_pages;
	struct spdk_bit_array *used_clusters;

	spdk_bs_sequence_t *seq;
	spdk_blob_op_with_handle_complete iter_cb_fn;
	void *iter_cb_arg;
	struct spdk_blob *blob;
	spdk_blob_id blobid;

	bool force_recover;

	/* These fields are used in the spdk_bs_dump path. */
	bool dumping;
	FILE *fp;
	spdk_bs_dump_print_xattr print_xattr_fn;
	char xattr_name[4096];
};
static int
bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs,
	 struct spdk_bs_load_ctx **_ctx)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_load_ctx *ctx;
	uint64_t dev_size;
	int rc;

	dev_size = dev->blocklen * dev->blockcnt;
	if (dev_size < opts->cluster_sz) {
		/* Device size cannot be smaller than cluster size of blobstore */
		SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
			     dev_size, opts->cluster_sz);
		return -ENOSPC;
	}
	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
		/* Cluster size cannot be smaller than page size */
		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
		return -EINVAL;
	}
	bs = calloc(1, sizeof(struct spdk_blob_store));
	if (!bs) {
		return -ENOMEM;
	}

	ctx = calloc(1, sizeof(struct spdk_bs_load_ctx));
	if (!ctx) {
		free(bs);
		return -ENOMEM;
	}

	ctx->bs = bs;
	ctx->iter_cb_fn = opts->iter_cb_fn;
	ctx->iter_cb_arg = opts->iter_cb_arg;
	ctx->force_recover = opts->force_recover;

	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->super) {
		free(ctx);
		free(bs);
		return -ENOMEM;
	}

	RB_INIT(&bs->open_blobs);
	TAILQ_INIT(&bs->snapshots);
	bs->dev = dev;
	bs->md_thread = spdk_get_thread();
	assert(bs->md_thread != NULL);

	/*
	 * Do not use bs_lba_to_cluster() here since blockcnt may not be an
	 * even multiple of the cluster size.
	 */
	bs->cluster_sz = opts->cluster_sz;
	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
	ctx->used_clusters = spdk_bit_array_create(bs->total_clusters);
	if (!ctx->used_clusters) {
		spdk_free(ctx->super);
		free(ctx);
		free(bs);
		return -ENOMEM;
	}

	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
	if (spdk_u32_is_pow2(bs->pages_per_cluster)) {
		bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster);
	}
	bs->num_free_clusters = bs->total_clusters;
	bs->io_unit_size = dev->blocklen;

	bs->max_channel_ops = opts->max_channel_ops;
	bs->super_blob = SPDK_BLOBID_INVALID;
	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));

	/* The metadata is assumed to be at least 1 page */
	bs->used_md_pages = spdk_bit_array_create(1);
	bs->used_blobids = spdk_bit_array_create(0);
	bs->open_blobids = spdk_bit_array_create(0);

	pthread_mutex_init(&bs->used_clusters_mutex, NULL);

	spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy,
				sizeof(struct spdk_bs_channel), "blobstore");
	rc = bs_register_md_thread(bs);
	if (rc == -1) {
		spdk_io_device_unregister(bs, NULL);
		pthread_mutex_destroy(&bs->used_clusters_mutex);
		spdk_bit_array_free(&bs->open_blobids);
		spdk_bit_array_free(&bs->used_blobids);
		spdk_bit_array_free(&bs->used_md_pages);
		spdk_bit_array_free(&ctx->used_clusters);
		spdk_free(ctx->super);
		free(ctx);
		free(bs);
		/* FIXME: this is a lie but don't know how to get a proper error code here */
		return -ENOMEM;
	}

	*_ctx = ctx;
	*_bs = bs;
	return 0;
}

static void
bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno)
{
	assert(bserrno != 0);

	spdk_free(ctx->super);
	bs_sequence_finish(ctx->seq, bserrno);
	bs_free(ctx->bs);
	spdk_bit_array_free(&ctx->used_clusters);
	free(ctx);
}

static void
bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
	       struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	/* Update the values in the super block */
	super->super_blob = bs->super_blob;
	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
	super->crc = blob_md_page_calc_crc(super);
	bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0),
			      bs_byte_to_lba(bs, sizeof(*super)),
			      cb_fn, cb_arg);
}
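/* The three helpers below serialize the used_clusters, used_md_pages and
 * used_blobids allocation state as spdk_bs_md_mask regions (a small typed
 * header plus a raw bitmap) at the locations recorded in the super block.
 * They are written on unload so that a clean load can restore the bitmaps
 * directly instead of recovering them from the full metadata.
 */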
static void
bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
{
	struct spdk_bs_load_ctx *ctx = arg;
	uint64_t mask_size, lba, lba_count;

	/* Write out the used clusters mask */
	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->mask) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
	ctx->mask->length = ctx->bs->total_clusters;
	/* We could get here through the normal unload path, or through dirty
	 * shutdown recovery. For the normal unload path, we use the mask from
	 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet -
	 * only the bit array from the load ctx.
	 */
	if (ctx->bs->used_clusters) {
		assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters));
		spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask);
	} else {
		assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters));
		spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask);
	}
	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
}

static void
bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
{
	struct spdk_bs_load_ctx *ctx = arg;
	uint64_t mask_size, lba, lba_count;

	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->mask) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
	ctx->mask->length = ctx->super->md_len;
	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));

	spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask);
	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
}
static void
bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
{
	struct spdk_bs_load_ctx *ctx = arg;
	uint64_t mask_size, lba, lba_count;

	if (ctx->super->used_blobid_mask_len == 0) {
		/*
		 * This is a pre-v3 on-disk format where the blobid mask does not get
		 * written to disk.
		 */
		cb_fn(seq, arg, 0);
		return;
	}

	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
				 SPDK_MALLOC_DMA);
	if (!ctx->mask) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
	ctx->mask->length = ctx->super->md_len;
	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));

	spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask);
	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
}

static void
blob_set_thin_provision(struct spdk_blob *blob)
{
	blob_verify_md_op(blob);
	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
	blob->state = SPDK_BLOB_STATE_DIRTY;
}

static void
blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method)
{
	blob_verify_md_op(blob);
	blob->clear_method = clear_method;
	blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT);
	blob->state = SPDK_BLOB_STATE_DIRTY;
}

static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);

static void
bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	spdk_blob_id id;
	int64_t page_num;

	/* Iterate to the next blob (we can't use the spdk_bs_iter_next function
	 * as our last blob has been removed) */
	page_num = bs_blobid_to_page(ctx->blobid);
	page_num++;
	page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
	if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
		bs_load_iter(ctx, NULL, -ENOENT);
		return;
	}

	id = bs_page_to_blobid(page_num);

	spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx);
}

static void
bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Failed to close corrupted blob\n");
		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
		return;
	}

	spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx);
}
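/* Recovery policy for a snapshot left half-created or half-deleted by a
 * power failure: the clone is opened and its parent_id decides the outcome
 * (see bs_examine_clone() below). If the clone still points at the
 * snapshot, the snapshot is kept and its pending-removal markers dropped;
 * otherwise the snapshot's cluster map is detached and the blob deleted.
 */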
static void
bs_delete_corrupted_blob(void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint64_t i;

	if (bserrno != 0) {
		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
		return;
	}

	/* The snapshot and the clone share the same copy of the cluster map and
	 * extent pages at this point. Clear both for the snapshot now, so that
	 * they won't be cleared for the clone later when we remove the snapshot.
	 * Also set thin provisioning so the snapshot passes the data corruption
	 * check. */
	for (i = 0; i < ctx->blob->active.num_clusters; i++) {
		ctx->blob->active.clusters[i] = 0;
	}
	for (i = 0; i < ctx->blob->active.num_extent_pages; i++) {
		ctx->blob->active.extent_pages[i] = 0;
	}

	ctx->blob->md_ro = false;

	blob_set_thin_provision(ctx->blob);

	ctx->blobid = ctx->blob->id;

	spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx);
}

static void
bs_update_corrupted_blob(void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
		return;
	}

	ctx->blob->md_ro = false;
	blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true);
	blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true);
	spdk_blob_set_read_only(ctx->blob);

	if (ctx->iter_cb_fn) {
		ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0);
	}
	bs_blob_list_add(ctx->blob);

	spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
}

static void
bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Failed to open clone of a corrupted blob\n");
		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
		return;
	}

	if (blob->parent_id == ctx->blob->id) {
		/* Power failure occurred before updating clone (snapshot delete case)
		 * or after updating clone (creating snapshot case) - keep snapshot */
		spdk_blob_close(blob, bs_update_corrupted_blob, ctx);
	} else {
		/* Power failure occurred after updating clone (snapshot delete case)
		 * or before updating clone (creating snapshot case) - remove snapshot */
		spdk_blob_close(blob, bs_delete_corrupted_blob, ctx);
	}
}
3767 */ 3768 SPDK_ERRLOG("Error in iterating blobs\n"); 3769 } 3770 3771 ctx->iter_cb_fn = NULL; 3772 3773 spdk_free(ctx->super); 3774 spdk_free(ctx->mask); 3775 bs_sequence_finish(ctx->seq, bserrno); 3776 free(ctx); 3777 } 3778 3779 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 3780 3781 static void 3782 bs_load_complete(struct spdk_bs_load_ctx *ctx) 3783 { 3784 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters); 3785 if (ctx->dumping) { 3786 bs_dump_read_md_page(ctx->seq, ctx); 3787 return; 3788 } 3789 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx); 3790 } 3791 3792 static void 3793 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3794 { 3795 struct spdk_bs_load_ctx *ctx = cb_arg; 3796 int rc; 3797 3798 /* The type must be correct */ 3799 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 3800 3801 /* The length of the mask (in bits) must not be greater than 3802 * the length of the buffer (converted to bits) */ 3803 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 3804 3805 /* The length of the mask must be exactly equal to the size 3806 * (in pages) of the metadata region */ 3807 assert(ctx->mask->length == ctx->super->md_len); 3808 3809 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length); 3810 if (rc < 0) { 3811 spdk_free(ctx->mask); 3812 bs_load_ctx_fail(ctx, rc); 3813 return; 3814 } 3815 3816 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask); 3817 bs_load_complete(ctx); 3818 } 3819 3820 static void 3821 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3822 { 3823 struct spdk_bs_load_ctx *ctx = cb_arg; 3824 uint64_t lba, lba_count, mask_size; 3825 int rc; 3826 3827 if (bserrno != 0) { 3828 bs_load_ctx_fail(ctx, bserrno); 3829 return; 3830 } 3831 3832 /* The type must be correct */ 3833 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 3834 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 3835 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 3836 struct spdk_blob_md_page) * 8)); 3837 /* The length of the mask must be exactly equal to the total number of clusters */ 3838 assert(ctx->mask->length == ctx->bs->total_clusters); 3839 3840 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length); 3841 if (rc < 0) { 3842 spdk_free(ctx->mask); 3843 bs_load_ctx_fail(ctx, rc); 3844 return; 3845 } 3846 3847 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask); 3848 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters); 3849 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 3850 3851 spdk_free(ctx->mask); 3852 3853 /* Read the used blobids mask */ 3854 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3855 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3856 SPDK_MALLOC_DMA); 3857 if (!ctx->mask) { 3858 bs_load_ctx_fail(ctx, -ENOMEM); 3859 return; 3860 } 3861 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3862 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3863 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 3864 bs_load_used_blobids_cpl, ctx); 3865 } 3866 3867 static void 3868 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3869 { 3870 struct spdk_bs_load_ctx *ctx = cb_arg; 3871 uint64_t lba, lba_count, mask_size; 3872 int rc; 3873 3874 if (bserrno 
!= 0) { 3875 bs_load_ctx_fail(ctx, bserrno); 3876 return; 3877 } 3878 3879 /* The type must be correct */ 3880 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 3881 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 3882 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 3883 8)); 3884 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 3885 if (ctx->mask->length != ctx->super->md_len) { 3886 SPDK_ERRLOG("mismatched md_len in used_pages mask: " 3887 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n", 3888 ctx->mask->length, ctx->super->md_len); 3889 assert(false); 3890 } 3891 3892 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length); 3893 if (rc < 0) { 3894 spdk_free(ctx->mask); 3895 bs_load_ctx_fail(ctx, rc); 3896 return; 3897 } 3898 3899 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask); 3900 spdk_free(ctx->mask); 3901 3902 /* Read the used clusters mask */ 3903 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3904 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3905 SPDK_MALLOC_DMA); 3906 if (!ctx->mask) { 3907 bs_load_ctx_fail(ctx, -ENOMEM); 3908 return; 3909 } 3910 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3911 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3912 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 3913 bs_load_used_clusters_cpl, ctx); 3914 } 3915 3916 static void 3917 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx) 3918 { 3919 uint64_t lba, lba_count, mask_size; 3920 3921 /* Read the used pages mask */ 3922 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3923 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3924 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3925 if (!ctx->mask) { 3926 bs_load_ctx_fail(ctx, -ENOMEM); 3927 return; 3928 } 3929 3930 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3931 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3932 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 3933 bs_load_used_pages_cpl, ctx); 3934 } 3935 3936 static int 3937 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 3938 { 3939 struct spdk_blob_store *bs = ctx->bs; 3940 struct spdk_blob_md_descriptor *desc; 3941 size_t cur_desc = 0; 3942 3943 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3944 while (cur_desc < sizeof(page->descriptors)) { 3945 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 3946 if (desc->length == 0) { 3947 /* If padding and length are 0, this terminates the page */ 3948 break; 3949 } 3950 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 3951 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 3952 unsigned int i, j; 3953 unsigned int cluster_count = 0; 3954 uint32_t cluster_idx; 3955 3956 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 3957 3958 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 3959 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 3960 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 3961 /* 3962 * cluster_idx = 0 means an unallocated cluster - don't mark that 3963 * in the used cluster map. 
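* For example, an RLE extent of {cluster_idx = 12, length = 3} (illustrative
* values) marks device clusters 12, 13 and 14 as used via cluster_idx + j.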
3964 */ 3965 if (cluster_idx != 0) { 3966 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 3967 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 3968 if (bs->num_free_clusters == 0) { 3969 return -ENOSPC; 3970 } 3971 bs->num_free_clusters--; 3972 } 3973 cluster_count++; 3974 } 3975 } 3976 if (cluster_count == 0) { 3977 return -EINVAL; 3978 } 3979 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 3980 struct spdk_blob_md_descriptor_extent_page *desc_extent; 3981 uint32_t i; 3982 uint32_t cluster_count = 0; 3983 uint32_t cluster_idx; 3984 size_t cluster_idx_length; 3985 3986 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 3987 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 3988 3989 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 3990 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 3991 return -EINVAL; 3992 } 3993 3994 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 3995 cluster_idx = desc_extent->cluster_idx[i]; 3996 /* 3997 * cluster_idx = 0 means an unallocated cluster - don't mark that 3998 * in the used cluster map. 3999 */ 4000 if (cluster_idx != 0) { 4001 if (cluster_idx < desc_extent->start_cluster_idx && 4002 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4003 return -EINVAL; 4004 } 4005 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4006 if (bs->num_free_clusters == 0) { 4007 return -ENOSPC; 4008 } 4009 bs->num_free_clusters--; 4010 } 4011 cluster_count++; 4012 } 4013 4014 if (cluster_count == 0) { 4015 return -EINVAL; 4016 } 4017 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4018 /* Skip this item */ 4019 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4020 /* Skip this item */ 4021 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4022 /* Skip this item */ 4023 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4024 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4025 uint32_t num_extent_pages = ctx->num_extent_pages; 4026 uint32_t i; 4027 size_t extent_pages_length; 4028 void *tmp; 4029 4030 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4031 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4032 4033 if (desc_extent_table->length == 0 || 4034 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4035 return -EINVAL; 4036 } 4037 4038 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4039 if (desc_extent_table->extent_page[i].page_idx != 0) { 4040 if (desc_extent_table->extent_page[i].num_pages != 1) { 4041 return -EINVAL; 4042 } 4043 num_extent_pages += 1; 4044 } 4045 } 4046 4047 if (num_extent_pages > 0) { 4048 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4049 if (tmp == NULL) { 4050 return -ENOMEM; 4051 } 4052 ctx->extent_page_num = tmp; 4053 4054 /* Extent table entries contain md page numbers for extent pages. 4055 * Zeroes represent unallocated extent pages, those are run-length-encoded. 
4056 */ 4057 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4058 if (desc_extent_table->extent_page[i].page_idx != 0) { 4059 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4060 ctx->num_extent_pages += 1; 4061 } 4062 } 4063 } 4064 } else { 4065 /* Error */ 4066 return -EINVAL; 4067 } 4068 /* Advance to the next descriptor */ 4069 cur_desc += sizeof(*desc) + desc->length; 4070 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4071 break; 4072 } 4073 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4074 } 4075 return 0; 4076 } 4077 4078 static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4079 { 4080 uint32_t crc; 4081 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4082 size_t desc_len; 4083 4084 crc = blob_md_page_calc_crc(page); 4085 if (crc != page->crc) { 4086 return false; 4087 } 4088 4089 /* Extent page should always be of sequence num 0. */ 4090 if (page->sequence_num != 0) { 4091 return false; 4092 } 4093 4094 /* Descriptor type must be EXTENT_PAGE. */ 4095 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4096 return false; 4097 } 4098 4099 /* Descriptor length cannot exceed the page. */ 4100 desc_len = sizeof(*desc) + desc->length; 4101 if (desc_len > sizeof(page->descriptors)) { 4102 return false; 4103 } 4104 4105 /* It has to be the only descriptor in the page. */ 4106 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 4107 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 4108 if (desc->length != 0) { 4109 return false; 4110 } 4111 } 4112 4113 return true; 4114 } 4115 4116 static bool bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 4117 { 4118 uint32_t crc; 4119 struct spdk_blob_md_page *page = ctx->page; 4120 4121 crc = blob_md_page_calc_crc(page); 4122 if (crc != page->crc) { 4123 return false; 4124 } 4125 4126 /* First page of a sequence should match the blobid. 
*/
4127 if (page->sequence_num == 0 &&
4128 bs_page_to_blobid(ctx->cur_page) != page->id) {
4129 return false;
4130 }
4131 assert(bs_load_cur_extent_page_valid(page) == false);
4132
4133 return true;
4134 }
4135
4136 static void
4137 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
4138
4139 static void
4140 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4141 {
4142 struct spdk_bs_load_ctx *ctx = cb_arg;
4143
4144 if (bserrno != 0) {
4145 bs_load_ctx_fail(ctx, bserrno);
4146 return;
4147 }
4148
4149 bs_load_complete(ctx);
4150 }
4151
4152 static void
4153 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4154 {
4155 struct spdk_bs_load_ctx *ctx = cb_arg;
4156
4157 spdk_free(ctx->mask);
4158 ctx->mask = NULL;
4159
4160 if (bserrno != 0) {
4161 bs_load_ctx_fail(ctx, bserrno);
4162 return;
4163 }
4164
4165 bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl);
4166 }
4167
4168 static void
4169 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4170 {
4171 struct spdk_bs_load_ctx *ctx = cb_arg;
4172
4173 spdk_free(ctx->mask);
4174 ctx->mask = NULL;
4175
4176 if (bserrno != 0) {
4177 bs_load_ctx_fail(ctx, bserrno);
4178 return;
4179 }
4180
4181 bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl);
4182 }
4183
4184 static void
4185 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
4186 {
4187 bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl);
4188 }
4189
4190 static void
4191 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
4192 {
4193 uint64_t num_md_clusters;
4194 uint64_t i;
4195
4196 ctx->in_page_chain = false;
4197
4198 do {
4199 ctx->page_index++;
4200 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
4201
4202 if (ctx->page_index < ctx->super->md_len) {
4203 ctx->cur_page = ctx->page_index;
4204 bs_load_replay_cur_md_page(ctx);
4205 } else {
4206 /* Claim all of the clusters used by the metadata */
4207 num_md_clusters = spdk_divide_round_up(
4208 ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster);
4209 for (i = 0; i < num_md_clusters; i++) {
4210 spdk_bit_array_set(ctx->used_clusters, i);
4211 }
4212 ctx->bs->num_free_clusters -= num_md_clusters;
4213 spdk_free(ctx->page);
4214 bs_load_write_used_md(ctx);
4215 }
4216 }
4217
4218 static void
4219 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4220 {
4221 struct spdk_bs_load_ctx *ctx = cb_arg;
4222 uint32_t page_num;
4223 uint64_t i;
4224
4225 if (bserrno != 0) {
4226 spdk_free(ctx->extent_pages);
4227 bs_load_ctx_fail(ctx, bserrno);
4228 return;
4229 }
4230
4231 for (i = 0; i < ctx->num_extent_pages; i++) {
4232 /* Extent pages are only read when they are referenced by in-chain md.
4233 * The integrity of the md is suspect if such a page is not a valid extent page.
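* (The page numbers come from the EXTENT_TABLE descriptors collected into
* ctx->extent_page_num while replaying the md chain.)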
*/ 4234 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4235 spdk_free(ctx->extent_pages); 4236 bs_load_ctx_fail(ctx, -EILSEQ); 4237 return; 4238 } 4239 4240 page_num = ctx->extent_page_num[i]; 4241 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4242 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4243 spdk_free(ctx->extent_pages); 4244 bs_load_ctx_fail(ctx, -EILSEQ); 4245 return; 4246 } 4247 } 4248 4249 spdk_free(ctx->extent_pages); 4250 free(ctx->extent_page_num); 4251 ctx->extent_page_num = NULL; 4252 ctx->num_extent_pages = 0; 4253 4254 bs_load_replay_md_chain_cpl(ctx); 4255 } 4256 4257 static void 4258 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4259 { 4260 spdk_bs_batch_t *batch; 4261 uint32_t page; 4262 uint64_t lba; 4263 uint64_t i; 4264 4265 ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0, 4266 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4267 if (!ctx->extent_pages) { 4268 bs_load_ctx_fail(ctx, -ENOMEM); 4269 return; 4270 } 4271 4272 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4273 4274 for (i = 0; i < ctx->num_extent_pages; i++) { 4275 page = ctx->extent_page_num[i]; 4276 assert(page < ctx->super->md_len); 4277 lba = bs_md_page_to_lba(ctx->bs, page); 4278 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4279 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE)); 4280 } 4281 4282 bs_batch_close(batch); 4283 } 4284 4285 static void 4286 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4287 { 4288 struct spdk_bs_load_ctx *ctx = cb_arg; 4289 uint32_t page_num; 4290 struct spdk_blob_md_page *page; 4291 4292 if (bserrno != 0) { 4293 bs_load_ctx_fail(ctx, bserrno); 4294 return; 4295 } 4296 4297 page_num = ctx->cur_page; 4298 page = ctx->page; 4299 if (bs_load_cur_md_page_valid(ctx) == true) { 4300 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4301 bs_claim_md_page(ctx->bs, page_num); 4302 if (page->sequence_num == 0) { 4303 SPDK_NOTICELOG("Recover: blob %" PRIu32 "\n", page_num); 4304 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4305 } 4306 if (bs_load_replay_md_parse_page(ctx, page)) { 4307 bs_load_ctx_fail(ctx, -EILSEQ); 4308 return; 4309 } 4310 if (page->next != SPDK_INVALID_MD_PAGE) { 4311 ctx->in_page_chain = true; 4312 ctx->cur_page = page->next; 4313 bs_load_replay_cur_md_page(ctx); 4314 return; 4315 } 4316 if (ctx->num_extent_pages != 0) { 4317 bs_load_replay_extent_pages(ctx); 4318 return; 4319 } 4320 } 4321 } 4322 bs_load_replay_md_chain_cpl(ctx); 4323 } 4324 4325 static void 4326 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4327 { 4328 uint64_t lba; 4329 4330 assert(ctx->cur_page < ctx->super->md_len); 4331 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4332 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4333 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4334 bs_load_replay_md_cpl, ctx); 4335 } 4336 4337 static void 4338 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4339 { 4340 ctx->page_index = 0; 4341 ctx->cur_page = 0; 4342 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4343 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4344 if (!ctx->page) { 4345 bs_load_ctx_fail(ctx, -ENOMEM); 4346 return; 4347 } 4348 bs_load_replay_cur_md_page(ctx); 4349 } 4350 4351 static void 4352 bs_recover(struct spdk_bs_load_ctx *ctx) 4353 { 4354 int rc; 4355 4356 SPDK_NOTICELOG("Performing recovery on blobstore\n"); 4357 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4358 if (rc < 0) { 4359 
bs_load_ctx_fail(ctx, -ENOMEM);
4360 return;
4361 }
4362
4363 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
4364 if (rc < 0) {
4365 bs_load_ctx_fail(ctx, -ENOMEM);
4366 return;
4367 }
4368
4369 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
4370 if (rc < 0) {
4371 bs_load_ctx_fail(ctx, -ENOMEM);
4372 return;
4373 }
4374
4375 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len);
4376 if (rc < 0) {
4377 bs_load_ctx_fail(ctx, -ENOMEM);
4378 return;
4379 }
4380
4381 ctx->bs->num_free_clusters = ctx->bs->total_clusters;
4382 bs_load_replay_md(ctx);
4383 }
4384
4385 static int
4386 bs_parse_super(struct spdk_bs_load_ctx *ctx)
4387 {
4388 int rc;
4389
4390 if (ctx->super->size == 0) {
4391 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
4392 }
4393
4394 if (ctx->super->io_unit_size == 0) {
4395 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
4396 }
4397
4398 ctx->bs->clean = 1;
4399 ctx->bs->cluster_sz = ctx->super->cluster_size;
4400 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
4401 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
4402 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
4403 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
4404 }
4405 ctx->bs->io_unit_size = ctx->super->io_unit_size;
4406 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
4407 if (rc < 0) {
4408 return -ENOMEM;
4409 }
4410 ctx->bs->md_start = ctx->super->md_start;
4411 ctx->bs->md_len = ctx->super->md_len;
4412 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
4413 if (rc < 0) {
4414 return -ENOMEM;
4415 }
4416
4417 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
4418 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
4419 ctx->bs->super_blob = ctx->super->super_blob;
4420 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
4421
4422 return 0;
4423 }
4424
4425 static void
4426 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4427 {
4428 struct spdk_bs_load_ctx *ctx = cb_arg;
4429 uint32_t crc;
4430 int rc;
4431 static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
4432
4433 if (ctx->super->version > SPDK_BS_VERSION ||
4434 ctx->super->version < SPDK_BS_INITIAL_VERSION) {
4435 bs_load_ctx_fail(ctx, -EILSEQ);
4436 return;
4437 }
4438
4439 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4440 sizeof(ctx->super->signature)) != 0) {
4441 bs_load_ctx_fail(ctx, -EILSEQ);
4442 return;
4443 }
4444
4445 crc = blob_md_page_calc_crc(ctx->super);
4446 if (crc != ctx->super->crc) {
4447 bs_load_ctx_fail(ctx, -EILSEQ);
4448 return;
4449 }
4450
4451 if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
4452 SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
4453 } else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
4454 SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless of bstype\n");
4455 } else {
4456 SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
4457 SPDK_LOGDUMP(blob, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
4458 SPDK_LOGDUMP(blob, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
4459 bs_load_ctx_fail(ctx, -ENXIO);
4460 return;
4461 }
4462
4463 if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) {
4464 SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64
"\n", 4465 ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size); 4466 bs_load_ctx_fail(ctx, -EILSEQ); 4467 return; 4468 } 4469 4470 rc = bs_parse_super(ctx); 4471 if (rc < 0) { 4472 bs_load_ctx_fail(ctx, rc); 4473 return; 4474 } 4475 4476 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4477 bs_recover(ctx); 4478 } else { 4479 bs_load_read_used_pages(ctx); 4480 } 4481 } 4482 4483 static inline int 4484 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4485 { 4486 4487 if (!src->opts_size) { 4488 SPDK_ERRLOG("opts_size should not be zero value\n"); 4489 return -1; 4490 } 4491 4492 #define FIELD_OK(field) \ 4493 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4494 4495 #define SET_FIELD(field) \ 4496 if (FIELD_OK(field)) { \ 4497 dst->field = src->field; \ 4498 } \ 4499 4500 SET_FIELD(cluster_sz); 4501 SET_FIELD(num_md_pages); 4502 SET_FIELD(max_md_ops); 4503 SET_FIELD(max_channel_ops); 4504 SET_FIELD(clear_method); 4505 4506 if (FIELD_OK(bstype)) { 4507 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 4508 } 4509 SET_FIELD(iter_cb_fn); 4510 SET_FIELD(iter_cb_arg); 4511 SET_FIELD(force_recover); 4512 4513 dst->opts_size = src->opts_size; 4514 4515 /* You should not remove this statement, but need to update the assert statement 4516 * if you add a new field, and also add a corresponding SET_FIELD statement */ 4517 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 72, "Incorrect size"); 4518 4519 #undef FIELD_OK 4520 #undef SET_FIELD 4521 4522 return 0; 4523 } 4524 4525 void 4526 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4527 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4528 { 4529 struct spdk_blob_store *bs; 4530 struct spdk_bs_cpl cpl; 4531 struct spdk_bs_load_ctx *ctx; 4532 struct spdk_bs_opts opts = {}; 4533 int err; 4534 4535 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 4536 4537 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 4538 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 4539 dev->destroy(dev); 4540 cb_fn(cb_arg, NULL, -EINVAL); 4541 return; 4542 } 4543 4544 spdk_bs_opts_init(&opts, sizeof(opts)); 4545 if (o) { 4546 if (bs_opts_copy(o, &opts)) { 4547 return; 4548 } 4549 } 4550 4551 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 4552 dev->destroy(dev); 4553 cb_fn(cb_arg, NULL, -EINVAL); 4554 return; 4555 } 4556 4557 err = bs_alloc(dev, &opts, &bs, &ctx); 4558 if (err) { 4559 dev->destroy(dev); 4560 cb_fn(cb_arg, NULL, err); 4561 return; 4562 } 4563 4564 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 4565 cpl.u.bs_handle.cb_fn = cb_fn; 4566 cpl.u.bs_handle.cb_arg = cb_arg; 4567 cpl.u.bs_handle.bs = bs; 4568 4569 ctx->seq = bs_sequence_start(bs->md_channel, &cpl); 4570 if (!ctx->seq) { 4571 spdk_free(ctx->super); 4572 free(ctx); 4573 bs_free(bs); 4574 cb_fn(cb_arg, NULL, -ENOMEM); 4575 return; 4576 } 4577 4578 /* Read the super block */ 4579 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 4580 bs_byte_to_lba(bs, sizeof(*ctx->super)), 4581 bs_load_super_cpl, ctx); 4582 } 4583 4584 /* END spdk_bs_load */ 4585 4586 /* START spdk_bs_dump */ 4587 4588 static void 4589 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 4590 { 4591 spdk_free(ctx->super); 4592 4593 /* 4594 * We need to defer calling bs_call_cpl() until after 4595 * dev destruction, so tuck these away for later use. 
4596 */ 4597 ctx->bs->unload_err = bserrno; 4598 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 4599 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 4600 4601 bs_sequence_finish(seq, 0); 4602 bs_free(ctx->bs); 4603 free(ctx); 4604 } 4605 4606 static void 4607 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4608 { 4609 struct spdk_blob_md_descriptor_xattr *desc_xattr; 4610 uint32_t i; 4611 const char *type; 4612 4613 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 4614 4615 if (desc_xattr->length != 4616 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 4617 desc_xattr->name_length + desc_xattr->value_length) { 4618 } 4619 4620 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 4621 ctx->xattr_name[desc_xattr->name_length] = '\0'; 4622 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4623 type = "XATTR"; 4624 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4625 type = "XATTR_INTERNAL"; 4626 } else { 4627 assert(false); 4628 type = "XATTR_?"; 4629 } 4630 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name); 4631 fprintf(ctx->fp, " value = \""); 4632 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 4633 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 4634 desc_xattr->value_length); 4635 fprintf(ctx->fp, "\"\n"); 4636 for (i = 0; i < desc_xattr->value_length; i++) { 4637 if (i % 16 == 0) { 4638 fprintf(ctx->fp, " "); 4639 } 4640 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 4641 if ((i + 1) % 16 == 0) { 4642 fprintf(ctx->fp, "\n"); 4643 } 4644 } 4645 if (i % 16 != 0) { 4646 fprintf(ctx->fp, "\n"); 4647 } 4648 } 4649 4650 struct type_flag_desc { 4651 uint64_t mask; 4652 uint64_t val; 4653 const char *name; 4654 }; 4655 4656 static void 4657 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags, 4658 struct type_flag_desc *desc, size_t numflags) 4659 { 4660 uint64_t covered = 0; 4661 size_t i; 4662 4663 for (i = 0; i < numflags; i++) { 4664 if ((desc[i].mask & flags) != desc[i].val) { 4665 continue; 4666 } 4667 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name); 4668 if (desc[i].mask != desc[i].val) { 4669 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")", 4670 desc[i].mask, desc[i].val); 4671 } 4672 fprintf(ctx->fp, "\n"); 4673 covered |= desc[i].mask; 4674 } 4675 if ((flags & ~covered) != 0) { 4676 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered); 4677 } 4678 } 4679 4680 static void 4681 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4682 { 4683 struct spdk_blob_md_descriptor_flags *type_desc; 4684 #define ADD_FLAG(f) { f, f, #f } 4685 #define ADD_MASK_VAL(m, v) { m, v, #v } 4686 static struct type_flag_desc invalid[] = { 4687 ADD_FLAG(SPDK_BLOB_THIN_PROV), 4688 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR), 4689 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE), 4690 }; 4691 static struct type_flag_desc data_ro[] = { 4692 ADD_FLAG(SPDK_BLOB_READ_ONLY), 4693 }; 4694 static struct type_flag_desc md_ro[] = { 4695 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT), 4696 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE), 4697 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP), 4698 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES), 4699 }; 4700 #undef ADD_FLAG 4701 #undef ADD_MASK_VAL 4702 4703 type_desc = (struct 
spdk_blob_md_descriptor_flags *)desc; 4704 fprintf(ctx->fp, "Flags:\n"); 4705 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 4706 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 4707 SPDK_COUNTOF(invalid)); 4708 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 4709 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 4710 SPDK_COUNTOF(data_ro)); 4711 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 4712 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 4713 SPDK_COUNTOF(md_ro)); 4714 } 4715 4716 static void 4717 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4718 { 4719 struct spdk_blob_md_descriptor_extent_table *et_desc; 4720 uint64_t num_extent_pages; 4721 uint32_t et_idx; 4722 4723 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 4724 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 4725 sizeof(et_desc->extent_page[0]); 4726 4727 fprintf(ctx->fp, "Extent table:\n"); 4728 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 4729 if (et_desc->extent_page[et_idx].page_idx == 0) { 4730 /* Zeroes represent unallocated extent pages. */ 4731 continue; 4732 } 4733 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 4734 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 4735 et_desc->extent_page[et_idx].num_pages, 4736 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 4737 } 4738 } 4739 4740 static void 4741 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 4742 { 4743 uint32_t page_idx = ctx->cur_page; 4744 struct spdk_blob_md_page *page = ctx->page; 4745 struct spdk_blob_md_descriptor *desc; 4746 size_t cur_desc = 0; 4747 uint32_t crc; 4748 4749 fprintf(ctx->fp, "=========\n"); 4750 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 4751 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 4752 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 4753 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 4754 if (page->next == SPDK_INVALID_MD_PAGE) { 4755 fprintf(ctx->fp, "Next: None\n"); 4756 } else { 4757 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 4758 } 4759 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 4760 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 4761 fprintf(ctx->fp, " md"); 4762 } 4763 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 4764 fprintf(ctx->fp, " blob"); 4765 } 4766 fprintf(ctx->fp, "\n"); 4767 4768 crc = blob_md_page_calc_crc(page); 4769 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 4770 4771 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4772 while (cur_desc < sizeof(page->descriptors)) { 4773 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4774 if (desc->length == 0) { 4775 /* If padding and length are 0, this terminates the page */ 4776 break; 4777 } 4778 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4779 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4780 unsigned int i; 4781 4782 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4783 4784 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4785 if (desc_extent_rle->extents[i].cluster_idx != 0) { 4786 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 4787 desc_extent_rle->extents[i].cluster_idx); 4788 } else { 4789 fprintf(ctx->fp, "Unallocated Extent - "); 4790 } 4791 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 4792 fprintf(ctx->fp, "\n"); 4793 } 4794 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4795 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4796 unsigned int i; 4797 4798 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4799 4800 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 4801 if (desc_extent->cluster_idx[i] != 0) { 4802 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 4803 desc_extent->cluster_idx[i]); 4804 } else { 4805 fprintf(ctx->fp, "Unallocated Extent"); 4806 } 4807 fprintf(ctx->fp, "\n"); 4808 } 4809 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4810 bs_dump_print_xattr(ctx, desc); 4811 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4812 bs_dump_print_xattr(ctx, desc); 4813 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4814 bs_dump_print_type_flags(ctx, desc); 4815 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4816 bs_dump_print_extent_table(ctx, desc); 4817 } else { 4818 /* Error */ 4819 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 4820 } 4821 /* Advance to the next descriptor */ 4822 cur_desc += sizeof(*desc) + desc->length; 4823 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4824 break; 4825 } 4826 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4827 } 4828 } 4829 4830 static void 4831 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4832 { 4833 struct spdk_bs_load_ctx *ctx = cb_arg; 4834 4835 if (bserrno != 0) { 4836 bs_dump_finish(seq, ctx, bserrno); 4837 return; 4838 } 4839 4840 if (ctx->page->id != 0) { 4841 bs_dump_print_md_page(ctx); 4842 } 4843 4844 ctx->cur_page++; 4845 4846 if (ctx->cur_page < ctx->super->md_len) { 4847 bs_dump_read_md_page(seq, ctx); 4848 } else { 4849 spdk_free(ctx->page); 4850 bs_dump_finish(seq, ctx, 0); 4851 } 4852 } 4853 4854 static void 4855 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 4856 { 4857 struct spdk_bs_load_ctx *ctx = cb_arg; 4858 uint64_t lba; 4859 4860 assert(ctx->cur_page < ctx->super->md_len); 4861 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 4862 bs_sequence_read_dev(seq, ctx->page, lba, 4863 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4864 bs_dump_read_md_page_cpl, ctx); 4865 } 4866 4867 static void 4868 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4869 { 4870 struct spdk_bs_load_ctx *ctx = cb_arg; 4871 int rc; 4872 4873 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 4874 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 4875 sizeof(ctx->super->signature)) != 0) { 4876 fprintf(ctx->fp, "(Mismatch)\n"); 4877 bs_dump_finish(seq, ctx, bserrno); 4878 return; 4879 } else { 4880 fprintf(ctx->fp, "(OK)\n"); 4881 } 4882 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 4883 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 4884 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 4885 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 4886 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 4887 fprintf(ctx->fp, "Super Blob ID: "); 4888 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 4889 fprintf(ctx->fp, "(None)\n"); 4890 } else { 4891 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 4892 } 4893 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 4894 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 4895 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 4896 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 4897 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 4898 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 4899 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 4900 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 4901 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 4902 4903 ctx->cur_page = 0; 4904 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4905 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4906 if (!ctx->page) { 4907 bs_dump_finish(seq, ctx, -ENOMEM); 4908 return; 4909 } 4910 4911 rc = bs_parse_super(ctx); 4912 if (rc < 0) { 4913 bs_load_ctx_fail(ctx, rc); 4914 return; 4915 } 4916 4917 bs_load_read_used_pages(ctx); 4918 } 4919 4920 void 4921 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 4922 spdk_bs_op_complete cb_fn, void *cb_arg) 4923 { 4924 struct spdk_blob_store *bs; 4925 struct spdk_bs_cpl cpl; 4926 struct spdk_bs_load_ctx *ctx; 4927 struct spdk_bs_opts opts = {}; 4928 int err; 4929 4930 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 4931 4932 spdk_bs_opts_init(&opts, sizeof(opts)); 4933 4934 err = bs_alloc(dev, &opts, &bs, &ctx); 4935 if (err) { 4936 dev->destroy(dev); 4937 cb_fn(cb_arg, err); 4938 return; 4939 } 4940 4941 ctx->dumping = true; 4942 ctx->fp = fp; 4943 ctx->print_xattr_fn = print_xattr_fn; 4944 4945 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 4946 cpl.u.bs_basic.cb_fn = cb_fn; 4947 cpl.u.bs_basic.cb_arg = cb_arg; 4948 4949 ctx->seq = bs_sequence_start(bs->md_channel, &cpl); 4950 if (!ctx->seq) { 4951 spdk_free(ctx->super); 4952 free(ctx); 4953 bs_free(bs); 4954 cb_fn(cb_arg, -ENOMEM); 4955 return; 4956 } 4957 4958 /* Read the super block */ 4959 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 4960 bs_byte_to_lba(bs, sizeof(*ctx->super)), 4961 bs_dump_super_cpl, ctx); 4962 } 4963 4964 /* END spdk_bs_dump */ 4965 4966 /* START spdk_bs_init */ 4967 4968 static void 4969 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4970 { 4971 struct spdk_bs_load_ctx *ctx = cb_arg; 4972 4973 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters);
4974 spdk_free(ctx->super);
4975 free(ctx);
4976
4977 bs_sequence_finish(seq, bserrno);
4978 }
4979
4980 static void
4981 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4982 {
4983 struct spdk_bs_load_ctx *ctx = cb_arg;
4984
4985 /* Write super block */
4986 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
4987 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
4988 bs_init_persist_super_cpl, ctx);
4989 }
4990
4991 void
4992 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4993 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4994 {
4995 struct spdk_bs_load_ctx *ctx;
4996 struct spdk_blob_store *bs;
4997 struct spdk_bs_cpl cpl;
4998 spdk_bs_sequence_t *seq;
4999 spdk_bs_batch_t *batch;
5000 uint64_t num_md_lba;
5001 uint64_t num_md_pages;
5002 uint64_t num_md_clusters;
5003 uint32_t i;
5004 struct spdk_bs_opts opts = {};
5005 int rc;
5006 uint64_t lba, lba_count;
5007
5008 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev);
5009
5010 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
5011 SPDK_ERRLOG("unsupported dev block length of %d\n",
5012 dev->blocklen);
5013 dev->destroy(dev);
5014 cb_fn(cb_arg, NULL, -EINVAL);
5015 return;
5016 }
5017
5018 spdk_bs_opts_init(&opts, sizeof(opts));
5019 if (o) {
5020 if (bs_opts_copy(o, &opts)) {
/* Complete the callback with an error instead of leaking the dev
 * and leaving the caller waiting forever. */
dev->destroy(dev);
cb_fn(cb_arg, NULL, -EINVAL);
5021 return;
5022 }
5023 }
5024
5025 if (bs_opts_verify(&opts) != 0) {
5026 dev->destroy(dev);
5027 cb_fn(cb_arg, NULL, -EINVAL);
5028 return;
5029 }
5030
5031 rc = bs_alloc(dev, &opts, &bs, &ctx);
5032 if (rc) {
5033 dev->destroy(dev);
5034 cb_fn(cb_arg, NULL, rc);
5035 return;
5036 }
5037
5038 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
5039 /* By default, allocate 1 page per cluster.
5040 * Technically, this over-allocates metadata
5041 * because more metadata will reduce the number
5042 * of usable clusters. This can be addressed with
5043 * more complex math in the future.
5044 */
5045 bs->md_len = bs->total_clusters;
5046 } else {
5047 bs->md_len = opts.num_md_pages;
5048 }
5049 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
5050 if (rc < 0) {
5051 spdk_free(ctx->super);
5052 free(ctx);
5053 bs_free(bs);
5054 cb_fn(cb_arg, NULL, -ENOMEM);
5055 return;
5056 }
5057
5058 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
5059 if (rc < 0) {
5060 spdk_free(ctx->super);
5061 free(ctx);
5062 bs_free(bs);
5063 cb_fn(cb_arg, NULL, -ENOMEM);
5064 return;
5065 }
5066
5067 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len);
5068 if (rc < 0) {
5069 spdk_free(ctx->super);
5070 free(ctx);
5071 bs_free(bs);
5072 cb_fn(cb_arg, NULL, -ENOMEM);
5073 return;
5074 }
5075
5076 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
5077 sizeof(ctx->super->signature));
5078 ctx->super->version = SPDK_BS_VERSION;
5079 ctx->super->length = sizeof(*ctx->super);
5080 ctx->super->super_blob = bs->super_blob;
5081 ctx->super->clean = 0;
5082 ctx->super->cluster_size = bs->cluster_sz;
5083 ctx->super->io_unit_size = bs->io_unit_size;
5084 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
5085
5086 /* Calculate how many pages the metadata consumes at the front
5087 * of the disk.
5088 */
5089
5090 /* The super block uses 1 page */
5091 num_md_pages = 1;
5092
5093 /* The used_md_pages mask requires 1 bit per metadata page, rounded
5094 * up to the nearest page, plus a header.
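* For example (illustrative numbers): a 1 GiB device with 1 MiB clusters and
* the default of one md page per cluster gives md_len = 1024, so this mask
* needs sizeof(struct spdk_bs_md_mask) + 1024 / 8 bytes, which rounds up to a
* single 4 KiB metadata page.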
5095 */
5096 ctx->super->used_page_mask_start = num_md_pages;
5097 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5098 spdk_divide_round_up(bs->md_len, 8),
5099 SPDK_BS_PAGE_SIZE);
5100 num_md_pages += ctx->super->used_page_mask_len;
5101
5102 /* The used_clusters mask requires 1 bit per cluster, rounded
5103 * up to the nearest page, plus a header.
5104 */
5105 ctx->super->used_cluster_mask_start = num_md_pages;
5106 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5107 spdk_divide_round_up(bs->total_clusters, 8),
5108 SPDK_BS_PAGE_SIZE);
5109 num_md_pages += ctx->super->used_cluster_mask_len;
5110
5111 /* The used_blobids mask requires 1 bit per metadata page, rounded
5112 * up to the nearest page, plus a header.
5113 */
5114 ctx->super->used_blobid_mask_start = num_md_pages;
5115 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5116 spdk_divide_round_up(bs->md_len, 8),
5117 SPDK_BS_PAGE_SIZE);
5118 num_md_pages += ctx->super->used_blobid_mask_len;
5119
5120 /* The metadata region size was chosen above */
5121 ctx->super->md_start = bs->md_start = num_md_pages;
5122 ctx->super->md_len = bs->md_len;
5123 num_md_pages += bs->md_len;
5124
5125 num_md_lba = bs_page_to_lba(bs, num_md_pages);
5126
5127 ctx->super->size = dev->blockcnt * dev->blocklen;
5128
5129 ctx->super->crc = blob_md_page_calc_crc(ctx->super);
5130
5131 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
5132 if (num_md_clusters > bs->total_clusters) {
5133 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available, "
5134 "please decrease the number of pages reserved for metadata "
5135 "or increase the cluster size.\n");
5136 spdk_free(ctx->super);
5137 spdk_bit_array_free(&ctx->used_clusters);
5138 free(ctx);
5139 bs_free(bs);
5140 cb_fn(cb_arg, NULL, -ENOMEM);
5141 return;
5142 }
5143 /* Claim all of the clusters used by the metadata */
5144 for (i = 0; i < num_md_clusters; i++) {
5145 spdk_bit_array_set(ctx->used_clusters, i);
5146 }
5147
5148 bs->num_free_clusters -= num_md_clusters;
5149 bs->total_data_clusters = bs->num_free_clusters;
5150
5151 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
5152 cpl.u.bs_handle.cb_fn = cb_fn;
5153 cpl.u.bs_handle.cb_arg = cb_arg;
5154 cpl.u.bs_handle.bs = bs;
5155
5156 seq = bs_sequence_start(bs->md_channel, &cpl);
5157 if (!seq) {
5158 spdk_free(ctx->super);
5159 free(ctx);
5160 bs_free(bs);
5161 cb_fn(cb_arg, NULL, -ENOMEM);
5162 return;
5163 }
5164
5165 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx);
5166
5167 /* Clear metadata space */
5168 bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
5169
5170 lba = num_md_lba;
5171 lba_count = ctx->bs->dev->blockcnt - lba;
5172 switch (opts.clear_method) {
5173 case BS_CLEAR_WITH_UNMAP:
5174 /* Trim data clusters */
5175 bs_batch_unmap_dev(batch, lba, lba_count);
5176 break;
5177 case BS_CLEAR_WITH_WRITE_ZEROES:
5178 /* Write zeroes to data clusters */
5179 bs_batch_write_zeroes_dev(batch, lba, lba_count);
5180 break;
5181 case BS_CLEAR_WITH_NONE:
5182 default:
5183 break;
5184 }
5185
5186 bs_batch_close(batch);
5187 }
5188
5189 /* END spdk_bs_init */
5190
5191 /* START spdk_bs_destroy */
5192
5193 static void
5194 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5195 {
5196 struct spdk_bs_load_ctx *ctx = cb_arg;
5197 struct spdk_blob_store *bs = ctx->bs;
5198
5199 /*
5200 * We need to defer calling bs_call_cpl() until after
5201 * dev destruction, so tuck these
away for later use. 5202 */ 5203 bs->unload_err = bserrno; 5204 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5205 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5206 5207 bs_sequence_finish(seq, bserrno); 5208 5209 bs_free(bs); 5210 free(ctx); 5211 } 5212 5213 void 5214 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5215 void *cb_arg) 5216 { 5217 struct spdk_bs_cpl cpl; 5218 spdk_bs_sequence_t *seq; 5219 struct spdk_bs_load_ctx *ctx; 5220 5221 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5222 5223 if (!RB_EMPTY(&bs->open_blobs)) { 5224 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5225 cb_fn(cb_arg, -EBUSY); 5226 return; 5227 } 5228 5229 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5230 cpl.u.bs_basic.cb_fn = cb_fn; 5231 cpl.u.bs_basic.cb_arg = cb_arg; 5232 5233 ctx = calloc(1, sizeof(*ctx)); 5234 if (!ctx) { 5235 cb_fn(cb_arg, -ENOMEM); 5236 return; 5237 } 5238 5239 ctx->bs = bs; 5240 5241 seq = bs_sequence_start(bs->md_channel, &cpl); 5242 if (!seq) { 5243 free(ctx); 5244 cb_fn(cb_arg, -ENOMEM); 5245 return; 5246 } 5247 5248 /* Write zeroes to the super block */ 5249 bs_sequence_write_zeroes_dev(seq, 5250 bs_page_to_lba(bs, 0), 5251 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5252 bs_destroy_trim_cpl, ctx); 5253 } 5254 5255 /* END spdk_bs_destroy */ 5256 5257 /* START spdk_bs_unload */ 5258 5259 static void 5260 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5261 { 5262 spdk_bs_sequence_t *seq = ctx->seq; 5263 5264 spdk_free(ctx->super); 5265 5266 /* 5267 * We need to defer calling bs_call_cpl() until after 5268 * dev destruction, so tuck these away for later use. 5269 */ 5270 ctx->bs->unload_err = bserrno; 5271 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5272 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5273 5274 bs_sequence_finish(seq, bserrno); 5275 5276 bs_free(ctx->bs); 5277 free(ctx); 5278 } 5279 5280 static void 5281 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5282 { 5283 struct spdk_bs_load_ctx *ctx = cb_arg; 5284 5285 bs_unload_finish(ctx, bserrno); 5286 } 5287 5288 static void 5289 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5290 { 5291 struct spdk_bs_load_ctx *ctx = cb_arg; 5292 5293 spdk_free(ctx->mask); 5294 5295 if (bserrno != 0) { 5296 bs_unload_finish(ctx, bserrno); 5297 return; 5298 } 5299 5300 ctx->super->clean = 1; 5301 5302 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5303 } 5304 5305 static void 5306 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5307 { 5308 struct spdk_bs_load_ctx *ctx = cb_arg; 5309 5310 spdk_free(ctx->mask); 5311 ctx->mask = NULL; 5312 5313 if (bserrno != 0) { 5314 bs_unload_finish(ctx, bserrno); 5315 return; 5316 } 5317 5318 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5319 } 5320 5321 static void 5322 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5323 { 5324 struct spdk_bs_load_ctx *ctx = cb_arg; 5325 5326 spdk_free(ctx->mask); 5327 ctx->mask = NULL; 5328 5329 if (bserrno != 0) { 5330 bs_unload_finish(ctx, bserrno); 5331 return; 5332 } 5333 5334 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5335 } 5336 5337 static void 5338 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5339 { 5340 struct spdk_bs_load_ctx *ctx = cb_arg; 5341 5342 if (bserrno != 0) { 5343 bs_unload_finish(ctx, bserrno); 5344 return; 5345 } 5346 5347 
bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5348 } 5349 5350 void 5351 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5352 { 5353 struct spdk_bs_cpl cpl; 5354 struct spdk_bs_load_ctx *ctx; 5355 5356 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5357 5358 if (!RB_EMPTY(&bs->open_blobs)) { 5359 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5360 cb_fn(cb_arg, -EBUSY); 5361 return; 5362 } 5363 5364 ctx = calloc(1, sizeof(*ctx)); 5365 if (!ctx) { 5366 cb_fn(cb_arg, -ENOMEM); 5367 return; 5368 } 5369 5370 ctx->bs = bs; 5371 5372 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5373 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5374 if (!ctx->super) { 5375 free(ctx); 5376 cb_fn(cb_arg, -ENOMEM); 5377 return; 5378 } 5379 5380 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5381 cpl.u.bs_basic.cb_fn = cb_fn; 5382 cpl.u.bs_basic.cb_arg = cb_arg; 5383 5384 ctx->seq = bs_sequence_start(bs->md_channel, &cpl); 5385 if (!ctx->seq) { 5386 spdk_free(ctx->super); 5387 free(ctx); 5388 cb_fn(cb_arg, -ENOMEM); 5389 return; 5390 } 5391 5392 /* Read super block */ 5393 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5394 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5395 bs_unload_read_super_cpl, ctx); 5396 } 5397 5398 /* END spdk_bs_unload */ 5399 5400 /* START spdk_bs_set_super */ 5401 5402 struct spdk_bs_set_super_ctx { 5403 struct spdk_blob_store *bs; 5404 struct spdk_bs_super_block *super; 5405 }; 5406 5407 static void 5408 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5409 { 5410 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5411 5412 if (bserrno != 0) { 5413 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5414 } 5415 5416 spdk_free(ctx->super); 5417 5418 bs_sequence_finish(seq, bserrno); 5419 5420 free(ctx); 5421 } 5422 5423 static void 5424 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5425 { 5426 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5427 5428 if (bserrno != 0) { 5429 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5430 spdk_free(ctx->super); 5431 bs_sequence_finish(seq, bserrno); 5432 free(ctx); 5433 return; 5434 } 5435 5436 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 5437 } 5438 5439 void 5440 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 5441 spdk_bs_op_complete cb_fn, void *cb_arg) 5442 { 5443 struct spdk_bs_cpl cpl; 5444 spdk_bs_sequence_t *seq; 5445 struct spdk_bs_set_super_ctx *ctx; 5446 5447 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 5448 5449 ctx = calloc(1, sizeof(*ctx)); 5450 if (!ctx) { 5451 cb_fn(cb_arg, -ENOMEM); 5452 return; 5453 } 5454 5455 ctx->bs = bs; 5456 5457 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5458 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5459 if (!ctx->super) { 5460 free(ctx); 5461 cb_fn(cb_arg, -ENOMEM); 5462 return; 5463 } 5464 5465 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5466 cpl.u.bs_basic.cb_fn = cb_fn; 5467 cpl.u.bs_basic.cb_arg = cb_arg; 5468 5469 seq = bs_sequence_start(bs->md_channel, &cpl); 5470 if (!seq) { 5471 spdk_free(ctx->super); 5472 free(ctx); 5473 cb_fn(cb_arg, -ENOMEM); 5474 return; 5475 } 5476 5477 bs->super_blob = blobid; 5478 5479 /* Read super block */ 5480 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 5481 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5482 bs_set_super_read_cpl, ctx); 5483 } 5484 5485 /* END spdk_bs_set_super */ 5486 5487 void 5488 spdk_bs_get_super(struct spdk_blob_store *bs, 5489 
spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5490 { 5491 if (bs->super_blob == SPDK_BLOBID_INVALID) { 5492 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 5493 } else { 5494 cb_fn(cb_arg, bs->super_blob, 0); 5495 } 5496 } 5497 5498 uint64_t 5499 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 5500 { 5501 return bs->cluster_sz; 5502 } 5503 5504 uint64_t 5505 spdk_bs_get_page_size(struct spdk_blob_store *bs) 5506 { 5507 return SPDK_BS_PAGE_SIZE; 5508 } 5509 5510 uint64_t 5511 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 5512 { 5513 return bs->io_unit_size; 5514 } 5515 5516 uint64_t 5517 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 5518 { 5519 return bs->num_free_clusters; 5520 } 5521 5522 uint64_t 5523 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 5524 { 5525 return bs->total_data_clusters; 5526 } 5527 5528 static int 5529 bs_register_md_thread(struct spdk_blob_store *bs) 5530 { 5531 bs->md_channel = spdk_get_io_channel(bs); 5532 if (!bs->md_channel) { 5533 SPDK_ERRLOG("Failed to get IO channel.\n"); 5534 return -1; 5535 } 5536 5537 return 0; 5538 } 5539 5540 static int 5541 bs_unregister_md_thread(struct spdk_blob_store *bs) 5542 { 5543 spdk_put_io_channel(bs->md_channel); 5544 5545 return 0; 5546 } 5547 5548 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob) 5549 { 5550 assert(blob != NULL); 5551 5552 return blob->id; 5553 } 5554 5555 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob) 5556 { 5557 assert(blob != NULL); 5558 5559 return bs_cluster_to_page(blob->bs, blob->active.num_clusters); 5560 } 5561 5562 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob) 5563 { 5564 assert(blob != NULL); 5565 5566 return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs); 5567 } 5568 5569 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob) 5570 { 5571 assert(blob != NULL); 5572 5573 return blob->active.num_clusters; 5574 } 5575 5576 /* START spdk_bs_create_blob */ 5577 5578 static void 5579 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5580 { 5581 struct spdk_blob *blob = cb_arg; 5582 uint32_t page_idx = bs_blobid_to_page(blob->id); 5583 5584 if (bserrno != 0) { 5585 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 5586 bs_release_md_page(blob->bs, page_idx); 5587 } 5588 5589 blob_free(blob); 5590 5591 bs_sequence_finish(seq, bserrno); 5592 } 5593 5594 static int 5595 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 5596 bool internal) 5597 { 5598 uint64_t i; 5599 size_t value_len = 0; 5600 int rc; 5601 const void *value = NULL; 5602 if (xattrs->count > 0 && xattrs->get_value == NULL) { 5603 return -EINVAL; 5604 } 5605 for (i = 0; i < xattrs->count; i++) { 5606 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 5607 if (value == NULL || value_len == 0) { 5608 return -EINVAL; 5609 } 5610 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 5611 if (rc < 0) { 5612 return rc; 5613 } 5614 } 5615 return 0; 5616 } 5617 5618 static void 5619 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 5620 { 5621 #define FIELD_OK(field) \ 5622 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 5623 5624 #define SET_FIELD(field) \ 5625 if (FIELD_OK(field)) { \ 5626 dst->field = src->field; \ 5627 } \ 5628 5629 SET_FIELD(num_clusters); 5630 SET_FIELD(thin_provision); 5631 SET_FIELD(clear_method); 5632 5633 if (FIELD_OK(xattrs)) { 5634 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 
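/* (xattrs is an embedded struct, hence the wholesale memcpy() above instead
 * of the scalar SET_FIELD helper.) */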
5635 } 5636 5637 SET_FIELD(use_extent_table); 5638 5639 dst->opts_size = src->opts_size; 5640 5641 /* You should not remove this statement, but need to update the assert statement 5642 * if you add a new field, and also add a corresponding SET_FIELD statement */ 5643 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 64, "Incorrect size"); 5644 5645 #undef FIELD_OK 5646 #undef SET_FIELD 5647 } 5648 5649 static void 5650 bs_create_blob(struct spdk_blob_store *bs, 5651 const struct spdk_blob_opts *opts, 5652 const struct spdk_blob_xattr_opts *internal_xattrs, 5653 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5654 { 5655 struct spdk_blob *blob; 5656 uint32_t page_idx; 5657 struct spdk_bs_cpl cpl; 5658 struct spdk_blob_opts opts_local; 5659 struct spdk_blob_xattr_opts internal_xattrs_default; 5660 spdk_bs_sequence_t *seq; 5661 spdk_blob_id id; 5662 int rc; 5663 5664 assert(spdk_get_thread() == bs->md_thread); 5665 5666 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 5667 if (page_idx == UINT32_MAX) { 5668 cb_fn(cb_arg, 0, -ENOMEM); 5669 return; 5670 } 5671 spdk_bit_array_set(bs->used_blobids, page_idx); 5672 bs_claim_md_page(bs, page_idx); 5673 5674 id = bs_page_to_blobid(page_idx); 5675 5676 SPDK_DEBUGLOG(blob, "Creating blob with id %" PRIu64 " at page %u\n", id, page_idx); 5677 5678 blob = blob_alloc(bs, id); 5679 if (!blob) { 5680 spdk_bit_array_clear(bs->used_blobids, page_idx); 5681 bs_release_md_page(bs, page_idx); 5682 cb_fn(cb_arg, 0, -ENOMEM); 5683 return; 5684 } 5685 5686 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 5687 if (opts) { 5688 blob_opts_copy(opts, &opts_local); 5689 } 5690 5691 blob->use_extent_table = opts_local.use_extent_table; 5692 if (blob->use_extent_table) { 5693 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 5694 } 5695 5696 if (!internal_xattrs) { 5697 blob_xattrs_init(&internal_xattrs_default); 5698 internal_xattrs = &internal_xattrs_default; 5699 } 5700 5701 rc = blob_set_xattrs(blob, &opts_local.xattrs, false); 5702 if (rc < 0) { 5703 blob_free(blob); 5704 spdk_bit_array_clear(bs->used_blobids, page_idx); 5705 bs_release_md_page(bs, page_idx); 5706 cb_fn(cb_arg, 0, rc); 5707 return; 5708 } 5709 5710 rc = blob_set_xattrs(blob, internal_xattrs, true); 5711 if (rc < 0) { 5712 blob_free(blob); 5713 spdk_bit_array_clear(bs->used_blobids, page_idx); 5714 bs_release_md_page(bs, page_idx); 5715 cb_fn(cb_arg, 0, rc); 5716 return; 5717 } 5718 5719 if (opts_local.thin_provision) { 5720 blob_set_thin_provision(blob); 5721 } 5722 5723 blob_set_clear_method(blob, opts_local.clear_method); 5724 5725 rc = blob_resize(blob, opts_local.num_clusters); 5726 if (rc < 0) { 5727 blob_free(blob); 5728 spdk_bit_array_clear(bs->used_blobids, page_idx); 5729 bs_release_md_page(bs, page_idx); 5730 cb_fn(cb_arg, 0, rc); 5731 return; 5732 } 5733 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 5734 cpl.u.blobid.cb_fn = cb_fn; 5735 cpl.u.blobid.cb_arg = cb_arg; 5736 cpl.u.blobid.blobid = blob->id; 5737 5738 seq = bs_sequence_start(bs->md_channel, &cpl); 5739 if (!seq) { 5740 blob_free(blob); 5741 spdk_bit_array_clear(bs->used_blobids, page_idx); 5742 bs_release_md_page(bs, page_idx); 5743 cb_fn(cb_arg, 0, -ENOMEM); 5744 return; 5745 } 5746 5747 blob_persist(seq, blob, bs_create_blob_cpl, blob); 5748 } 5749 5750 void spdk_bs_create_blob(struct spdk_blob_store *bs, 5751 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5752 { 5753 bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 5754 } 5755 5756 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct 
void spdk_bs_create_blob(struct spdk_blob_store *bs,
			 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
}

void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
}

/* END spdk_bs_create_blob */

/* START blob_cleanup */

struct spdk_clone_snapshot_ctx {
	struct spdk_bs_cpl cpl;
	int bserrno;
	bool frozen;

	struct spdk_io_channel *channel;

	/* Current cluster for the inflate operation */
	uint64_t cluster;

	/* For inflation, force allocation of all unallocated clusters and remove
	 * thin-provisioning. Otherwise, only decouple the parent and keep the clone thin. */
	bool allocate_all;

	struct {
		spdk_blob_id id;
		struct spdk_blob *blob;
		bool md_ro;
	} original;
	struct {
		spdk_blob_id id;
		struct spdk_blob *blob;
	} new;

	/* xattrs specified for the snapshot/clone only. They have no impact on
	 * the original blob's xattrs. */
	const struct spdk_blob_xattr_opts *xattrs;
};

static void
bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
	struct spdk_bs_cpl *cpl = &ctx->cpl;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	switch (cpl->type) {
	case SPDK_BS_CPL_TYPE_BLOBID:
		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
		break;
	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
		break;
	default:
		SPDK_UNREACHABLE();
		break;
	}

	free(ctx);
}

static void
bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	ctx->original.id = origblob->id;
	origblob->locked_operation_in_progress = false;

	/* Revert md_ro to its original state */
	origblob->md_ro = ctx->original.md_ro;

	spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx);
}

static void
bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	if (ctx->frozen) {
		/* Unfreeze any outstanding I/O */
		blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx);
	} else {
		bs_snapshot_unfreeze_cpl(ctx, 0);
	}
}

static void
bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno)
{
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	ctx->new.id = newblob->id;
	spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
}

/* END blob_cleanup */

/* START spdk_bs_create_snapshot */
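/*
 * Snapshot creation proceeds as a chain of completions: open the original
 * blob, create the snapshot blob (thin-provisioned, same size), open it,
 * freeze I/O on the original, swap the cluster/extent-page maps so the
 * snapshot owns the data, sync the snapshot's metadata, re-point the
 * original at the snapshot as its backing device, sync the original, then
 * unfreeze and close. Each failure path swaps the maps back and unwinds
 * through the cleanup helpers above.
 */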
static void
bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	uint64_t *cluster_temp;
	uint32_t *extent_page_temp;

	cluster_temp = blob1->active.clusters;
	blob1->active.clusters = blob2->active.clusters;
	blob2->active.clusters = cluster_temp;

	extent_page_temp = blob1->active.extent_pages;
	blob1->active.extent_pages = blob2->active.extent_pages;
	blob2->active.extent_pages = extent_page_temp;
}

static void
bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		bs_snapshot_swap_cluster_maps(newblob, origblob);
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
	bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	bs_blob_list_add(ctx->original.blob);

	spdk_blob_set_read_only(newblob);

	/* Sync snapshot metadata */
	spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		/* Return the cluster map back to the original */
		bs_snapshot_swap_cluster_maps(newblob, origblob);

		/* Newblob md sync failed. Valid clusters are only present in origblob.
		 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map
		 * should have occurred. Newblob needs to be reverted to the thin-provisioned
		 * state it had at creation to close properly. */
		blob_set_thin_provision(newblob);
		assert(spdk_mem_all_zero(newblob->active.clusters,
					 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
		assert(spdk_mem_all_zero(newblob->active.extent_pages,
					 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));

		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
		return;
	}

	/* Set internal xattr for the snapshot id */
	bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
	if (bserrno != 0) {
		/* Return the cluster map back to the original */
		bs_snapshot_swap_cluster_maps(newblob, origblob);
		blob_set_thin_provision(newblob);
		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
		return;
	}

	/* Create a new back_bs_dev for the snapshot */
	origblob->back_bs_dev = bs_create_blob_bs_dev(newblob);
	if (origblob->back_bs_dev == NULL) {
		/* Return the cluster map back to the original */
		bs_snapshot_swap_cluster_maps(newblob, origblob);
		blob_set_thin_provision(newblob);
		bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
		return;
	}

	bs_blob_list_remove(origblob);
	origblob->parent_id = newblob->id;
	/* Set the clone blob as thin provisioned */
	blob_set_thin_provision(origblob);

	bs_blob_list_add(newblob);

	/* Sync clone metadata */
	spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx);
}

static void
bs_snapshot_freeze_cpl(void *cb_arg, int rc)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;
	int bserrno;

	if (rc != 0) {
		bs_clone_snapshot_newblob_cleanup(ctx, rc);
		return;
	}

	ctx->frozen = true;

	/* Set the new back_bs_dev for the snapshot */
	newblob->back_bs_dev = origblob->back_bs_dev;
	/* Set invalid flags from origblob */
	newblob->invalid_flags = origblob->invalid_flags;

	/* Inherit the parent from the original blob, if set */
	newblob->parent_id = origblob->parent_id;
	if (origblob->parent_id != SPDK_BLOBID_INVALID) {
		/* Set internal xattr for the snapshot id */
		bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT,
					 &origblob->parent_id, sizeof(spdk_blob_id), true);
		if (bserrno != 0) {
			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
			return;
		}
	}

	/* Swap cluster maps */
	bs_snapshot_swap_cluster_maps(newblob, origblob);

	/* Set the clear method on the new blob to match the original. */
	blob_set_clear_method(newblob, origblob->clear_method);

	/* Sync snapshot metadata */
	spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx);
}

static void
bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = _blob;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.blob = newblob;
	assert(spdk_blob_is_thin_provisioned(newblob));
	assert(spdk_mem_all_zero(newblob->active.clusters,
				 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
	assert(spdk_mem_all_zero(newblob->active.extent_pages,
				 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));

	blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx);
}

static void
bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.id = blobid;
	ctx->cpl.u.blobid.blobid = blobid;

	spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx);
}

static void
bs_xattr_snapshot(void *arg, const char *name,
		  const void **value, size_t *value_len)
{
	struct spdk_blob *blob = (struct spdk_blob *)arg;

	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);

	*value = &blob->id;
	*value_len = sizeof(blob->id);
}

static void
bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };

	if (bserrno != 0) {
		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;

	if (_blob->data_ro || _blob->md_ro) {
		SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id %" PRIu64 "\n",
			      _blob->id);
		ctx->bserrno = -EINVAL;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	spdk_blob_opts_init(&opts, sizeof(opts));
	blob_xattrs_init(&internal_xattrs);

	/* Set the size of the new blob to be the same as the original blob,
	 * but do not allocate clusters */
	opts.thin_provision = true;
	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
	opts.use_extent_table = _blob->use_extent_table;

	/* If there are any xattrs specified for the snapshot, set them now */
	if (ctx->xattrs) {
		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
	}
	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
	internal_xattrs.count = 1;
	internal_xattrs.ctx = _blob;
	internal_xattrs.names = xattrs_names;
	internal_xattrs.get_value = bs_xattr_snapshot;

	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
		       bs_snapshot_newblob_create_cpl, ctx);
}

void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
			     const struct spdk_blob_xattr_opts *snapshot_xattrs,
			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
		return;
	}
	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	ctx->cpl.u.blobid.cb_fn = cb_fn;
	ctx->cpl.u.blobid.cb_arg = cb_arg;
	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
	ctx->bserrno = 0;
	ctx->frozen = false;
	ctx->original.id = blobid;
	ctx->xattrs = snapshot_xattrs;

	spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx);
}
/* END spdk_bs_create_snapshot */
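/*
 * Minimal usage sketch (assumes a loaded blobstore `bs`, an existing blob id
 * `blobid`, and a caller-supplied callback `snap_cb`; all placeholder names):
 *
 *	spdk_bs_create_snapshot(bs, blobid, NULL, snap_cb, NULL);
 *
 * On success snap_cb() receives the id of the new read-only snapshot; the
 * original blob keeps its id and becomes a thin-provisioned clone backed by
 * the snapshot.
 */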
/* START spdk_bs_create_clone */

static void
bs_xattr_clone(void *arg, const char *name,
	       const void **value, size_t *value_len)
{
	struct spdk_blob *blob = (struct spdk_blob *)arg;

	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);

	*value = &blob->id;
	*value_len = sizeof(blob->id);
}

static void
bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *clone = _blob;

	ctx->new.blob = clone;
	bs_blob_list_add(clone);

	spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;

	ctx->cpl.u.blobid.blobid = blobid;
	spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx);
}

static void
bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	char *xattr_names[] = { BLOB_SNAPSHOT };

	if (bserrno != 0) {
		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;
	ctx->original.md_ro = _blob->md_ro;

	if (!_blob->data_ro || !_blob->md_ro) {
		SPDK_DEBUGLOG(blob, "Cannot create clone from a blob that is not read-only\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	spdk_blob_opts_init(&opts, sizeof(opts));
	blob_xattrs_init(&internal_xattrs);

	opts.thin_provision = true;
	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
	opts.use_extent_table = _blob->use_extent_table;
	if (ctx->xattrs) {
		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
	}

	/* Set internal xattr BLOB_SNAPSHOT */
	internal_xattrs.count = 1;
	internal_xattrs.ctx = _blob;
	internal_xattrs.names = xattr_names;
	internal_xattrs.get_value = bs_xattr_clone;

	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
		       bs_clone_newblob_create_cpl, ctx);
}

void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
			  const struct spdk_blob_xattr_opts *clone_xattrs,
			  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	ctx->cpl.u.blobid.cb_fn = cb_fn;
	ctx->cpl.u.blobid.cb_arg = cb_arg;
	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
	ctx->bserrno = 0;
	ctx->xattrs = clone_xattrs;
	ctx->original.id = blobid;

	spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx);
}

/* END spdk_bs_create_clone */

/* START spdk_bs_inflate_blob */

static void
bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	assert(_parent != NULL);

	/* Temporarily override md_ro flag for MD modification */
	_blob->md_ro = false;

	bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true);
	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	bs_blob_list_remove(_blob);
	_blob->parent_id = _parent->id;

	_blob->back_bs_dev->destroy(_blob->back_bs_dev);
	_blob->back_bs_dev = bs_create_blob_bs_dev(_parent);
	bs_blob_list_add(_blob);

	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx)
{
	struct spdk_blob *_blob = ctx->original.blob;
	struct spdk_blob *_parent;

	if (ctx->allocate_all) {
		/* Remove thin provisioning */
		bs_blob_list_remove(_blob);
		blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
		_blob->back_bs_dev = NULL;
		_blob->parent_id = SPDK_BLOBID_INVALID;
	} else {
		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
			/* We must change the parent of the inflated blob */
			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
					  bs_inflate_blob_set_parent_cpl, ctx);
			return;
		}

		bs_blob_list_remove(_blob);
		blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
		_blob->parent_id = SPDK_BLOBID_INVALID;
		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
		_blob->back_bs_dev = bs_create_zeroes_dev();
	}

	/* Temporarily override md_ro flag for MD modification */
	_blob->md_ro = false;
	_blob->state = SPDK_BLOB_STATE_DIRTY;

	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
}
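/*
 * bs_cluster_needs_allocation() below drives both public entry points of
 * this section. The decision reduces to:
 *
 *	cluster already allocated          -> never copy
 *	allocate_all == true  (inflate)    -> copy every unallocated cluster
 *	allocate_all == false (decouple)   -> copy only clusters actually
 *	                                      backed by the direct parent
 */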
/* Check if a cluster needs allocation */
static inline bool
bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
{
	struct spdk_blob_bs_dev *b;

	assert(blob != NULL);

	if (blob->active.clusters[cluster] != 0) {
		/* Cluster is already allocated */
		return false;
	}

	if (blob->parent_id == SPDK_BLOBID_INVALID) {
		/* Blob has no parent blob */
		return allocate_all;
	}

	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	return (allocate_all || b->blob->active.clusters[cluster] != 0);
}

static void
bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;
	struct spdk_bs_cpl cpl;
	spdk_bs_user_op_t *op;
	uint64_t offset;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
		if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
			break;
		}
	}

	if (ctx->cluster < _blob->active.num_clusters) {
		offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);

		/* We can safely increment the cluster number before the copy */
		ctx->cluster++;

		/* Use a dummy 0B read as the context for the cluster copy */
		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
		cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
		cpl.u.blob_basic.cb_arg = ctx;

		op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
				      NULL, 0, offset, 0);
		if (!op) {
			bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
			return;
		}

		bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
	} else {
		bs_inflate_blob_done(ctx);
	}
}

static void
bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	uint64_t clusters_needed;
	uint64_t i;

	if (bserrno != 0) {
		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;
	ctx->original.md_ro = _blob->md_ro;

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) {
		/* This blob has no parent, so we cannot decouple it. */
		SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
		bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
		return;
	}

	if (spdk_blob_is_thin_provisioned(_blob) == false) {
		/* This is not a thin-provisioned blob. No need to inflate. */
		bs_clone_snapshot_origblob_cleanup(ctx, 0);
		return;
	}

	/* Do two passes - one to verify that we can obtain enough clusters
	 * and another to actually claim them.
	 */
	clusters_needed = 0;
	for (i = 0; i < _blob->active.num_clusters; i++) {
		if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
			clusters_needed++;
		}
	}

	if (clusters_needed > _blob->bs->num_free_clusters) {
		/* Not enough free clusters. Cannot satisfy the request. */
		bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
		return;
	}

	ctx->cluster = 0;
	bs_inflate_blob_touch_next(ctx, 0);
}

static void
bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
		spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
	ctx->bserrno = 0;
	ctx->original.id = blobid;
	ctx->channel = channel;
	ctx->allocate_all = allocate_all;

	spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx);
}

void
spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
}

void
spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
}
/* END spdk_bs_inflate_blob */

/* START spdk_blob_resize */
struct spdk_bs_resize_ctx {
	spdk_blob_op_complete cb_fn;
	void *cb_arg;
	struct spdk_blob *blob;
	uint64_t sz;
	int rc;
};

static void
bs_resize_unfreeze_cpl(void *cb_arg, int rc)
{
	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;

	if (rc != 0) {
		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
	}

	if (ctx->rc != 0) {
		/* ctx->rc holds the result of the resize itself */
		SPDK_ERRLOG("Blob resize failed, ctx->rc=%d\n", ctx->rc);
		rc = ctx->rc;
	}

	ctx->blob->locked_operation_in_progress = false;

	ctx->cb_fn(ctx->cb_arg, rc);
	free(ctx);
}

static void
bs_resize_freeze_cpl(void *cb_arg, int rc)
{
	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;

	if (rc != 0) {
		ctx->blob->locked_operation_in_progress = false;
		ctx->cb_fn(ctx->cb_arg, rc);
		free(ctx);
		return;
	}

	ctx->rc = blob_resize(ctx->blob, ctx->sz);

	blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx);
}

void
spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_resize_ctx *ctx;

	blob_verify_md_op(blob);

	SPDK_DEBUGLOG(blob, "Resizing blob %" PRIu64 " to %" PRIu64 " clusters\n", blob->id, sz);

	if (blob->md_ro) {
		cb_fn(cb_arg, -EPERM);
		return;
	}

	if (sz == blob->active.num_clusters) {
		cb_fn(cb_arg, 0);
		return;
	}

	if (blob->locked_operation_in_progress) {
		cb_fn(cb_arg, -EBUSY);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	blob->locked_operation_in_progress = true;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->blob = blob;
	ctx->sz = sz;
	blob_freeze_io(blob, bs_resize_freeze_cpl, ctx);
}

/* END spdk_blob_resize */
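/*
 * Minimal usage sketch (assumes an open blob `blob` on the md thread and
 * completion callbacks `resize_cb`/`sync_cb`; all placeholder names):
 *
 *	spdk_blob_resize(blob, 32, resize_cb, NULL);
 *	// on resize_cb(..., 0), persist the new size:
 *	spdk_blob_sync_md(blob, sync_cb, NULL);
 *
 * I/O is frozen for the duration of the resize, so in-flight requests never
 * observe a half-updated cluster map.
 */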
/* START spdk_bs_delete_blob */

static void
bs_delete_close_cpl(void *cb_arg, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;

	bs_sequence_finish(seq, bserrno);
}

static void
bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno != 0) {
		/*
		 * We already removed this blob from the blobstore tailq, so
		 * we need to free it here since this is the last reference
		 * to it.
		 */
		blob_free(blob);
		bs_delete_close_cpl(seq, bserrno);
		return;
	}

	/*
	 * This will immediately decrement the ref_count and call
	 * the completion routine since the metadata state is clean.
	 * By calling spdk_blob_close, we reduce the number of call
	 * points into code that touches the blob->open_ref count
	 * and the blobstore's blob list.
	 */
	spdk_blob_close(blob, bs_delete_close_cpl, seq);
}

struct delete_snapshot_ctx {
	struct spdk_blob_list *parent_snapshot_entry;
	struct spdk_blob *snapshot;
	bool snapshot_md_ro;
	struct spdk_blob *clone;
	bool clone_md_ro;
	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
	int bserrno;
	uint32_t next_extent_page;
};

static void
delete_blob_cleanup_finish(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
	}

	assert(ctx != NULL);

	if (bserrno != 0 && ctx->bserrno == 0) {
		ctx->bserrno = bserrno;
	}

	ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
	free(ctx);
}

static void
delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		ctx->bserrno = bserrno;
		SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
	}

	if (ctx->bserrno != 0) {
		/* Deletion failed - put the snapshot back into the open-blob tree */
		assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL);
		RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot);
		spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id);
	}

	ctx->snapshot->locked_operation_in_progress = false;
	ctx->snapshot->md_ro = ctx->snapshot_md_ro;

	spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx);
}

static void
delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	ctx->clone->locked_operation_in_progress = false;
	ctx->clone->md_ro = ctx->clone_md_ro;

	spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
}

static void
delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno) {
		ctx->bserrno = bserrno;
		delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	ctx->clone->locked_operation_in_progress = false;
	spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx);
}

static void
delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;
	struct spdk_blob_list *parent_snapshot_entry = NULL;
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;
	struct spdk_blob_list *snapshot_clone_entry = NULL;

	if (bserrno) {
		SPDK_ERRLOG("Failed to sync MD on blob\n");
		ctx->bserrno = bserrno;
		delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	/* Get the snapshot entry for the snapshot we want to remove */
	snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);

	assert(snapshot_entry != NULL);

	/* Remove the clone entry in this snapshot (at this point there can be only one clone) */
	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
	assert(clone_entry != NULL);
	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
	snapshot_entry->clone_count--;
	assert(TAILQ_EMPTY(&snapshot_entry->clones));

	if (ctx->snapshot->parent_id != SPDK_BLOBID_INVALID) {
		/* This snapshot is at the same time a clone of another snapshot - we need to
		 * update the parent snapshot (remove the current clone, add a new one inherited
		 * from the snapshot that is being removed) */

		/* Get the snapshot entry for the parent snapshot and the clone entry within
		 * that snapshot for the snapshot we are removing */
		blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
						    &snapshot_clone_entry);

		/* Switch the clone entry in the parent snapshot */
		TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
		TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
		free(snapshot_clone_entry);
	} else {
		/* No parent snapshot - just remove the clone entry */
		free(clone_entry);
	}

	/* Restore md_ro flags */
	ctx->clone->md_ro = ctx->clone_md_ro;
	ctx->snapshot->md_ro = ctx->snapshot_md_ro;

	blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx);
}

static void
delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;
	uint64_t i;

	ctx->snapshot->md_ro = false;

	if (bserrno) {
		SPDK_ERRLOG("Failed to sync MD on clone\n");
		ctx->bserrno = bserrno;

		/* Restore the snapshot to its previous state */
		bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
		if (bserrno != 0) {
			delete_snapshot_cleanup_clone(ctx, bserrno);
			return;
		}

		spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx);
		return;
	}

	/* Clear cluster map entries for the snapshot */
	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
			ctx->snapshot->active.clusters[i] = 0;
		}
	}
	for (i = 0; i < ctx->snapshot->active.num_extent_pages &&
	     i < ctx->clone->active.num_extent_pages; i++) {
		if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) {
			ctx->snapshot->active.extent_pages[i] = 0;
		}
	}

	blob_set_thin_provision(ctx->snapshot);
	ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;

	if (ctx->parent_snapshot_entry != NULL) {
		ctx->snapshot->back_bs_dev = NULL;
	}

	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx);
}

static void
delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx)
{
	/* Delete the old backing bs_dev from the clone (related to the snapshot that will be removed) */
	ctx->clone->back_bs_dev->destroy(ctx->clone->back_bs_dev);

	/* Set/remove the snapshot xattr and switch the parent ID and backing bs_dev on the clone... */
	if (ctx->parent_snapshot_entry != NULL) {
		/* ...to the parent snapshot */
		ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
		blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
			       sizeof(spdk_blob_id), true);
	} else {
		/* ...to an invalid blobid and the zeroes dev */
		ctx->clone->parent_id = SPDK_BLOBID_INVALID;
		ctx->clone->back_bs_dev = bs_create_zeroes_dev();
		blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
	}

	spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx);
}

static void
delete_snapshot_update_extent_pages(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;
	uint32_t *extent_page;
	uint64_t i;

	for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages &&
	     i < ctx->clone->active.num_extent_pages; i++) {
		if (ctx->snapshot->active.extent_pages[i] == 0) {
			/* No extent page to use from the snapshot */
			continue;
		}

		extent_page = &ctx->clone->active.extent_pages[i];
		if (*extent_page == 0) {
			/* Copy the extent page from the snapshot when the clone did not have a matching one */
			*extent_page = ctx->snapshot->active.extent_pages[i];
			continue;
		}

		/* The clone and snapshot both contain partially filled matching extent pages.
		 * Update the clone extent page in place with the cluster map containing a mix of both. */
		ctx->next_extent_page = i + 1;

		blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP,
				       delete_snapshot_update_extent_pages, ctx);
		return;
	}
	delete_snapshot_update_extent_pages_cpl(ctx);
}

static void
delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;
	uint64_t i;

	/* Temporarily override the md_ro flag on the clone for MD modification */
	ctx->clone_md_ro = ctx->clone->md_ro;
	ctx->clone->md_ro = false;

	if (bserrno) {
		SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
		ctx->bserrno = bserrno;
		delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	/* Copy the snapshot map into the clone map (only for clusters unallocated in the clone) */
	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
		if (ctx->clone->active.clusters[i] == 0) {
			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
		}
	}
	ctx->next_extent_page = 0;
	delete_snapshot_update_extent_pages(ctx, 0);
}

static void
delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno) {
		SPDK_ERRLOG("Failed to freeze I/O on clone\n");
		ctx->bserrno = bserrno;
		delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	/* Temporarily override the md_ro flag on the snapshot for MD modification */
	ctx->snapshot_md_ro = ctx->snapshot->md_ro;
	ctx->snapshot->md_ro = false;

	/* Mark the blob as pending removal for power-failure safety; use the clone id for recovery */
	ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
				      sizeof(spdk_blob_id), true);
	if (ctx->bserrno != 0) {
		delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx);
}
static void
delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno) {
		SPDK_ERRLOG("Failed to open clone\n");
		ctx->bserrno = bserrno;
		delete_snapshot_cleanup_snapshot(ctx, 0);
		return;
	}

	ctx->clone = clone;

	if (clone->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
		return;
	}

	clone->locked_operation_in_progress = true;

	blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx);
}

static void
update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;
	struct spdk_blob_list *snapshot_clone_entry = NULL;

	/* Get the snapshot entry for the snapshot we want to remove */
	snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id);

	assert(snapshot_entry != NULL);

	/* Get the clone of the snapshot (at this point there can be only one clone) */
	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
	assert(snapshot_entry->clone_count == 1);
	assert(clone_entry != NULL);

	/* Get the snapshot entry for the parent snapshot and the clone entry within that
	 * snapshot for the snapshot we are removing */
	blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
					    &snapshot_clone_entry);

	spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx);
}

static void
bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;
	struct spdk_blob_list *snapshot_entry = NULL;
	uint32_t page_num;

	if (bserrno) {
		SPDK_ERRLOG("Failed to remove blob\n");
		bs_sequence_finish(seq, bserrno);
		return;
	}

	/* Remove the snapshot from the list */
	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry != NULL) {
		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
		free(snapshot_entry);
	}

	page_num = bs_blobid_to_page(blob->id);
	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->active.num_pages = 0;
	blob_resize(blob, 0);

	blob_persist(seq, blob, bs_delete_persist_cpl, blob);
}

static int
bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;
	struct spdk_blob *clone = NULL;
	bool has_one_clone = false;

	/* Check if this is a snapshot with clones */
	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry != NULL) {
		if (snapshot_entry->clone_count > 1) {
			SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
			return -EBUSY;
		} else if (snapshot_entry->clone_count == 1) {
			has_one_clone = true;
		}
	}

	/* Check if someone has this blob open (besides this delete context):
	 * - open_ref == 1 - only this context opened the blob, so it is OK to remove it
	 * - open_ref <= 2 && has_one_clone == true - the clone is holding the snapshot
	 *   open and that is OK, because we will update it accordingly */
	if (blob->open_ref <= 2 && has_one_clone) {
		clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
		assert(clone_entry != NULL);
		clone = blob_lookup(blob->bs, clone_entry->id);

		if (blob->open_ref == 2 && clone == NULL) {
			/* The clone is closed and someone else opened this blob */
			SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
			return -EBUSY;
		}

		*update_clone = true;
		return 0;
	}

	if (blob->open_ref > 1) {
		SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
		return -EBUSY;
	}

	assert(has_one_clone == false);
	*update_clone = false;
	return 0;
}

static void
bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;

	bs_sequence_finish(seq, -ENOMEM);
}
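/*
 * Deleting a snapshot that still has one clone is a multi-step metadata
 * update: open the clone, freeze its I/O, mark the snapshot with
 * SNAPSHOT_PENDING_REMOVAL (so recovery can finish the job after a power
 * failure), merge the snapshot's cluster map and extent pages into the
 * clone, re-parent the clone, sync the clone, clear the snapshot's now
 * double-referenced clusters, sync the snapshot, and finally unfreeze and
 * delete. bs_delete_open_cpl() below kicks this off through
 * update_clone_on_snapshot_deletion().
 */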
static void
bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;
	struct delete_snapshot_ctx *ctx;
	bool update_clone = false;

	if (bserrno != 0) {
		bs_sequence_finish(seq, bserrno);
		return;
	}

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq);
		return;
	}

	ctx->snapshot = blob;
	ctx->cb_fn = bs_delete_blob_finish;
	ctx->cb_arg = seq;

	/* Check if the blob can be removed and if it is a snapshot with a clone on top of it */
	ctx->bserrno = bs_is_blob_deletable(blob, &update_clone);
	if (ctx->bserrno) {
		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
		return;
	}

	if (blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
		return;
	}

	blob->locked_operation_in_progress = true;

	/*
	 * Remove the blob from the blob_store list now, to ensure it does not
	 * get returned after this point by blob_lookup().
	 */
	spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
	RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);

	if (update_clone) {
		/* This blob is a snapshot with an active clone - update the clone first */
		update_clone_on_snapshot_deletion(blob, ctx);
	} else {
		/* This blob does not have any clones - just remove it */
		bs_blob_list_remove(blob);
		bs_delete_blob_finish(seq, blob, 0);
		free(ctx);
	}
}

void
spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
		    spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	SPDK_DEBUGLOG(blob, "Deleting blob %" PRIu64 "\n", blobid);

	assert(spdk_get_thread() == bs->md_thread);

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq);
}

/* END spdk_bs_delete_blob */

/* START spdk_bs_open_blob */

static void
bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;
	struct spdk_blob *existing;

	if (bserrno != 0) {
		blob_free(blob);
		seq->cpl.u.blob_handle.blob = NULL;
		bs_sequence_finish(seq, bserrno);
		return;
	}

	existing = blob_lookup(blob->bs, blob->id);
	if (existing) {
		blob_free(blob);
		existing->open_ref++;
		seq->cpl.u.blob_handle.blob = existing;
		bs_sequence_finish(seq, 0);
		return;
	}

	blob->open_ref++;

	spdk_bit_array_set(blob->bs->open_blobids, blob->id);
	RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob);

	bs_sequence_finish(seq, bserrno);
}

static inline void
blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst)
{
#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		dst->field = src->field; \
	} \

	SET_FIELD(clear_method);

	dst->opts_size = src->opts_size;

	/* You should not remove this statement, but need to update the assert statement
	 * if you add a new field, and also add a corresponding SET_FIELD statement */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 16, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}

static void
bs_open_blob(struct spdk_blob_store *bs,
	     spdk_blob_id blobid,
	     struct spdk_blob_open_opts *opts,
	     spdk_blob_op_with_handle_complete cb_fn,
	     void *cb_arg)
{
	struct spdk_blob *blob;
	struct spdk_bs_cpl cpl;
	struct spdk_blob_open_opts opts_local;
	spdk_bs_sequence_t *seq;
	uint32_t page_num;

	SPDK_DEBUGLOG(blob, "Opening blob %" PRIu64 "\n", blobid);
	assert(spdk_get_thread() == bs->md_thread);

	page_num = bs_blobid_to_page(blobid);
	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
		/* Invalid blobid */
		cb_fn(cb_arg, NULL, -ENOENT);
		return;
	}

	blob = blob_lookup(bs, blobid);
	if (blob) {
		blob->open_ref++;
		cb_fn(cb_arg, blob, 0);
		return;
	}

	blob = blob_alloc(bs, blobid);
	if (!blob) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	spdk_blob_open_opts_init(&opts_local, sizeof(opts_local));
	if (opts) {
		blob_open_opts_copy(opts, &opts_local);
	}

	blob->clear_method = opts_local.clear_method;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
	cpl.u.blob_handle.cb_fn = cb_fn;
	cpl.u.blob_handle.cb_arg = cb_arg;
	cpl.u.blob_handle.blob = blob;

	seq = bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		blob_free(blob);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	blob_load(seq, blob, bs_open_blob_cpl, blob);
}

void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
		       spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg);
}

void spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
			   struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	bs_open_blob(bs, blobid, opts, cb_fn, cb_arg);
}

/* END spdk_bs_open_blob */

/* START spdk_blob_set_read_only */
int spdk_blob_set_read_only(struct spdk_blob *blob)
{
	blob_verify_md_op(blob);

	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	return 0;
}
/* END spdk_blob_set_read_only */

/* START spdk_blob_sync_md */

static void
blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
		blob->data_ro = true;
		blob->md_ro = true;
	}

	bs_sequence_finish(seq, bserrno);
}

static void
blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start(blob->bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	blob_persist(seq, blob, blob_sync_md_cpl, blob);
}

void
spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_verify_md_op(blob);

	SPDK_DEBUGLOG(blob, "Syncing blob %" PRIu64 "\n", blob->id);

	if (blob->md_ro) {
		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
		cb_fn(cb_arg, 0);
		return;
	}

	blob_sync_md(blob, cb_fn, cb_arg);
}

/* END spdk_blob_sync_md */
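/*
 * Cluster insertion below is a cross-thread handshake: an I/O thread that
 * allocated a cluster sends an spdk_blob_insert_cluster_ctx to the metadata
 * thread (spdk_thread_send_msg), the md thread updates the cluster map and
 * persists either the whole metadata or just one extent page, and the result
 * is messaged back to the originating thread, which completes the I/O.
 */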
struct spdk_blob_insert_cluster_ctx {
	struct spdk_thread *thread;
	struct spdk_blob *blob;
	uint32_t cluster_num;	/* cluster index in the blob */
	uint32_t cluster;	/* cluster on disk */
	uint32_t extent_page;	/* extent page on disk */
	int rc;
	spdk_blob_op_complete cb_fn;
	void *cb_arg;
};

static void
blob_insert_cluster_msg_cpl(void *arg)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;

	ctx->cb_fn(ctx->cb_arg, ctx->rc);
	free(ctx);
}

static void
blob_insert_cluster_msg_cb(void *arg, int bserrno)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;

	ctx->rc = bserrno;
	spdk_thread_send_msg(ctx->thread, blob_insert_cluster_msg_cpl, ctx);
}

static void
blob_insert_new_ep_cb(void *arg, int bserrno)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;
	uint32_t *extent_page;

	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
	*extent_page = ctx->extent_page;
	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
	blob_sync_md(ctx->blob, blob_insert_cluster_msg_cb, ctx);
}

static void
blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_md_page *page = cb_arg;

	bs_sequence_finish(seq, bserrno);
	spdk_free(page);
}

static void
blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	spdk_bs_sequence_t *seq;
	struct spdk_bs_cpl cpl;
	struct spdk_blob_md_page *page = NULL;
	uint32_t page_count = 0;
	int rc;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start(blob->bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	rc = blob_serialize_add_page(blob, &page, &page_count, &page);
	if (rc < 0) {
		bs_sequence_finish(seq, rc);
		return;
	}

	blob_serialize_extent_page(blob, cluster_num, page);

	page->crc = blob_md_page_calc_crc(page);

	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);

	bs_sequence_write_dev(seq, page, bs_md_page_to_lba(blob->bs, extent),
			      bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
			      blob_persist_extent_page_cpl, page);
}

static void
blob_insert_cluster_msg(void *arg)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;
	uint32_t *extent_page;

	ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
	if (ctx->rc != 0) {
		spdk_thread_send_msg(ctx->thread, blob_insert_cluster_msg_cpl, ctx);
		return;
	}

	if (ctx->blob->use_extent_table == false) {
		/* The extent table is not used; proceed with a sync of md that will only use extents_rle. */
		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
		blob_sync_md(ctx->blob, blob_insert_cluster_msg_cb, ctx);
		return;
	}

	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
	if (*extent_page == 0) {
		/* The extent page requires allocation.
		 * It was already claimed in the used_md_pages map and placed in ctx. */
		assert(ctx->extent_page != 0);
		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num,
				       blob_insert_new_ep_cb, ctx);
	} else {
		/* It is possible for the original thread to have allocated an extent page for
		 * a different cluster in the same extent page. In that case proceed with
		 * updating the existing extent page, but release the additional one. */
		if (ctx->extent_page != 0) {
			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
			bs_release_md_page(ctx->blob->bs, ctx->extent_page);
			ctx->extent_page = 0;
		}
		/* The extent page is already allocated.
		 * Every cluster allocation requires just an update of a single extent page. */
		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num,
				       blob_insert_cluster_msg_cb, ctx);
	}
}

static void
blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
				 uint64_t cluster, uint32_t extent_page, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_insert_cluster_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->thread = spdk_get_thread();
	ctx->blob = blob;
	ctx->cluster_num = cluster_num;
	ctx->cluster = cluster;
	ctx->extent_page = extent_page;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx);
}

/* START spdk_blob_close */

static void
blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno == 0) {
		blob->open_ref--;
		if (blob->open_ref == 0) {
			/*
			 * Blobs with active.num_pages == 0 are deleted blobs.
			 * These blobs are removed from the blob_store list
			 * when the deletion process starts - so don't try to
			 * remove them again.
			 */
			if (blob->active.num_pages > 0) {
				spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
				RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);
			}
			blob_free(blob);
		}
	}

	bs_sequence_finish(seq, bserrno);
}

void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	blob_verify_md_op(blob);

	SPDK_DEBUGLOG(blob, "Closing blob %" PRIu64 "\n", blob->id);

	if (blob->open_ref == 0) {
		cb_fn(cb_arg, -EBADF);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start(blob->bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Sync metadata */
	blob_persist(seq, blob, blob_close_cpl, blob);
}

/* END spdk_blob_close */

struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
{
	return spdk_get_io_channel(bs);
}

void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
{
	spdk_put_io_channel(channel);
}

void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
			       SPDK_BLOB_UNMAP);
}

void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
			       uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
			       SPDK_BLOB_WRITE_ZEROES);
}

void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
			void *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
			       SPDK_BLOB_WRITE);
}
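/*
 * Minimal I/O sketch (assumes an open blob `blob`, a 4 KiB io_unit, and an
 * spdk_blob_op_complete callback `io_cb`; names are placeholders). Offsets
 * and lengths are expressed in io_units, not bytes:
 *
 *	struct spdk_io_channel *ch = spdk_bs_alloc_io_channel(bs);
 *	uint8_t *buf = spdk_malloc(4096, 4096, NULL,
 *				   SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *
 *	spdk_blob_io_write(blob, ch, buf, 0, 1, io_cb, NULL);
 *	// later: spdk_blob_io_read(blob, ch, buf, 0, 1, io_cb, NULL);
 */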
void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       void *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
			       SPDK_BLOB_READ);
}

void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
			 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
			 spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
}

void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
}

struct spdk_bs_iter_ctx {
	int64_t page_num;
	struct spdk_blob_store *bs;

	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
};

static void
bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;
	spdk_blob_id id;

	if (bserrno == 0) {
		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
		free(ctx);
		return;
	}

	ctx->page_num++;
	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
		free(ctx);
		return;
	}

	id = bs_page_to_blobid(ctx->page_num);

	spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx);
}

void
spdk_bs_iter_first(struct spdk_blob_store *bs,
		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = -1;
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	bs_iter_cpl(ctx, NULL, -1);
}

static void
bs_iter_close_cpl(void *cb_arg, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;

	bs_iter_cpl(ctx, NULL, -1);
}

void
spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	assert(blob != NULL);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = bs_blobid_to_page(blob->id);
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Close the existing blob */
	spdk_blob_close(blob, bs_iter_close_cpl, ctx);
}
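/*
 * Typical iteration pattern (sketch; `iter_cb` and `bs` are placeholder
 * names supplied by the caller, with `bs` reachable from the callback's
 * argument in real code):
 *
 *	static void
 *	iter_cb(void *arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		if (bserrno == -ENOENT) {
 *			return;	// no more blobs
 *		}
 *		// ... inspect blob ...
 *		spdk_bs_iter_next(bs, blob, iter_cb, arg);  // also closes blob
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, NULL);
 */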
static int
blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
	       uint16_t value_len, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;
	size_t desc_size;
	void *tmp;

	blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}

	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
		SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page of size %zu\n",
			      name, desc_size, SPDK_BS_MAX_DESC_SIZE);
		return -ENOMEM;
	}

	if (internal) {
		xattrs = &blob->xattrs_internal;
		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
	} else {
		xattrs = &blob->xattrs;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			tmp = malloc(value_len);
			if (!tmp) {
				return -ENOMEM;
			}

			free(xattr->value);
			xattr->value_len = value_len;
			xattr->value = tmp;
			memcpy(xattr->value, value, value_len);

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}

	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
	TAILQ_INSERT_TAIL(xattrs, xattr, link);

	blob->state = SPDK_BLOB_STATE_DIRTY;

	return 0;
}

int
spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		    uint16_t value_len)
{
	return blob_set_xattr(blob, name, value, value_len, false);
}

static int
blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;

	blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}
	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			TAILQ_REMOVE(xattrs, xattr, link);
			free(xattr->value);
			free(xattr->name);
			free(xattr);

			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
			}
			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	return -ENOENT;
}

int
spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
{
	return blob_remove_xattr(blob, name, false);
}
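/*
 * Example (illustrative sketch, not part of blobstore itself): setting an
 * xattr on an open blob. Like all metadata operations this must run on
 * the blobstore's metadata thread, and the new value only reaches disk
 * once the blob is synced (spdk_blob_sync_md()) or closed. The key
 * "name" and its value are arbitrary.
 *
 *	const char *val = "my_blob";
 *	int rc;
 *
 *	rc = spdk_blob_set_xattr(blob, "name", val, strlen(val) + 1);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("failed to set xattr: %d\n", rc);
 *	}
 */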
static int
blob_get_xattr_value(struct spdk_blob *blob, const char *name,
		     const void **value, size_t *value_len, bool internal)
{
	struct spdk_xattr *xattr;
	struct spdk_xattr_tailq *xattrs;

	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			*value = xattr->value;
			*value_len = xattr->value_len;
			return 0;
		}
	}
	return -ENOENT;
}

int
spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
			  const void **value, size_t *value_len)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_value(blob, name, value, value_len, false);
}

struct spdk_xattr_names {
	uint32_t count;
	const char *names[0];
};

static int
blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
{
	struct spdk_xattr *xattr;
	int count = 0;

	TAILQ_FOREACH(xattr, xattrs, link) {
		count++;
	}

	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
	if (*names == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		(*names)->names[(*names)->count++] = xattr->name;
	}

	return 0;
}

int
spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_names(&blob->xattrs, names);
}

uint32_t
spdk_xattr_names_get_count(struct spdk_xattr_names *names)
{
	assert(names != NULL);

	return names->count;
}

const char *
spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
{
	if (index >= names->count) {
		return NULL;
	}

	return names->names[index];
}

void
spdk_xattr_names_free(struct spdk_xattr_names *names)
{
	free(names);
}
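/*
 * Example (illustrative sketch, not part of blobstore itself): listing a
 * blob's xattrs and reading each one back. The strings in `names` and the
 * pointer returned by spdk_blob_get_xattr_value() are owned by the blob,
 * so use them before the blob's xattrs change or the blob is closed.
 *
 *	struct spdk_xattr_names *names;
 *	const void *value;
 *	size_t value_len;
 *	uint32_t i;
 *
 *	if (spdk_blob_get_xattr_names(blob, &names) == 0) {
 *		for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *			const char *name = spdk_xattr_names_get_name(names, i);
 *
 *			if (spdk_blob_get_xattr_value(blob, name, &value, &value_len) == 0) {
 *				SPDK_NOTICELOG("xattr %s: %zu bytes\n", name, value_len);
 *			}
 *		}
 *		spdk_xattr_names_free(names);
 *	}
 */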
struct spdk_bs_type
spdk_bs_get_bstype(struct spdk_blob_store *bs)
{
	return bs->bstype;
}

void
spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
{
	memcpy(&bs->bstype, &bstype, sizeof(bstype));
}

bool
spdk_blob_is_read_only(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return (blob->data_ro || blob->md_ro);
}

bool
spdk_blob_is_snapshot(struct spdk_blob *blob)
{
	struct spdk_blob_list *snapshot_entry;

	assert(blob != NULL);

	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry == NULL) {
		return false;
	}

	return true;
}

bool
spdk_blob_is_clone(struct spdk_blob *blob)
{
	assert(blob != NULL);

	if (blob->parent_id != SPDK_BLOBID_INVALID) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return true;
	}

	return false;
}

bool
spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
}

static void
blob_update_clear_method(struct spdk_blob *blob)
{
	enum blob_clear_method stored_cm;

	assert(blob != NULL);

	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
	 * in metadata previously. If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
	 */
	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);

	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
		blob->clear_method = stored_cm;
	} else if (blob->clear_method != stored_cm) {
		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
			     blob->clear_method, stored_cm);
	}
}

spdk_blob_id
spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
			if (clone_entry->id == blob_id) {
				return snapshot_entry->id;
			}
		}
	}

	return SPDK_BLOBID_INVALID;
}

int
spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
		     size_t *count)
{
	struct spdk_blob_list *snapshot_entry, *clone_entry;
	size_t n;

	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
	if (snapshot_entry == NULL) {
		*count = 0;
		return 0;
	}

	if (ids == NULL || *count < snapshot_entry->clone_count) {
		*count = snapshot_entry->clone_count;
		return -ENOMEM;
	}
	*count = snapshot_entry->clone_count;

	n = 0;
	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
		ids[n++] = clone_entry->id;
	}

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT(blob)
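/*
 * Example (illustrative sketch, not part of blobstore itself): the usual
 * two-call pattern for spdk_blob_get_clones() above - first query the
 * required count, then fetch the ids. A return of 0 from the first call
 * means `snapshot_id` has no snapshot entry at all. `snapshot_id` is a
 * hypothetical name.
 *
 *	size_t count = 0;
 *	spdk_blob_id *ids;
 *
 *	if (spdk_blob_get_clones(bs, snapshot_id, NULL, &count) == -ENOMEM) {
 *		ids = calloc(count, sizeof(*ids));
 *		if (ids != NULL &&
 *		    spdk_blob_get_clones(bs, snapshot_id, ids, &count) == 0) {
 *			// ids[0..count-1] now hold the clone blob ids
 *		}
 *		free(ids);
 *	}
 */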