/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   spdk_blob_op_complete cb_fn, void *cb_arg);

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

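/*
 * Note (added remark): the comparator above follows the strcmp()-style
 * contract that the RB tree macros expect - negative when blob1 sorts first,
 * 0 on equal ids, positive otherwise. The boolean comparison evaluates to 1
 * exactly when blob1->id > blob2->id.
 */
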
RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}

static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = 0;

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* Extent page shall never occupy md_page so start the search from 1 */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob %" PRIu64 "\n", *cluster, blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

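/*
 * Illustrative sketch (added, not part of the original code): with the extent
 * table in use, a cluster number maps to an extent-table slot by dividing by
 * SPDK_EXTENTS_PER_EP. If SPDK_EXTENTS_PER_EP were 512, cluster 1000 would
 * belong to extent page index 1000 / 512 = 1, and a new md page is claimed
 * above only when that slot is still 0 (unallocated).
 */
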
static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}

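/*
 * Example usage (added sketch): a caller initializes the opts with its
 * compiled-in struct size, so fields beyond an older caller's struct are
 * simply skipped by FIELD_OK(). The bs handle and the completion callback
 * below are assumed to be defined by the application.
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 16;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_cb, cb_arg);
 */
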
void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}

static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr	*xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set	*set;
	struct spdk_bs_user_op_args	*args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	if (blob->frozen_refcnt == 1) {
		spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	if (blob->frozen_refcnt == 0) {
		spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

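/*
 * Added remark: freeze/unfreeze pair up as a refcount. Only the 0 -> 1
 * transition sweeps the channels (blob_io_sync) and only the 1 -> 0
 * transition replays queued I/O (blob_execute_queued_io). For example, two
 * overlapping freezes cause exactly one channel sweep, and I/O resumes only
 * after both callers have unfrozen the blob.
 */
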
static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr	*xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}

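/*
 * On-disk layout sketch (added) for the xattr descriptor consumed above,
 * matching its length check; the example values are illustrative and assume
 * 16-bit name_length/value_length fields:
 *
 *	name_length = 4, value_length = 8, name = "name", value = <8 bytes>
 *	desc->length = 2 + 2 + 4 + 8 = 16
 *
 * The value bytes begin immediately after the name bytes, which is why the
 * memcpy above reads from desc_xattr->name + name_length.
 */
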
static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t	cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags	*desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
			unsigned int					i, j;
			unsigned int					cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* Extent Table already present in the md;
				 * both descriptors should never be present at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD;
				 * both descriptors should never be present at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match the number
				 * from a previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, which are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
			unsigned int					i;
			unsigned int					cluster_count = 0;
			size_t						cluster_idx_length;

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD;
				 * both descriptors should never be present at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}
			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should match
			 * the current size of the blob.
			 * If changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type.  Do not fail - just continue to the
			 * next descriptor.  If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

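/*
 * Descriptor-walk sketch (added): each metadata page is a flat byte region of
 * descriptors, each prefixed by a (type, length) header. Parsing stops at a
 * PADDING descriptor with length 0 or when the next header would not fit.
 * For example, a page holding a FLAGS descriptor followed by one 16-byte
 * XATTR descriptor is laid out as:
 *
 *	[FLAGS hdr][flags body][XATTR hdr][16-byte xattr body][PADDING, len 0]
 */
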
static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD, this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (%" PRIu64 ") doesn't match what's in metadata (%" PRIu64 ")\n",
			    blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

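/*
 * Page-chain sketch (added): a blob's metadata pages form a singly linked
 * list on disk. Page 0 lives at the md page derived from the blobid, and each
 * page's 'next' field names the md page of its successor, e.g.:
 *
 *	page 0 (seq 0) --next--> md page 57 (seq 1) --next--> SPDK_INVALID_MD_PAGE
 *
 * The md page number 57 is made up for illustration; blob_parse only requires
 * that each 'next' page is marked used and that sequence numbers ascend.
 */
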
static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr	*desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}

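/*
 * Encoding sketch (added) for the descriptor built above: allocated extent
 * pages are emitted one per entry (num_pages == 1), while runs of unallocated
 * slots collapse into a single entry with page_idx == 0. For instance, the
 * array { 12, 0, 0, 0, 14 } would serialize as (md page numbers illustrative):
 *
 *	{ page_idx = 12, num_pages = 1 }
 *	{ page_idx = 0,  num_pages = 3 }
 *	{ page_idx = 14, num_pages = 1 }
 */
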
static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t	last_extent_page;
	int		rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted,
	 * even when num_extent_pages == 0 (hence the <= in the loop condition). */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

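/*
 * RLE sketch (added): assuming one LBA per cluster purely for illustration,
 * the cluster LBA array { 5, 6, 7, 0, 0, 9 } would serialize as:
 *
 *	{ cluster_idx = 5, length = 3 }   contiguous allocated run
 *	{ cluster_idx = 0, length = 2 }   unallocated (thin-provisioned) run
 *	{ cluster_idx = 9, length = 1 }
 *
 * cluster_idx is recovered as lba / lba_per_cluster, so real descriptors hold
 * cluster indices regardless of the device's cluster-to-LBA ratio.
 */
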
static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t	last_cluster;
	int		rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

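/*
 * Compatibility sketch (added) for the three flag words serialized above,
 * matching the checks in blob_parse_page(): a loader that finds bits outside
 * SPDK_BLOB_INVALID_FLAGS_MASK refuses to open the blob, while unknown bits
 * in data_ro_flags or md_ro_flags only force the blob read-only. So a
 * hypothetical newer writer setting an unknown data_ro bit still lets an
 * older reader open the blob, just with data_ro = md_ro = true.
 */
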
static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr	*xattr;
	int	rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page	*cur_page;
	int				rc;
	uint8_t				*buf;
	size_t				remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	uint32_t			next_extent_page;
	spdk_bs_sequence_t		*seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

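/*
 * CRC sketch (added): a metadata page stores its CRC32-C in its last 4 bytes,
 * so the checksum above covers the first SPDK_BS_PAGE_SIZE - 4 bytes. Loaders
 * recompute it the same way and compare against page->crc; see the checks in
 * blob_load_cpl() and blob_load_cpl_extents_cpl() below.
 */
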
static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob	*blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot fail\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);

static void
blob_load_backing_dev(void *cb_arg)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	const void			*value;
	size_t				len;
	int				rc;

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}

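/*
 * Backing-device selection sketch (added): a thin-provisioned clone carries
 * its parent's id in the internal BLOB_SNAPSHOT xattr, so reads to
 * unallocated clusters fall through to the (recursively opened) snapshot,
 * while a thin blob without a parent reads zeroes:
 *
 *	clone  --BLOB_SNAPSHOT-->  snapshot blob   (bs_create_blob_bs_dev)
 *	thin blob, no parent   ->  zeroes device   (bs_create_zeroes_dev)
 */
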
static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	uint64_t			i;
	uint32_t			crc;
	uint64_t			lba;
	void				*tmp;
	uint64_t			sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case blob size should be increased by up to the amount left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(ctx);
}

static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	int				rc;
	uint32_t			crc;
	uint32_t			current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid %" PRIu64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid %" PRIu64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found disable support
		 * for extent table. No extent_* descriptors means that blob has length of 0
		 * and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE if used, is always present in metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_bs_super_block	*super;

	struct spdk_blob_md_page	*pages;
	uint32_t			next_extent_page;
	struct spdk_blob_md_page	*extent_page;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

static void
bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (ctx->blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

static void blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx	*next_persist, *tmp;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob_persist_check_dirty(next_persist);
}

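/*
 * Batching sketch (added): persists that arrive while one is in flight
 * collect on pending_persists. When the in-flight persist finishes, the two
 * lists are swapped, every queued request completes off the next single
 * metadata write, and one new persist is started. E.g. three rapid
 * spdk_blob_sync_md() calls typically cost two metadata writes, not three.
 */
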
static void
blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	/* Release all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to release if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			bs_release_md_page(bs, blob->active.extent_pages[i]);
		}
	}

	if (blob->active.num_extent_pages == 0) {
		free(blob->active.extent_pages);
		blob->active.extent_pages = NULL;
		blob->active.extent_pages_array_size = 0;
	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
		assert(tmp != NULL);
		blob->active.extent_pages = tmp;
#endif
		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
	}

	blob_persist_complete(seq, ctx, bserrno);
}

static void
blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;
	uint64_t			lba;
	uint64_t			lba_count;
	spdk_bs_batch_t			*batch;

	batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx);
	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* Clear all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to clear if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]);
			bs_batch_write_zeroes_dev(batch, lba, lba_count);
		}
	}

	bs_batch_close(batch);
}

static void
blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	pthread_mutex_lock(&bs->used_clusters_mutex);
	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			bs_release_cluster(bs, cluster_num);
		}
	}
	pthread_mutex_unlock(&bs->used_clusters_mutex);

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;
#endif
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	/* Move on to clearing extent pages */
	blob_persist_clear_extents(seq, ctx);
}

static void
blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	spdk_bs_batch_t			*batch;
	size_t				i;
	uint64_t			lba;
	uint64_t			lba_count;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx);

	/* Clear all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint64_t next_lba_count = bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		} else if (next_lba == 0) {
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, clear them now */
		if (lba_count > 0) {
			bs_batch_clear_dev(ctx, batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, clear them now */
	if (lba_count > 0) {
		bs_batch_clear_dev(ctx, batch, lba, lba_count);
	}

	bs_batch_close(batch);
}

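/*
 * Coalescing sketch (added): truncated clusters are cleared in as few device
 * operations as possible. With 64 LBAs per cluster, truncated cluster LBAs
 * { 128, 192, 0, 512 } would issue two clears: one at lba 128 with lba_count
 * 128 (two contiguous clusters) and one at lba 512 with lba_count 64; the
 * zero entry (never allocated) is skipped. The numbers are illustrative only.
 */
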
*/ 1911 if (blob->active.num_pages == 0) { 1912 uint32_t page_num; 1913 1914 /* The first page in the metadata goes where the blobid indicates */ 1915 page_num = bs_blobid_to_page(blob->id); 1916 lba = bs_md_page_to_lba(bs, page_num); 1917 1918 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1919 } 1920 1921 bs_batch_close(batch); 1922 } 1923 1924 static void 1925 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1926 { 1927 struct spdk_blob_persist_ctx *ctx = cb_arg; 1928 struct spdk_blob *blob = ctx->blob; 1929 struct spdk_blob_store *bs = blob->bs; 1930 uint64_t lba; 1931 uint32_t lba_count; 1932 struct spdk_blob_md_page *page; 1933 1934 if (bserrno != 0) { 1935 blob_persist_complete(seq, ctx, bserrno); 1936 return; 1937 } 1938 1939 if (blob->active.num_pages == 0) { 1940 /* Move on to the next step */ 1941 blob_persist_zero_pages(seq, ctx, 0); 1942 return; 1943 } 1944 1945 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 1946 1947 page = &ctx->pages[0]; 1948 /* The first page in the metadata goes where the blobid indicates */ 1949 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 1950 1951 bs_sequence_write_dev(seq, page, lba, lba_count, 1952 blob_persist_zero_pages, ctx); 1953 } 1954 1955 static void 1956 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 1957 { 1958 struct spdk_blob *blob = ctx->blob; 1959 struct spdk_blob_store *bs = blob->bs; 1960 uint64_t lba; 1961 uint32_t lba_count; 1962 struct spdk_blob_md_page *page; 1963 spdk_bs_batch_t *batch; 1964 size_t i; 1965 1966 /* Clusters don't move around in blobs. The list shrinks or grows 1967 * at the end, but no changes ever occur in the middle of the list. 1968 */ 1969 1970 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 1971 1972 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 1973 1974 /* This starts at 1. The root page is not written until 1975 * all of the others are finished. 1976 */ 1977 for (i = 1; i < blob->active.num_pages; i++) { 1978 page = &ctx->pages[i]; 1979 assert(page->sequence_num == i); 1980 1981 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 1982 1983 bs_batch_write_dev(batch, page, lba, lba_count); 1984 } 1985 1986 bs_batch_close(batch); 1987 } 1988 1989 static int 1990 blob_resize(struct spdk_blob *blob, uint64_t sz) 1991 { 1992 uint64_t i; 1993 uint64_t *tmp; 1994 uint64_t cluster; 1995 uint32_t lfmd; /* lowest free md page */ 1996 uint64_t num_clusters; 1997 uint32_t *ep_tmp; 1998 uint64_t new_num_ep = 0, current_num_ep = 0; 1999 struct spdk_blob_store *bs; 2000 2001 bs = blob->bs; 2002 2003 blob_verify_md_op(blob); 2004 2005 if (blob->active.num_clusters == sz) { 2006 return 0; 2007 } 2008 2009 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2010 /* If this blob was resized to be larger, then smaller, then 2011 * larger without syncing, then the cluster array already 2012 * contains spare assigned clusters we can use. 2013 */ 2014 num_clusters = spdk_min(blob->active.cluster_array_size, 2015 sz); 2016 } else { 2017 num_clusters = blob->active.num_clusters; 2018 } 2019 2020 if (blob->use_extent_table) { 2021 /* Round up, since every cluster beyond the current extent table size 2022 * requires a new extent page. */ 2023 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2024 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2025 } 2026 2027 /* Check first that we have enough clusters and md pages before we start claiming them. 
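 *
 * The extent table math above rounds up. For example (hypothetical,
 * assuming SPDK_EXTENTS_PER_EP were 512): growing to sz = 1030
 * clusters needs spdk_divide_round_up(1030, 512) = 3 extent pages,
 * even though the third page starts out mostly empty. The
 * check-before-claim split keeps a failed resize side-effect free:
 * if either the free cluster count or the free md page count is
 * insufficient, we return -ENOSPC before claiming anything.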
*/ 2028 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2029 if ((sz - num_clusters) > bs->num_free_clusters) { 2030 return -ENOSPC; 2031 } 2032 lfmd = 0; 2033 for (i = current_num_ep; i < new_num_ep; i++) { 2034 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2035 if (lfmd == UINT32_MAX) { 2036 /* No more free md pages. Cannot satisfy the request */ 2037 return -ENOSPC; 2038 } 2039 } 2040 } 2041 2042 if (sz > num_clusters) { 2043 /* Expand the cluster array if necessary. 2044 * We only shrink the array when persisting. 2045 */ 2046 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2047 if (sz > 0 && tmp == NULL) { 2048 return -ENOMEM; 2049 } 2050 memset(tmp + blob->active.cluster_array_size, 0, 2051 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2052 blob->active.clusters = tmp; 2053 blob->active.cluster_array_size = sz; 2054 2055 /* Expand the extent table only if enough clusters were added */ 2056 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2057 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2058 if (new_num_ep > 0 && ep_tmp == NULL) { 2059 return -ENOMEM; 2060 } 2061 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2062 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2063 blob->active.extent_pages = ep_tmp; 2064 blob->active.extent_pages_array_size = new_num_ep; 2065 } 2066 } 2067 2068 blob->state = SPDK_BLOB_STATE_DIRTY; 2069 2070 if (spdk_blob_is_thin_provisioned(blob) == false) { 2071 cluster = 0; 2072 lfmd = 0; 2073 pthread_mutex_lock(&blob->bs->used_clusters_mutex); 2074 for (i = num_clusters; i < sz; i++) { 2075 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2076 lfmd++; 2077 } 2078 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 2079 } 2080 2081 blob->active.num_clusters = sz; 2082 blob->active.num_extent_pages = new_num_ep; 2083 2084 return 0; 2085 } 2086 2087 static void 2088 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2089 { 2090 spdk_bs_sequence_t *seq = ctx->seq; 2091 struct spdk_blob *blob = ctx->blob; 2092 struct spdk_blob_store *bs = blob->bs; 2093 uint64_t i; 2094 uint32_t page_num; 2095 void *tmp; 2096 int rc; 2097 2098 /* Generate the new metadata */ 2099 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2100 if (rc < 0) { 2101 blob_persist_complete(seq, ctx, rc); 2102 return; 2103 } 2104 2105 assert(blob->active.num_pages >= 1); 2106 2107 /* Resize the cache of page indices */ 2108 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2109 if (!tmp) { 2110 blob_persist_complete(seq, ctx, -ENOMEM); 2111 return; 2112 } 2113 blob->active.pages = tmp; 2114 2115 /* Assign this metadata to pages. This requires two passes - 2116 * one to verify that there are enough pages and a second 2117 * to actually claim them. */ 2118 page_num = 0; 2119 /* Note that this loop starts at one. The first page location is fixed by the blobid. 
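 *
 * A sketch of the resulting chain (hypothetical page numbers): a
 * three-page blob whose blobid maps to md page 10 might end up with
 *
 *	blob->active.pages[] = { 10, 57, 58 }
 *
 * where entry 0 is fixed by the blobid and 57/58 are the first two
 * free md pages found. Serialized page 0 then gets next = 57 and
 * page 1 gets next = 58; each page's crc is calculated only once its
 * next pointer is final, with the last page handled right after the
 * claiming loop.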
*/ 2120 for (i = 1; i < blob->active.num_pages; i++) { 2121 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2122 if (page_num == UINT32_MAX) { 2123 blob_persist_complete(seq, ctx, -ENOMEM); 2124 return; 2125 } 2126 page_num++; 2127 } 2128 2129 page_num = 0; 2130 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2131 for (i = 1; i < blob->active.num_pages; i++) { 2132 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2133 ctx->pages[i - 1].next = page_num; 2134 /* Now that previous metadata page is complete, calculate the crc for it. */ 2135 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2136 blob->active.pages[i] = page_num; 2137 bs_claim_md_page(bs, page_num); 2138 SPDK_DEBUGLOG(blob, "Claiming page %u for blob %" PRIu64 "\n", page_num, blob->id); 2139 page_num++; 2140 } 2141 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2142 /* Start writing the metadata from last page to first */ 2143 blob->state = SPDK_BLOB_STATE_CLEAN; 2144 blob_persist_write_page_chain(seq, ctx); 2145 } 2146 2147 static void 2148 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2149 { 2150 struct spdk_blob_persist_ctx *ctx = cb_arg; 2151 struct spdk_blob *blob = ctx->blob; 2152 size_t i; 2153 uint32_t extent_page_id; 2154 uint32_t page_count = 0; 2155 int rc; 2156 2157 if (ctx->extent_page != NULL) { 2158 spdk_free(ctx->extent_page); 2159 ctx->extent_page = NULL; 2160 } 2161 2162 if (bserrno != 0) { 2163 blob_persist_complete(seq, ctx, bserrno); 2164 return; 2165 } 2166 2167 /* Only write out Extent Pages when blob was resized. */ 2168 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2169 extent_page_id = blob->active.extent_pages[i]; 2170 if (extent_page_id == 0) { 2171 /* No Extent Page to persist */ 2172 assert(spdk_blob_is_thin_provisioned(blob)); 2173 continue; 2174 } 2175 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2176 ctx->next_extent_page = i + 1; 2177 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2178 if (rc < 0) { 2179 blob_persist_complete(seq, ctx, rc); 2180 return; 2181 } 2182 2183 blob->state = SPDK_BLOB_STATE_DIRTY; 2184 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2185 2186 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2187 2188 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2189 bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 2190 blob_persist_write_extent_pages, ctx); 2191 return; 2192 } 2193 2194 blob_persist_generate_new_md(ctx); 2195 } 2196 2197 static void 2198 blob_persist_start(struct spdk_blob_persist_ctx *ctx) 2199 { 2200 spdk_bs_sequence_t *seq = ctx->seq; 2201 struct spdk_blob *blob = ctx->blob; 2202 2203 if (blob->active.num_pages == 0) { 2204 /* This is the signal that the blob should be deleted. 2205 * Immediately jump to the clean up routine. 
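 *
 * Deletion is therefore not a separate persist path: the delete flow
 * elsewhere in this file just shrinks active.num_pages to 0 and
 * persists, and this branch zeroes the on-disk pages and releases the
 * in-memory claims. The sizing branches below can be read the same
 * way, e.g. (hypothetical sizes) growing from 3 to 5 extent pages
 * sets next_extent_page = spdk_max(1, 3) - 1 = 2, so the last,
 * possibly partially filled old extent page is rewritten along with
 * the new ones.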
*/ 2206 assert(blob->clean.num_pages > 0); 2207 blob->state = SPDK_BLOB_STATE_CLEAN; 2208 blob_persist_zero_pages(seq, ctx, 0); 2209 return; 2210 2211 } 2212 2213 if (blob->clean.num_clusters < blob->active.num_clusters) { 2214 /* Blob was resized up */ 2215 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2216 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2217 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2218 /* Blob was resized down */ 2219 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2220 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2221 } else { 2222 /* No change in size occurred */ 2223 blob_persist_generate_new_md(ctx); 2224 return; 2225 } 2226 2227 blob_persist_write_extent_pages(seq, ctx, 0); 2228 } 2229 2230 static void 2231 blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2232 { 2233 struct spdk_blob_persist_ctx *ctx = cb_arg; 2234 2235 spdk_free(ctx->super); 2236 2237 if (bserrno != 0) { 2238 blob_persist_complete(seq, ctx, bserrno); 2239 return; 2240 } 2241 2242 ctx->blob->bs->clean = 0; 2243 2244 blob_persist_start(ctx); 2245 } 2246 2247 static void 2248 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2249 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2250 2251 2252 static void 2253 blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2254 { 2255 struct spdk_blob_persist_ctx *ctx = cb_arg; 2256 2257 if (bserrno != 0) { 2258 spdk_free(ctx->super); 2259 blob_persist_complete(seq, ctx, bserrno); 2260 return; 2261 } 2262 2263 ctx->super->clean = 0; 2264 if (ctx->super->size == 0) { 2265 ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen; 2266 } 2267 2268 bs_write_super(seq, ctx->blob->bs, ctx->super, blob_persist_dirty_cpl, ctx); 2269 } 2270 2271 static void 2272 blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx) 2273 { 2274 if (ctx->blob->bs->clean) { 2275 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2276 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2277 if (!ctx->super) { 2278 blob_persist_complete(ctx->seq, ctx, -ENOMEM); 2279 return; 2280 } 2281 2282 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->blob->bs, 0), 2283 bs_byte_to_lba(ctx->blob->bs, sizeof(*ctx->super)), 2284 blob_persist_dirty, ctx); 2285 } else { 2286 blob_persist_start(ctx); 2287 } 2288 } 2289 2290 /* Write a blob to disk */ 2291 static void 2292 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2293 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2294 { 2295 struct spdk_blob_persist_ctx *ctx; 2296 2297 blob_verify_md_op(blob); 2298 2299 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2300 cb_fn(seq, cb_arg, 0); 2301 return; 2302 } 2303 2304 ctx = calloc(1, sizeof(*ctx)); 2305 if (!ctx) { 2306 cb_fn(seq, cb_arg, -ENOMEM); 2307 return; 2308 } 2309 ctx->blob = blob; 2310 ctx->seq = seq; 2311 ctx->cb_fn = cb_fn; 2312 ctx->cb_arg = cb_arg; 2313 2314 /* Multiple blob persists can affect one another, via blob->state or 2315 * blob mutable data changes. To prevent it, queue up the persists. 
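 *
 * The two lists split that work: persists_to_complete holds the
 * persist currently being written out, while pending_persists holds
 * persists that arrived in the meantime. Sketch (hypothetical callers
 * A, B, C): if A is in flight when B and C call blob_persist(), B and
 * C land on pending_persists and are only processed after A's
 * sequence completes, preserving arrival order.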
*/ 2316 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2317 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2318 return; 2319 } 2320 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2321 2322 blob_persist_check_dirty(ctx); 2323 } 2324 2325 struct spdk_blob_copy_cluster_ctx { 2326 struct spdk_blob *blob; 2327 uint8_t *buf; 2328 uint64_t page; 2329 uint64_t new_cluster; 2330 uint32_t new_extent_page; 2331 spdk_bs_sequence_t *seq; 2332 }; 2333 2334 static void 2335 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2336 { 2337 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2338 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2339 TAILQ_HEAD(, spdk_bs_request_set) requests; 2340 spdk_bs_user_op_t *op; 2341 2342 TAILQ_INIT(&requests); 2343 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2344 2345 while (!TAILQ_EMPTY(&requests)) { 2346 op = TAILQ_FIRST(&requests); 2347 TAILQ_REMOVE(&requests, op, link); 2348 if (bserrno == 0) { 2349 bs_user_op_execute(op); 2350 } else { 2351 bs_user_op_abort(op); 2352 } 2353 } 2354 2355 spdk_free(ctx->buf); 2356 free(ctx); 2357 } 2358 2359 static void 2360 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2361 { 2362 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2363 2364 if (bserrno) { 2365 if (bserrno == -EEXIST) { 2366 /* The metadata insert failed because another thread 2367 * allocated the cluster first. Free our cluster 2368 * but continue without error. */ 2369 bserrno = 0; 2370 } 2371 pthread_mutex_lock(&ctx->blob->bs->used_clusters_mutex); 2372 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2373 pthread_mutex_unlock(&ctx->blob->bs->used_clusters_mutex); 2374 if (ctx->new_extent_page != 0) { 2375 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2376 } 2377 } 2378 2379 bs_sequence_finish(ctx->seq, bserrno); 2380 } 2381 2382 static void 2383 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2384 { 2385 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2386 uint32_t cluster_number; 2387 2388 if (bserrno) { 2389 /* The write failed, so jump to the final completion handler */ 2390 bs_sequence_finish(seq, bserrno); 2391 return; 2392 } 2393 2394 cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page); 2395 2396 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2397 ctx->new_extent_page, blob_insert_cluster_cpl, ctx); 2398 } 2399 2400 static void 2401 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2402 { 2403 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2404 2405 if (bserrno != 0) { 2406 /* The read failed, so jump to the final completion handler */ 2407 bs_sequence_finish(seq, bserrno); 2408 return; 2409 } 2410 2411 /* Write whole cluster */ 2412 bs_sequence_write_dev(seq, ctx->buf, 2413 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2414 bs_cluster_to_lba(ctx->blob->bs, 1), 2415 blob_write_copy_cpl, ctx); 2416 } 2417 2418 static void 2419 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2420 struct spdk_io_channel *_ch, 2421 uint64_t io_unit, spdk_bs_user_op_t *op) 2422 { 2423 struct spdk_bs_cpl cpl; 2424 struct spdk_bs_channel *ch; 2425 struct spdk_blob_copy_cluster_ctx *ctx; 2426 uint32_t cluster_start_page; 2427 uint32_t cluster_number; 2428 int rc; 2429 2430 ch = spdk_io_channel_get_ctx(_ch); 2431 2432 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2433 /* There are already operations pending. 
Queue this user op 2434 * and return because it will be re-executed when the outstanding 2435 * cluster allocation completes. */ 2436 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2437 return; 2438 } 2439 2440 /* Round the io_unit offset down to the first page in the cluster */ 2441 cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit); 2442 2443 /* Calculate which index in the metadata cluster array the corresponding 2444 * cluster is supposed to be at. */ 2445 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2446 2447 ctx = calloc(1, sizeof(*ctx)); 2448 if (!ctx) { 2449 bs_user_op_abort(op); 2450 return; 2451 } 2452 2453 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2454 2455 ctx->blob = blob; 2456 ctx->page = cluster_start_page; 2457 2458 if (blob->parent_id != SPDK_BLOBID_INVALID) { 2459 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2460 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2461 if (!ctx->buf) { 2462 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2463 blob->bs->cluster_sz); 2464 free(ctx); 2465 bs_user_op_abort(op); 2466 return; 2467 } 2468 } 2469 2470 pthread_mutex_lock(&blob->bs->used_clusters_mutex); 2471 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2472 false); 2473 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 2474 if (rc != 0) { 2475 spdk_free(ctx->buf); 2476 free(ctx); 2477 bs_user_op_abort(op); 2478 return; 2479 } 2480 2481 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2482 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2483 cpl.u.blob_basic.cb_arg = ctx; 2484 2485 ctx->seq = bs_sequence_start(_ch, &cpl); 2486 if (!ctx->seq) { 2487 pthread_mutex_lock(&blob->bs->used_clusters_mutex); 2488 bs_release_cluster(blob->bs, ctx->new_cluster); 2489 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 2490 spdk_free(ctx->buf); 2491 free(ctx); 2492 bs_user_op_abort(op); 2493 return; 2494 } 2495 2496 /* Queue the user op to block other incoming operations */ 2497 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2498 2499 if (blob->parent_id != SPDK_BLOBID_INVALID) { 2500 /* Read cluster from backing device */ 2501 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2502 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2503 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2504 blob_write_copy, ctx); 2505 } else { 2506 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2507 ctx->new_extent_page, blob_insert_cluster_cpl, ctx); 2508 } 2509 } 2510 2511 static inline bool 2512 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2513 uint64_t *lba, uint64_t *lba_count) 2514 { 2515 *lba_count = length; 2516 2517 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2518 assert(blob->back_bs_dev != NULL); 2519 *lba = bs_io_unit_to_back_dev_lba(blob, io_unit); 2520 *lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count); 2521 return false; 2522 } else { 2523 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2524 return true; 2525 } 2526 } 2527 2528 struct op_split_ctx { 2529 struct spdk_blob *blob; 2530 struct spdk_io_channel *channel; 2531 uint64_t io_unit_offset; 2532 uint64_t io_units_remaining; 2533 void *curr_payload; 2534 enum spdk_blob_op_type op_type; 2535 spdk_bs_sequence_t *seq; 2536 bool in_submit_ctx; 2537 bool completed_in_submit_ctx; 2538 bool done; 2539 }; 2540 2541 static void 2542 
blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2543 { 2544 struct op_split_ctx *ctx = cb_arg; 2545 struct spdk_blob *blob = ctx->blob; 2546 struct spdk_io_channel *ch = ctx->channel; 2547 enum spdk_blob_op_type op_type = ctx->op_type; 2548 uint8_t *buf; 2549 uint64_t offset; 2550 uint64_t length; 2551 uint64_t op_length; 2552 2553 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2554 bs_sequence_finish(ctx->seq, bserrno); 2555 if (ctx->in_submit_ctx) { 2556 /* Defer freeing of the ctx object, since it will be 2557 * accessed when this unwinds back to the submission 2558 * context. 2559 */ 2560 ctx->done = true; 2561 } else { 2562 free(ctx); 2563 } 2564 return; 2565 } 2566 2567 if (ctx->in_submit_ctx) { 2568 /* If this split operation completed in the context 2569 * of its submission, mark the flag and return immediately 2570 * to avoid recursion. 2571 */ 2572 ctx->completed_in_submit_ctx = true; 2573 return; 2574 } 2575 2576 while (true) { 2577 ctx->completed_in_submit_ctx = false; 2578 2579 offset = ctx->io_unit_offset; 2580 length = ctx->io_units_remaining; 2581 buf = ctx->curr_payload; 2582 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob, 2583 offset)); 2584 2585 /* Update length and payload for next operation */ 2586 ctx->io_units_remaining -= op_length; 2587 ctx->io_unit_offset += op_length; 2588 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 2589 ctx->curr_payload += op_length * blob->bs->io_unit_size; 2590 } 2591 2592 assert(!ctx->in_submit_ctx); 2593 ctx->in_submit_ctx = true; 2594 2595 switch (op_type) { 2596 case SPDK_BLOB_READ: 2597 spdk_blob_io_read(blob, ch, buf, offset, op_length, 2598 blob_request_submit_op_split_next, ctx); 2599 break; 2600 case SPDK_BLOB_WRITE: 2601 spdk_blob_io_write(blob, ch, buf, offset, op_length, 2602 blob_request_submit_op_split_next, ctx); 2603 break; 2604 case SPDK_BLOB_UNMAP: 2605 spdk_blob_io_unmap(blob, ch, offset, op_length, 2606 blob_request_submit_op_split_next, ctx); 2607 break; 2608 case SPDK_BLOB_WRITE_ZEROES: 2609 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 2610 blob_request_submit_op_split_next, ctx); 2611 break; 2612 case SPDK_BLOB_READV: 2613 case SPDK_BLOB_WRITEV: 2614 SPDK_ERRLOG("readv/writev not valid\n"); 2615 bs_sequence_finish(ctx->seq, -EINVAL); 2616 free(ctx); 2617 return; 2618 } 2619 2620 #ifndef __clang_analyzer__ 2621 /* scan-build reports a false positive around accessing the ctx here. It 2622 * forms a path that recursively calls this function, but then says 2623 * "assuming ctx->in_submit_ctx is false", when that isn't possible. 2624 * This path does free(ctx), returns to here, and reports a use-after-free 2625 * bug. Wrapping this bit of code so that scan-build doesn't see it 2626 * works around the scan-build bug. 2627 */ 2628 assert(ctx->in_submit_ctx); 2629 ctx->in_submit_ctx = false; 2630 2631 /* If the operation completed immediately, loop back and submit the 2632 * next operation. Otherwise we can return and the next split 2633 * operation will get submitted when this current operation is 2634 * later completed asynchronously. 
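 *
 * Why the three flags are needed, as a trace (hypothetical run): if
 * the spdk_blob_io_read() above completes synchronously, this
 * function is re-entered while in_submit_ctx is still true; it only
 * records completed_in_submit_ctx = true (or done = true on the
 * final/error case) and returns. Control then unwinds back to the
 * submitting iteration here, which either loops to submit the next
 * chunk or frees the ctx exactly once.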
2635 */ 2636 if (ctx->completed_in_submit_ctx) { 2637 continue; 2638 } else if (ctx->done) { 2639 free(ctx); 2640 } 2641 #endif 2642 break; 2643 } 2644 } 2645 2646 static void 2647 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 2648 void *payload, uint64_t offset, uint64_t length, 2649 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2650 { 2651 struct op_split_ctx *ctx; 2652 spdk_bs_sequence_t *seq; 2653 struct spdk_bs_cpl cpl; 2654 2655 assert(blob != NULL); 2656 2657 ctx = calloc(1, sizeof(struct op_split_ctx)); 2658 if (ctx == NULL) { 2659 cb_fn(cb_arg, -ENOMEM); 2660 return; 2661 } 2662 2663 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2664 cpl.u.blob_basic.cb_fn = cb_fn; 2665 cpl.u.blob_basic.cb_arg = cb_arg; 2666 2667 seq = bs_sequence_start(ch, &cpl); 2668 if (!seq) { 2669 free(ctx); 2670 cb_fn(cb_arg, -ENOMEM); 2671 return; 2672 } 2673 2674 ctx->blob = blob; 2675 ctx->channel = ch; 2676 ctx->curr_payload = payload; 2677 ctx->io_unit_offset = offset; 2678 ctx->io_units_remaining = length; 2679 ctx->op_type = op_type; 2680 ctx->seq = seq; 2681 2682 blob_request_submit_op_split_next(ctx, 0); 2683 } 2684 2685 static void 2686 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 2687 void *payload, uint64_t offset, uint64_t length, 2688 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2689 { 2690 struct spdk_bs_cpl cpl; 2691 uint64_t lba; 2692 uint64_t lba_count; 2693 bool is_allocated; 2694 2695 assert(blob != NULL); 2696 2697 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2698 cpl.u.blob_basic.cb_fn = cb_fn; 2699 cpl.u.blob_basic.cb_arg = cb_arg; 2700 2701 if (blob->frozen_refcnt) { 2702 /* This blob I/O is frozen */ 2703 spdk_bs_user_op_t *op; 2704 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 2705 2706 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 2707 if (!op) { 2708 cb_fn(cb_arg, -ENOMEM); 2709 return; 2710 } 2711 2712 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2713 2714 return; 2715 } 2716 2717 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2718 2719 switch (op_type) { 2720 case SPDK_BLOB_READ: { 2721 spdk_bs_batch_t *batch; 2722 2723 batch = bs_batch_open(_ch, &cpl); 2724 if (!batch) { 2725 cb_fn(cb_arg, -ENOMEM); 2726 return; 2727 } 2728 2729 if (is_allocated) { 2730 /* Read from the blob */ 2731 bs_batch_read_dev(batch, payload, lba, lba_count); 2732 } else { 2733 /* Read from the backing block device */ 2734 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 2735 } 2736 2737 bs_batch_close(batch); 2738 break; 2739 } 2740 case SPDK_BLOB_WRITE: 2741 case SPDK_BLOB_WRITE_ZEROES: { 2742 if (is_allocated) { 2743 /* Write to the blob */ 2744 spdk_bs_batch_t *batch; 2745 2746 if (lba_count == 0) { 2747 cb_fn(cb_arg, 0); 2748 return; 2749 } 2750 2751 batch = bs_batch_open(_ch, &cpl); 2752 if (!batch) { 2753 cb_fn(cb_arg, -ENOMEM); 2754 return; 2755 } 2756 2757 if (op_type == SPDK_BLOB_WRITE) { 2758 bs_batch_write_dev(batch, payload, lba, lba_count); 2759 } else { 2760 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2761 } 2762 2763 bs_batch_close(batch); 2764 } else { 2765 /* Queue this operation and allocate the cluster */ 2766 spdk_bs_user_op_t *op; 2767 2768 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 2769 if (!op) { 2770 cb_fn(cb_arg, -ENOMEM); 2771 return; 2772 } 2773 2774 bs_allocate_and_copy_cluster(blob, _ch, 
offset, op); 2775 } 2776 break; 2777 } 2778 case SPDK_BLOB_UNMAP: { 2779 spdk_bs_batch_t *batch; 2780 2781 batch = bs_batch_open(_ch, &cpl); 2782 if (!batch) { 2783 cb_fn(cb_arg, -ENOMEM); 2784 return; 2785 } 2786 2787 if (is_allocated) { 2788 bs_batch_unmap_dev(batch, lba, lba_count); 2789 } 2790 2791 bs_batch_close(batch); 2792 break; 2793 } 2794 case SPDK_BLOB_READV: 2795 case SPDK_BLOB_WRITEV: 2796 SPDK_ERRLOG("readv/writev not valid\n"); 2797 cb_fn(cb_arg, -EINVAL); 2798 break; 2799 } 2800 } 2801 2802 static void 2803 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2804 void *payload, uint64_t offset, uint64_t length, 2805 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2806 { 2807 assert(blob != NULL); 2808 2809 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 2810 cb_fn(cb_arg, -EPERM); 2811 return; 2812 } 2813 2814 if (length == 0) { 2815 cb_fn(cb_arg, 0); 2816 return; 2817 } 2818 2819 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2820 cb_fn(cb_arg, -EINVAL); 2821 return; 2822 } 2823 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) { 2824 blob_request_submit_op_single(_channel, blob, payload, offset, length, 2825 cb_fn, cb_arg, op_type); 2826 } else { 2827 blob_request_submit_op_split(_channel, blob, payload, offset, length, 2828 cb_fn, cb_arg, op_type); 2829 } 2830 } 2831 2832 struct rw_iov_ctx { 2833 struct spdk_blob *blob; 2834 struct spdk_io_channel *channel; 2835 spdk_blob_op_complete cb_fn; 2836 void *cb_arg; 2837 bool read; 2838 int iovcnt; 2839 struct iovec *orig_iov; 2840 uint64_t io_unit_offset; 2841 uint64_t io_units_remaining; 2842 uint64_t io_units_done; 2843 struct iovec iov[0]; 2844 }; 2845 2846 static void 2847 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2848 { 2849 assert(cb_arg == NULL); 2850 bs_sequence_finish(seq, bserrno); 2851 } 2852 2853 static void 2854 rw_iov_split_next(void *cb_arg, int bserrno) 2855 { 2856 struct rw_iov_ctx *ctx = cb_arg; 2857 struct spdk_blob *blob = ctx->blob; 2858 struct iovec *iov, *orig_iov; 2859 int iovcnt; 2860 size_t orig_iovoff; 2861 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 2862 uint64_t byte_count; 2863 2864 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2865 ctx->cb_fn(ctx->cb_arg, bserrno); 2866 free(ctx); 2867 return; 2868 } 2869 2870 io_unit_offset = ctx->io_unit_offset; 2871 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 2872 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 2873 /* 2874 * Get the index and offset into the original iov array for our current position in the I/O sequence. 2875 * byte_count tracks how many bytes remain until orig_iov and orig_iovoff 2876 * point to the current position in the I/O sequence. 2877 */ 2878 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 2879 orig_iov = &ctx->orig_iov[0]; 2880 orig_iovoff = 0; 2881 while (byte_count > 0) { 2882 if (byte_count >= orig_iov->iov_len) { 2883 byte_count -= orig_iov->iov_len; 2884 orig_iov++; 2885 } else { 2886 orig_iovoff = byte_count; 2887 byte_count = 0; 2888 } 2889 } 2890 2891 /* 2892 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 2893 * bytes of this next I/O remain to be accounted for in the new iov array. 
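 *
 * Worked example (hypothetical geometry: 512-byte io units, 8 io
 * units per cluster): a 12-io-unit readv with
 * orig_iov = { 4096 bytes, 2048 bytes } starting 4 io units before a
 * cluster boundary splits in two. The first sub-I/O takes 4 io units
 * wholly from iov[0]; on the second pass io_units_done = 4, so the
 * scan above lands at offset 2048 inside iov[0] and the rebuilt array
 * becomes { iov[0] base + 2048 for 2048 bytes, iov[1] for 2048 bytes }.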
2894 */ 2895 byte_count = io_units_count * blob->bs->io_unit_size; 2896 iov = &ctx->iov[0]; 2897 iovcnt = 0; 2898 while (byte_count > 0) { 2899 assert(iovcnt < ctx->iovcnt); 2900 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 2901 iov->iov_base = orig_iov->iov_base + orig_iovoff; 2902 byte_count -= iov->iov_len; 2903 orig_iovoff = 0; 2904 orig_iov++; 2905 iov++; 2906 iovcnt++; 2907 } 2908 2909 ctx->io_unit_offset += io_units_count; 2910 ctx->io_units_remaining -= io_units_count; 2911 ctx->io_units_done += io_units_count; 2912 iov = &ctx->iov[0]; 2913 2914 if (ctx->read) { 2915 spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2916 io_units_count, rw_iov_split_next, ctx); 2917 } else { 2918 spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2919 io_units_count, rw_iov_split_next, ctx); 2920 } 2921 } 2922 2923 static void 2924 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2925 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 2926 spdk_blob_op_complete cb_fn, void *cb_arg, bool read) 2927 { 2928 struct spdk_bs_cpl cpl; 2929 2930 assert(blob != NULL); 2931 2932 if (!read && blob->data_ro) { 2933 cb_fn(cb_arg, -EPERM); 2934 return; 2935 } 2936 2937 if (length == 0) { 2938 cb_fn(cb_arg, 0); 2939 return; 2940 } 2941 2942 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2943 cb_fn(cb_arg, -EINVAL); 2944 return; 2945 } 2946 2947 /* 2948 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 2949 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 2950 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 2951 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 2952 * to allocate a separate iov array and split the I/O such that none of the resulting 2953 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 2954 * but since this case happens very infrequently, any performance impact will be negligible. 2955 * 2956 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 2957 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 2958 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 2959 * when the batch was completed, to allow for freeing the memory for the iov arrays. 2960 */ 2961 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 2962 uint64_t lba_count; 2963 uint64_t lba; 2964 bool is_allocated; 2965 2966 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2967 cpl.u.blob_basic.cb_fn = cb_fn; 2968 cpl.u.blob_basic.cb_arg = cb_arg; 2969 2970 if (blob->frozen_refcnt) { 2971 /* This blob I/O is frozen */ 2972 enum spdk_blob_op_type op_type; 2973 spdk_bs_user_op_t *op; 2974 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 2975 2976 op_type = read ? 
SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 2977 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 2978 if (!op) { 2979 cb_fn(cb_arg, -ENOMEM); 2980 return; 2981 } 2982 2983 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2984 2985 return; 2986 } 2987 2988 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2989 2990 if (read) { 2991 spdk_bs_sequence_t *seq; 2992 2993 seq = bs_sequence_start(_channel, &cpl); 2994 if (!seq) { 2995 cb_fn(cb_arg, -ENOMEM); 2996 return; 2997 } 2998 2999 if (is_allocated) { 3000 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3001 } else { 3002 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3003 rw_iov_done, NULL); 3004 } 3005 } else { 3006 if (is_allocated) { 3007 spdk_bs_sequence_t *seq; 3008 3009 seq = bs_sequence_start(_channel, &cpl); 3010 if (!seq) { 3011 cb_fn(cb_arg, -ENOMEM); 3012 return; 3013 } 3014 3015 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3016 } else { 3017 /* Queue this operation and allocate the cluster */ 3018 spdk_bs_user_op_t *op; 3019 3020 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3021 length); 3022 if (!op) { 3023 cb_fn(cb_arg, -ENOMEM); 3024 return; 3025 } 3026 3027 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3028 } 3029 } 3030 } else { 3031 struct rw_iov_ctx *ctx; 3032 3033 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3034 if (ctx == NULL) { 3035 cb_fn(cb_arg, -ENOMEM); 3036 return; 3037 } 3038 3039 ctx->blob = blob; 3040 ctx->channel = _channel; 3041 ctx->cb_fn = cb_fn; 3042 ctx->cb_arg = cb_arg; 3043 ctx->read = read; 3044 ctx->orig_iov = iov; 3045 ctx->iovcnt = iovcnt; 3046 ctx->io_unit_offset = offset; 3047 ctx->io_units_remaining = length; 3048 ctx->io_units_done = 0; 3049 3050 rw_iov_split_next(ctx, 0); 3051 } 3052 } 3053 3054 static struct spdk_blob * 3055 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3056 { 3057 struct spdk_blob find; 3058 3059 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3060 return NULL; 3061 } 3062 3063 find.id = blobid; 3064 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3065 } 3066 3067 static void 3068 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3069 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3070 { 3071 assert(blob != NULL); 3072 *snapshot_entry = NULL; 3073 *clone_entry = NULL; 3074 3075 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3076 return; 3077 } 3078 3079 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3080 if ((*snapshot_entry)->id == blob->parent_id) { 3081 break; 3082 } 3083 } 3084 3085 if (*snapshot_entry != NULL) { 3086 TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) { 3087 if ((*clone_entry)->id == blob->id) { 3088 break; 3089 } 3090 } 3091 3092 assert(*clone_entry != NULL); 3093 } 3094 } 3095 3096 static int 3097 bs_channel_create(void *io_device, void *ctx_buf) 3098 { 3099 struct spdk_blob_store *bs = io_device; 3100 struct spdk_bs_channel *channel = ctx_buf; 3101 struct spdk_bs_dev *dev; 3102 uint32_t max_ops = bs->max_channel_ops; 3103 uint32_t i; 3104 3105 dev = bs->dev; 3106 3107 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3108 if (!channel->req_mem) { 3109 return -1; 3110 } 3111 3112 TAILQ_INIT(&channel->reqs); 3113 3114 for (i = 0; i < max_ops; i++) { 3115 TAILQ_INSERT_TAIL(&channel->reqs, 
&channel->req_mem[i], link); 3116 } 3117 3118 channel->bs = bs; 3119 channel->dev = dev; 3120 channel->dev_channel = dev->create_channel(dev); 3121 3122 if (!channel->dev_channel) { 3123 SPDK_ERRLOG("Failed to create device channel.\n"); 3124 free(channel->req_mem); 3125 return -1; 3126 } 3127 3128 TAILQ_INIT(&channel->need_cluster_alloc); 3129 TAILQ_INIT(&channel->queued_io); 3130 3131 return 0; 3132 } 3133 3134 static void 3135 bs_channel_destroy(void *io_device, void *ctx_buf) 3136 { 3137 struct spdk_bs_channel *channel = ctx_buf; 3138 spdk_bs_user_op_t *op; 3139 3140 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3141 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3142 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3143 bs_user_op_abort(op); 3144 } 3145 3146 while (!TAILQ_EMPTY(&channel->queued_io)) { 3147 op = TAILQ_FIRST(&channel->queued_io); 3148 TAILQ_REMOVE(&channel->queued_io, op, link); 3149 bs_user_op_abort(op); 3150 } 3151 3152 free(channel->req_mem); 3153 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3154 } 3155 3156 static void 3157 bs_dev_destroy(void *io_device) 3158 { 3159 struct spdk_blob_store *bs = io_device; 3160 struct spdk_blob *blob, *blob_tmp; 3161 3162 bs->dev->destroy(bs->dev); 3163 3164 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3165 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3166 spdk_bit_array_clear(bs->open_blobids, blob->id); 3167 blob_free(blob); 3168 } 3169 3170 pthread_mutex_destroy(&bs->used_clusters_mutex); 3171 3172 spdk_bit_array_free(&bs->open_blobids); 3173 spdk_bit_array_free(&bs->used_blobids); 3174 spdk_bit_array_free(&bs->used_md_pages); 3175 spdk_bit_pool_free(&bs->used_clusters); 3176 /* 3177 * If this function is called for any reason except a successful unload, 3178 * the unload_cpl type will be NONE and this will be a nop. 
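 *
 * Lifecycle note: this is the destroy callback passed to
 * spdk_io_device_unregister() in bs_free(), so it runs only after
 * every channel created by bs_channel_create() has been released.
 * The unload path arms bs->unload_cpl before unregistering; on any
 * other teardown the zero-initialized cpl type stays NONE.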
3179 */ 3180 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3181 3182 free(bs); 3183 } 3184 3185 static int 3186 bs_blob_list_add(struct spdk_blob *blob) 3187 { 3188 spdk_blob_id snapshot_id; 3189 struct spdk_blob_list *snapshot_entry = NULL; 3190 struct spdk_blob_list *clone_entry = NULL; 3191 3192 assert(blob != NULL); 3193 3194 snapshot_id = blob->parent_id; 3195 if (snapshot_id == SPDK_BLOBID_INVALID) { 3196 return 0; 3197 } 3198 3199 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3200 if (snapshot_entry == NULL) { 3201 /* Snapshot not found */ 3202 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3203 if (snapshot_entry == NULL) { 3204 return -ENOMEM; 3205 } 3206 snapshot_entry->id = snapshot_id; 3207 TAILQ_INIT(&snapshot_entry->clones); 3208 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3209 } else { 3210 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3211 if (clone_entry->id == blob->id) { 3212 break; 3213 } 3214 } 3215 } 3216 3217 if (clone_entry == NULL) { 3218 /* Clone not found */ 3219 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3220 if (clone_entry == NULL) { 3221 return -ENOMEM; 3222 } 3223 clone_entry->id = blob->id; 3224 TAILQ_INIT(&clone_entry->clones); 3225 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3226 snapshot_entry->clone_count++; 3227 } 3228 3229 return 0; 3230 } 3231 3232 static void 3233 bs_blob_list_remove(struct spdk_blob *blob) 3234 { 3235 struct spdk_blob_list *snapshot_entry = NULL; 3236 struct spdk_blob_list *clone_entry = NULL; 3237 3238 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3239 3240 if (snapshot_entry == NULL) { 3241 return; 3242 } 3243 3244 blob->parent_id = SPDK_BLOBID_INVALID; 3245 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3246 free(clone_entry); 3247 3248 snapshot_entry->clone_count--; 3249 } 3250 3251 static int 3252 bs_blob_list_free(struct spdk_blob_store *bs) 3253 { 3254 struct spdk_blob_list *snapshot_entry; 3255 struct spdk_blob_list *snapshot_entry_tmp; 3256 struct spdk_blob_list *clone_entry; 3257 struct spdk_blob_list *clone_entry_tmp; 3258 3259 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3260 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3261 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3262 free(clone_entry); 3263 } 3264 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3265 free(snapshot_entry); 3266 } 3267 3268 return 0; 3269 } 3270 3271 static void 3272 bs_free(struct spdk_blob_store *bs) 3273 { 3274 bs_blob_list_free(bs); 3275 3276 bs_unregister_md_thread(bs); 3277 spdk_io_device_unregister(bs, bs_dev_destroy); 3278 } 3279 3280 void 3281 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3282 { 3283 3284 if (!opts) { 3285 SPDK_ERRLOG("opts should not be NULL\n"); 3286 return; 3287 } 3288 3289 if (!opts_size) { 3290 SPDK_ERRLOG("opts_size should not be zero value\n"); 3291 return; 3292 } 3293 3294 memset(opts, 0, opts_size); 3295 opts->opts_size = opts_size; 3296 3297 #define FIELD_OK(field) \ 3298 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3299 3300 #define SET_FIELD(field, value) \ 3301 if (FIELD_OK(field)) { \ 3302 opts->field = value; \ 3303 } \ 3304 3305 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3306 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3307 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3308 SET_FIELD(max_channel_ops, 
SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3309 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3310 3311 if (FIELD_OK(bstype)) { 3312 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3313 } 3314 3315 SET_FIELD(iter_cb_fn, NULL); 3316 SET_FIELD(iter_cb_arg, NULL); 3317 SET_FIELD(force_recover, false); 3318 3319 #undef FIELD_OK 3320 #undef SET_FIELD 3321 } 3322 3323 static int 3324 bs_opts_verify(struct spdk_bs_opts *opts) 3325 { 3326 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3327 opts->max_channel_ops == 0) { 3328 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3329 return -1; 3330 } 3331 3332 return 0; 3333 } 3334 3335 /* START spdk_bs_load */ 3336 3337 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3338 3339 struct spdk_bs_load_ctx { 3340 struct spdk_blob_store *bs; 3341 struct spdk_bs_super_block *super; 3342 3343 struct spdk_bs_md_mask *mask; 3344 bool in_page_chain; 3345 uint32_t page_index; 3346 uint32_t cur_page; 3347 struct spdk_blob_md_page *page; 3348 3349 uint64_t num_extent_pages; 3350 uint32_t *extent_page_num; 3351 struct spdk_blob_md_page *extent_pages; 3352 struct spdk_bit_array *used_clusters; 3353 3354 spdk_bs_sequence_t *seq; 3355 spdk_blob_op_with_handle_complete iter_cb_fn; 3356 void *iter_cb_arg; 3357 struct spdk_blob *blob; 3358 spdk_blob_id blobid; 3359 3360 bool force_recover; 3361 3362 /* These fields are used in the spdk_bs_dump path. */ 3363 bool dumping; 3364 FILE *fp; 3365 spdk_bs_dump_print_xattr print_xattr_fn; 3366 char xattr_name[4096]; 3367 }; 3368 3369 static int 3370 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3371 struct spdk_bs_load_ctx **_ctx) 3372 { 3373 struct spdk_blob_store *bs; 3374 struct spdk_bs_load_ctx *ctx; 3375 uint64_t dev_size; 3376 int rc; 3377 3378 dev_size = dev->blocklen * dev->blockcnt; 3379 if (dev_size < opts->cluster_sz) { 3380 /* Device size cannot be smaller than cluster size of blobstore */ 3381 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3382 dev_size, opts->cluster_sz); 3383 return -ENOSPC; 3384 } 3385 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 3386 /* Cluster size cannot be smaller than page size */ 3387 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3388 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3389 return -EINVAL; 3390 } 3391 bs = calloc(1, sizeof(struct spdk_blob_store)); 3392 if (!bs) { 3393 return -ENOMEM; 3394 } 3395 3396 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3397 if (!ctx) { 3398 free(bs); 3399 return -ENOMEM; 3400 } 3401 3402 ctx->bs = bs; 3403 ctx->iter_cb_fn = opts->iter_cb_fn; 3404 ctx->iter_cb_arg = opts->iter_cb_arg; 3405 ctx->force_recover = opts->force_recover; 3406 3407 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3408 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3409 if (!ctx->super) { 3410 free(ctx); 3411 free(bs); 3412 return -ENOMEM; 3413 } 3414 3415 RB_INIT(&bs->open_blobs); 3416 TAILQ_INIT(&bs->snapshots); 3417 bs->dev = dev; 3418 bs->md_thread = spdk_get_thread(); 3419 assert(bs->md_thread != NULL); 3420 3421 /* 3422 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3423 * even multiple of the cluster size. 
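 *
 * Worked example (hypothetical device): blocklen = 512 and
 * cluster_sz = 1 MiB give 2048 blocks per cluster, so a device with
 * blockcnt = 10000 yields total_clusters = 10000 / 2048 = 4, with the
 * 1808 trailing blocks left unused by the blobstore.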
3424 */ 3425 bs->cluster_sz = opts->cluster_sz; 3426 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3427 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3428 if (!ctx->used_clusters) { 3429 spdk_free(ctx->super); 3430 free(ctx); 3431 free(bs); 3432 return -ENOMEM; 3433 } 3434 3435 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3436 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3437 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3438 } 3439 bs->num_free_clusters = bs->total_clusters; 3440 bs->io_unit_size = dev->blocklen; 3441 3442 bs->max_channel_ops = opts->max_channel_ops; 3443 bs->super_blob = SPDK_BLOBID_INVALID; 3444 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3445 3446 /* The metadata is assumed to be at least 1 page */ 3447 bs->used_md_pages = spdk_bit_array_create(1); 3448 bs->used_blobids = spdk_bit_array_create(0); 3449 bs->open_blobids = spdk_bit_array_create(0); 3450 3451 pthread_mutex_init(&bs->used_clusters_mutex, NULL); 3452 3453 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3454 sizeof(struct spdk_bs_channel), "blobstore"); 3455 rc = bs_register_md_thread(bs); 3456 if (rc == -1) { 3457 spdk_io_device_unregister(bs, NULL); 3458 pthread_mutex_destroy(&bs->used_clusters_mutex); 3459 spdk_bit_array_free(&bs->open_blobids); 3460 spdk_bit_array_free(&bs->used_blobids); 3461 spdk_bit_array_free(&bs->used_md_pages); 3462 spdk_bit_array_free(&ctx->used_clusters); 3463 spdk_free(ctx->super); 3464 free(ctx); 3465 free(bs); 3466 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3467 return -ENOMEM; 3468 } 3469 3470 *_ctx = ctx; 3471 *_bs = bs; 3472 return 0; 3473 } 3474 3475 static void 3476 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3477 { 3478 assert(bserrno != 0); 3479 3480 spdk_free(ctx->super); 3481 bs_sequence_finish(ctx->seq, bserrno); 3482 bs_free(ctx->bs); 3483 spdk_bit_array_free(&ctx->used_clusters); 3484 free(ctx); 3485 } 3486 3487 static void 3488 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3489 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3490 { 3491 /* Update the values in the super block */ 3492 super->super_blob = bs->super_blob; 3493 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 3494 super->crc = blob_md_page_calc_crc(super); 3495 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 3496 bs_byte_to_lba(bs, sizeof(*super)), 3497 cb_fn, cb_arg); 3498 } 3499 3500 static void 3501 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3502 { 3503 struct spdk_bs_load_ctx *ctx = arg; 3504 uint64_t mask_size, lba, lba_count; 3505 3506 /* Write out the used clusters mask */ 3507 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3508 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3509 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3510 if (!ctx->mask) { 3511 bs_load_ctx_fail(ctx, -ENOMEM); 3512 return; 3513 } 3514 3515 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 3516 ctx->mask->length = ctx->bs->total_clusters; 3517 /* We could get here through the normal unload path, or through dirty 3518 * shutdown recovery. For the normal unload path, we use the mask from 3519 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 3520 * only the bit array from the load ctx. 
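 *
 * For sizing intuition (hypothetical store): 32768 clusters need
 * 32768 bits = 4096 bytes of mask, which together with the
 * spdk_bs_md_mask header no longer fits in one SPDK_BS_PAGE_SIZE
 * page, giving used_cluster_mask_len = 2; mask_size above is simply
 * that page count converted to bytes.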
3521 */ 3522 if (ctx->bs->used_clusters) { 3523 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 3524 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 3525 } else { 3526 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 3527 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 3528 } 3529 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3530 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3531 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3532 } 3533 3534 static void 3535 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3536 { 3537 struct spdk_bs_load_ctx *ctx = arg; 3538 uint64_t mask_size, lba, lba_count; 3539 3540 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3541 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3542 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3543 if (!ctx->mask) { 3544 bs_load_ctx_fail(ctx, -ENOMEM); 3545 return; 3546 } 3547 3548 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 3549 ctx->mask->length = ctx->super->md_len; 3550 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 3551 3552 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 3553 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3554 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3555 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3556 } 3557 3558 static void 3559 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3560 { 3561 struct spdk_bs_load_ctx *ctx = arg; 3562 uint64_t mask_size, lba, lba_count; 3563 3564 if (ctx->super->used_blobid_mask_len == 0) { 3565 /* 3566 * This is a pre-v3 on-disk format where the blobid mask does not get 3567 * written to disk. 
*/ 3569 cb_fn(seq, arg, 0); 3570 return; 3571 } 3572 3573 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3574 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3575 SPDK_MALLOC_DMA); 3576 if (!ctx->mask) { 3577 bs_load_ctx_fail(ctx, -ENOMEM); 3578 return; 3579 } 3580 3581 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 3582 ctx->mask->length = ctx->super->md_len; 3583 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 3584 3585 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 3586 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3587 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3588 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3589 } 3590 3591 static void 3592 blob_set_thin_provision(struct spdk_blob *blob) 3593 { 3594 blob_verify_md_op(blob); 3595 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 3596 blob->state = SPDK_BLOB_STATE_DIRTY; 3597 } 3598 3599 static void 3600 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 3601 { 3602 blob_verify_md_op(blob); 3603 blob->clear_method = clear_method; 3604 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 3605 blob->state = SPDK_BLOB_STATE_DIRTY; 3606 } 3607 3608 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 3609 3610 static void 3611 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 3612 { 3613 struct spdk_bs_load_ctx *ctx = cb_arg; 3614 spdk_blob_id id; 3615 int64_t page_num; 3616 3617 /* Iterate to the next blob (we can't use the spdk_bs_iter_next function, as our 3618 * last blob has been removed) */ 3619 page_num = bs_blobid_to_page(ctx->blobid); 3620 page_num++; 3621 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 3622 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 3623 bs_load_iter(ctx, NULL, -ENOENT); 3624 return; 3625 } 3626 3627 id = bs_page_to_blobid(page_num); 3628 3629 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 3630 } 3631 3632 static void 3633 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 3634 { 3635 struct spdk_bs_load_ctx *ctx = cb_arg; 3636 3637 if (bserrno != 0) { 3638 SPDK_ERRLOG("Failed to close corrupted blob\n"); 3639 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3640 return; 3641 } 3642 3643 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 3644 } 3645 3646 static void 3647 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 3648 { 3649 struct spdk_bs_load_ctx *ctx = cb_arg; 3650 uint64_t i; 3651 3652 if (bserrno != 0) { 3653 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 3654 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3655 return; 3656 } 3657 3658 /* Snapshot and clone have the same copy of the cluster map and extent pages 3659 * at this point. Clear both for the snapshot now, 3660 * so that they won't be cleared for the clone later when we remove the snapshot. 
3661 * Also set thin provision to pass data corruption check */ 3662 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 3663 ctx->blob->active.clusters[i] = 0; 3664 } 3665 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 3666 ctx->blob->active.extent_pages[i] = 0; 3667 } 3668 3669 ctx->blob->md_ro = false; 3670 3671 blob_set_thin_provision(ctx->blob); 3672 3673 ctx->blobid = ctx->blob->id; 3674 3675 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 3676 } 3677 3678 static void 3679 bs_update_corrupted_blob(void *cb_arg, int bserrno) 3680 { 3681 struct spdk_bs_load_ctx *ctx = cb_arg; 3682 3683 if (bserrno != 0) { 3684 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 3685 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3686 return; 3687 } 3688 3689 ctx->blob->md_ro = false; 3690 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 3691 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 3692 spdk_blob_set_read_only(ctx->blob); 3693 3694 if (ctx->iter_cb_fn) { 3695 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 3696 } 3697 bs_blob_list_add(ctx->blob); 3698 3699 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3700 } 3701 3702 static void 3703 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 3704 { 3705 struct spdk_bs_load_ctx *ctx = cb_arg; 3706 3707 if (bserrno != 0) { 3708 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 3709 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3710 return; 3711 } 3712 3713 if (blob->parent_id == ctx->blob->id) { 3714 /* Power failure occurred before updating clone (snapshot delete case) 3715 * or after updating clone (creating snapshot case) - keep snapshot */ 3716 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 3717 } else { 3718 /* Power failure occurred after updating clone (snapshot delete case) 3719 * or before updating clone (creating snapshot case) - remove snapshot */ 3720 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 3721 } 3722 } 3723 3724 static void 3725 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 3726 { 3727 struct spdk_bs_load_ctx *ctx = arg; 3728 const void *value; 3729 size_t len; 3730 int rc = 0; 3731 3732 if (bserrno == 0) { 3733 /* Examine blob if it is corrupted after power failure. Fix 3734 * the ones that can be fixed and remove any other corrupted 3735 * ones. If it is not corrupted just process it */ 3736 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 3737 if (rc != 0) { 3738 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 3739 if (rc != 0) { 3740 /* Not corrupted - process it and continue with iterating through blobs */ 3741 if (ctx->iter_cb_fn) { 3742 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 3743 } 3744 bs_blob_list_add(blob); 3745 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 3746 return; 3747 } 3748 3749 } 3750 3751 assert(len == sizeof(spdk_blob_id)); 3752 3753 ctx->blob = blob; 3754 3755 /* Open clone to check if we are able to fix this blob or should we remove it */ 3756 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 3757 return; 3758 } else if (bserrno == -ENOENT) { 3759 bserrno = 0; 3760 } else { 3761 /* 3762 * This case needs to be looked at further. Same problem 3763 * exists with applications that rely on explicit blob 3764 * iteration. We should just skip the blob that failed 3765 * to load and continue on to the next one. 
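 *
 * A sketch of what that skip could look like, mirroring
 * bs_delete_corrupted_blob_cpl() above (an untested idea, not current
 * behavior; as written, iteration stops and the load finishes with
 * the error). Here failed_blobid is a hypothetical copy of the id
 * that failed to load:
 *
 *	page_num = bs_blobid_to_page(failed_blobid) + 1;
 *	page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
 *	if (page_num < spdk_bit_array_capacity(ctx->bs->used_blobids)) {
 *		spdk_bs_open_blob(ctx->bs, bs_page_to_blobid(page_num),
 *				  bs_load_iter, ctx);
 *		return;
 *	}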
3766 */ 3767 SPDK_ERRLOG("Error in iterating blobs\n"); 3768 } 3769 3770 ctx->iter_cb_fn = NULL; 3771 3772 spdk_free(ctx->super); 3773 spdk_free(ctx->mask); 3774 bs_sequence_finish(ctx->seq, bserrno); 3775 free(ctx); 3776 } 3777 3778 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 3779 3780 static void 3781 bs_load_complete(struct spdk_bs_load_ctx *ctx) 3782 { 3783 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters); 3784 if (ctx->dumping) { 3785 bs_dump_read_md_page(ctx->seq, ctx); 3786 return; 3787 } 3788 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx); 3789 } 3790 3791 static void 3792 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3793 { 3794 struct spdk_bs_load_ctx *ctx = cb_arg; 3795 int rc; 3796 3797 /* The type must be correct */ 3798 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 3799 3800 /* The length of the mask (in bits) must not be greater than 3801 * the length of the buffer (converted to bits) */ 3802 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 3803 3804 /* The length of the mask must be exactly equal to the size 3805 * (in pages) of the metadata region */ 3806 assert(ctx->mask->length == ctx->super->md_len); 3807 3808 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length); 3809 if (rc < 0) { 3810 spdk_free(ctx->mask); 3811 bs_load_ctx_fail(ctx, rc); 3812 return; 3813 } 3814 3815 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask); 3816 bs_load_complete(ctx); 3817 } 3818 3819 static void 3820 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3821 { 3822 struct spdk_bs_load_ctx *ctx = cb_arg; 3823 uint64_t lba, lba_count, mask_size; 3824 int rc; 3825 3826 if (bserrno != 0) { 3827 bs_load_ctx_fail(ctx, bserrno); 3828 return; 3829 } 3830 3831 /* The type must be correct */ 3832 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 3833 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 3834 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 3835 struct spdk_blob_md_page) * 8)); 3836 /* The length of the mask must be exactly equal to the total number of clusters */ 3837 assert(ctx->mask->length == ctx->bs->total_clusters); 3838 3839 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length); 3840 if (rc < 0) { 3841 spdk_free(ctx->mask); 3842 bs_load_ctx_fail(ctx, rc); 3843 return; 3844 } 3845 3846 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask); 3847 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters); 3848 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 3849 3850 spdk_free(ctx->mask); 3851 3852 /* Read the used blobids mask */ 3853 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3854 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3855 SPDK_MALLOC_DMA); 3856 if (!ctx->mask) { 3857 bs_load_ctx_fail(ctx, -ENOMEM); 3858 return; 3859 } 3860 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3861 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3862 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 3863 bs_load_used_blobids_cpl, ctx); 3864 } 3865 3866 static void 3867 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3868 { 3869 struct spdk_bs_load_ctx *ctx = cb_arg; 3870 uint64_t lba, lba_count, mask_size; 3871 int rc; 3872 3873 if (bserrno 
!= 0) { 3874 bs_load_ctx_fail(ctx, bserrno); 3875 return; 3876 } 3877 3878 /* The type must be correct */ 3879 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 3880 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 3881 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 3882 8)); 3883 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 3884 if (ctx->mask->length != ctx->super->md_len) { 3885 SPDK_ERRLOG("mismatched md_len in used_pages mask: " 3886 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n", 3887 ctx->mask->length, ctx->super->md_len); 3888 assert(false); 3889 } 3890 3891 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length); 3892 if (rc < 0) { 3893 spdk_free(ctx->mask); 3894 bs_load_ctx_fail(ctx, rc); 3895 return; 3896 } 3897 3898 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask); 3899 spdk_free(ctx->mask); 3900 3901 /* Read the used clusters mask */ 3902 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3903 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3904 SPDK_MALLOC_DMA); 3905 if (!ctx->mask) { 3906 bs_load_ctx_fail(ctx, -ENOMEM); 3907 return; 3908 } 3909 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3910 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3911 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 3912 bs_load_used_clusters_cpl, ctx); 3913 } 3914 3915 static void 3916 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx) 3917 { 3918 uint64_t lba, lba_count, mask_size; 3919 3920 /* Read the used pages mask */ 3921 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3922 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3923 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3924 if (!ctx->mask) { 3925 bs_load_ctx_fail(ctx, -ENOMEM); 3926 return; 3927 } 3928 3929 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3930 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3931 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 3932 bs_load_used_pages_cpl, ctx); 3933 } 3934 3935 static int 3936 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 3937 { 3938 struct spdk_blob_store *bs = ctx->bs; 3939 struct spdk_blob_md_descriptor *desc; 3940 size_t cur_desc = 0; 3941 3942 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3943 while (cur_desc < sizeof(page->descriptors)) { 3944 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 3945 if (desc->length == 0) { 3946 /* If padding and length are 0, this terminates the page */ 3947 break; 3948 } 3949 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 3950 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 3951 unsigned int i, j; 3952 unsigned int cluster_count = 0; 3953 uint32_t cluster_idx; 3954 3955 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 3956 3957 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 3958 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 3959 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 3960 /* 3961 * cluster_idx = 0 means an unallocated cluster - don't mark that 3962 * in the used cluster map. 
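					 * (Such zero runs are how holes in thin-provisioned blobs are encoded.)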
3963 */ 3964 if (cluster_idx != 0) { 3965 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 3966 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 3967 if (bs->num_free_clusters == 0) { 3968 return -ENOSPC; 3969 } 3970 bs->num_free_clusters--; 3971 } 3972 cluster_count++; 3973 } 3974 } 3975 if (cluster_count == 0) { 3976 return -EINVAL; 3977 } 3978 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 3979 struct spdk_blob_md_descriptor_extent_page *desc_extent; 3980 uint32_t i; 3981 uint32_t cluster_count = 0; 3982 uint32_t cluster_idx; 3983 size_t cluster_idx_length; 3984 3985 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 3986 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 3987 3988 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 3989 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 3990 return -EINVAL; 3991 } 3992 3993 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 3994 cluster_idx = desc_extent->cluster_idx[i]; 3995 /* 3996 * cluster_idx = 0 means an unallocated cluster - don't mark that 3997 * in the used cluster map. 3998 */ 3999 if (cluster_idx != 0) { 4000 if (cluster_idx < desc_extent->start_cluster_idx && 4001 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4002 return -EINVAL; 4003 } 4004 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4005 if (bs->num_free_clusters == 0) { 4006 return -ENOSPC; 4007 } 4008 bs->num_free_clusters--; 4009 } 4010 cluster_count++; 4011 } 4012 4013 if (cluster_count == 0) { 4014 return -EINVAL; 4015 } 4016 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4017 /* Skip this item */ 4018 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4019 /* Skip this item */ 4020 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4021 /* Skip this item */ 4022 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4023 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4024 uint32_t num_extent_pages = ctx->num_extent_pages; 4025 uint32_t i; 4026 size_t extent_pages_length; 4027 void *tmp; 4028 4029 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4030 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4031 4032 if (desc_extent_table->length == 0 || 4033 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4034 return -EINVAL; 4035 } 4036 4037 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4038 if (desc_extent_table->extent_page[i].page_idx != 0) { 4039 if (desc_extent_table->extent_page[i].num_pages != 1) { 4040 return -EINVAL; 4041 } 4042 num_extent_pages += 1; 4043 } 4044 } 4045 4046 if (num_extent_pages > 0) { 4047 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4048 if (tmp == NULL) { 4049 return -ENOMEM; 4050 } 4051 ctx->extent_page_num = tmp; 4052 4053 /* Extent table entries contain md page numbers for extent pages. 4054 * Zeroes represent unallocated extent pages, those are run-length-encoded. 
4055 */ 4056 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4057 if (desc_extent_table->extent_page[i].page_idx != 0) { 4058 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4059 ctx->num_extent_pages += 1; 4060 } 4061 } 4062 } 4063 } else { 4064 /* Error */ 4065 return -EINVAL; 4066 } 4067 /* Advance to the next descriptor */ 4068 cur_desc += sizeof(*desc) + desc->length; 4069 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4070 break; 4071 } 4072 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4073 } 4074 return 0; 4075 } 4076 4077 static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4078 { 4079 uint32_t crc; 4080 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4081 size_t desc_len; 4082 4083 crc = blob_md_page_calc_crc(page); 4084 if (crc != page->crc) { 4085 return false; 4086 } 4087 4088 /* Extent page should always be of sequence num 0. */ 4089 if (page->sequence_num != 0) { 4090 return false; 4091 } 4092 4093 /* Descriptor type must be EXTENT_PAGE. */ 4094 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4095 return false; 4096 } 4097 4098 /* Descriptor length cannot exceed the page. */ 4099 desc_len = sizeof(*desc) + desc->length; 4100 if (desc_len > sizeof(page->descriptors)) { 4101 return false; 4102 } 4103 4104 /* It has to be the only descriptor in the page. */ 4105 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 4106 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 4107 if (desc->length != 0) { 4108 return false; 4109 } 4110 } 4111 4112 return true; 4113 } 4114 4115 static bool bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 4116 { 4117 uint32_t crc; 4118 struct spdk_blob_md_page *page = ctx->page; 4119 4120 crc = blob_md_page_calc_crc(page); 4121 if (crc != page->crc) { 4122 return false; 4123 } 4124 4125 /* First page of a sequence should match the blobid. 
	 */
	if (page->sequence_num == 0 &&
	    bs_page_to_blobid(ctx->cur_page) != page->id) {
		return false;
	}
	assert(bs_load_cur_extent_page_valid(page) == false);

	return true;
}

static void
bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);

static void
bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	bs_load_complete(ctx);
}

static void
bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl);
}

static void
bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl);
}

static void
bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
{
	bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl);
}

static void
bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
{
	uint64_t num_md_clusters;
	uint64_t i;

	ctx->in_page_chain = false;

	do {
		ctx->page_index++;
	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);

	if (ctx->page_index < ctx->super->md_len) {
		ctx->cur_page = ctx->page_index;
		bs_load_replay_cur_md_page(ctx);
	} else {
		/* Claim all of the clusters used by the metadata */
		num_md_clusters = spdk_divide_round_up(
					  ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster);
		for (i = 0; i < num_md_clusters; i++) {
			spdk_bit_array_set(ctx->used_clusters, i);
		}
		ctx->bs->num_free_clusters -= num_md_clusters;
		spdk_free(ctx->page);
		bs_load_write_used_md(ctx);
	}
}

static void
bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint32_t page_num;
	uint64_t i;

	if (bserrno != 0) {
		spdk_free(ctx->extent_pages);
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	for (i = 0; i < ctx->num_extent_pages; i++) {
		/* Extent pages are only read when they are referenced from the in-chain md.
		 * The integrity of the md is not right if such a page is not a valid extent page.
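		 * Any such page is treated as metadata corruption, and the load
		 * fails with -EILSEQ below.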
*/ 4233 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4234 spdk_free(ctx->extent_pages); 4235 bs_load_ctx_fail(ctx, -EILSEQ); 4236 return; 4237 } 4238 4239 page_num = ctx->extent_page_num[i]; 4240 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4241 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4242 spdk_free(ctx->extent_pages); 4243 bs_load_ctx_fail(ctx, -EILSEQ); 4244 return; 4245 } 4246 } 4247 4248 spdk_free(ctx->extent_pages); 4249 free(ctx->extent_page_num); 4250 ctx->extent_page_num = NULL; 4251 ctx->num_extent_pages = 0; 4252 4253 bs_load_replay_md_chain_cpl(ctx); 4254 } 4255 4256 static void 4257 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4258 { 4259 spdk_bs_batch_t *batch; 4260 uint32_t page; 4261 uint64_t lba; 4262 uint64_t i; 4263 4264 ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0, 4265 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4266 if (!ctx->extent_pages) { 4267 bs_load_ctx_fail(ctx, -ENOMEM); 4268 return; 4269 } 4270 4271 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4272 4273 for (i = 0; i < ctx->num_extent_pages; i++) { 4274 page = ctx->extent_page_num[i]; 4275 assert(page < ctx->super->md_len); 4276 lba = bs_md_page_to_lba(ctx->bs, page); 4277 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4278 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE)); 4279 } 4280 4281 bs_batch_close(batch); 4282 } 4283 4284 static void 4285 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4286 { 4287 struct spdk_bs_load_ctx *ctx = cb_arg; 4288 uint32_t page_num; 4289 struct spdk_blob_md_page *page; 4290 4291 if (bserrno != 0) { 4292 bs_load_ctx_fail(ctx, bserrno); 4293 return; 4294 } 4295 4296 page_num = ctx->cur_page; 4297 page = ctx->page; 4298 if (bs_load_cur_md_page_valid(ctx) == true) { 4299 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4300 bs_claim_md_page(ctx->bs, page_num); 4301 if (page->sequence_num == 0) { 4302 SPDK_NOTICELOG("Recover: blob %" PRIu32 "\n", page_num); 4303 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4304 } 4305 if (bs_load_replay_md_parse_page(ctx, page)) { 4306 bs_load_ctx_fail(ctx, -EILSEQ); 4307 return; 4308 } 4309 if (page->next != SPDK_INVALID_MD_PAGE) { 4310 ctx->in_page_chain = true; 4311 ctx->cur_page = page->next; 4312 bs_load_replay_cur_md_page(ctx); 4313 return; 4314 } 4315 if (ctx->num_extent_pages != 0) { 4316 bs_load_replay_extent_pages(ctx); 4317 return; 4318 } 4319 } 4320 } 4321 bs_load_replay_md_chain_cpl(ctx); 4322 } 4323 4324 static void 4325 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4326 { 4327 uint64_t lba; 4328 4329 assert(ctx->cur_page < ctx->super->md_len); 4330 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4331 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4332 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4333 bs_load_replay_md_cpl, ctx); 4334 } 4335 4336 static void 4337 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4338 { 4339 ctx->page_index = 0; 4340 ctx->cur_page = 0; 4341 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4342 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4343 if (!ctx->page) { 4344 bs_load_ctx_fail(ctx, -ENOMEM); 4345 return; 4346 } 4347 bs_load_replay_cur_md_page(ctx); 4348 } 4349 4350 static void 4351 bs_recover(struct spdk_bs_load_ctx *ctx) 4352 { 4353 int rc; 4354 4355 SPDK_NOTICELOG("Performing recovery on blobstore\n"); 4356 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4357 if (rc < 0) { 4358 
bs_load_ctx_fail(ctx, -ENOMEM); 4359 return; 4360 } 4361 4362 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4363 if (rc < 0) { 4364 bs_load_ctx_fail(ctx, -ENOMEM); 4365 return; 4366 } 4367 4368 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4369 if (rc < 0) { 4370 bs_load_ctx_fail(ctx, -ENOMEM); 4371 return; 4372 } 4373 4374 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4375 if (rc < 0) { 4376 bs_load_ctx_fail(ctx, -ENOMEM); 4377 return; 4378 } 4379 4380 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4381 bs_load_replay_md(ctx); 4382 } 4383 4384 static int 4385 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4386 { 4387 int rc; 4388 4389 if (ctx->super->size == 0) { 4390 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4391 } 4392 4393 if (ctx->super->io_unit_size == 0) { 4394 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4395 } 4396 4397 ctx->bs->clean = 1; 4398 ctx->bs->cluster_sz = ctx->super->cluster_size; 4399 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4400 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 4401 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) { 4402 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster); 4403 } 4404 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4405 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4406 if (rc < 0) { 4407 return -ENOMEM; 4408 } 4409 ctx->bs->md_start = ctx->super->md_start; 4410 ctx->bs->md_len = ctx->super->md_len; 4411 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4412 if (rc < 0) { 4413 return -ENOMEM; 4414 } 4415 4416 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4417 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4418 ctx->bs->super_blob = ctx->super->super_blob; 4419 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4420 4421 return 0; 4422 } 4423 4424 static void 4425 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4426 { 4427 struct spdk_bs_load_ctx *ctx = cb_arg; 4428 uint32_t crc; 4429 int rc; 4430 static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH]; 4431 4432 if (ctx->super->version > SPDK_BS_VERSION || 4433 ctx->super->version < SPDK_BS_INITIAL_VERSION) { 4434 bs_load_ctx_fail(ctx, -EILSEQ); 4435 return; 4436 } 4437 4438 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 4439 sizeof(ctx->super->signature)) != 0) { 4440 bs_load_ctx_fail(ctx, -EILSEQ); 4441 return; 4442 } 4443 4444 crc = blob_md_page_calc_crc(ctx->super); 4445 if (crc != ctx->super->crc) { 4446 bs_load_ctx_fail(ctx, -EILSEQ); 4447 return; 4448 } 4449 4450 if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 4451 SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n"); 4452 } else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 4453 SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless bstype\n"); 4454 } else { 4455 SPDK_DEBUGLOG(blob, "Unexpected bstype\n"); 4456 SPDK_LOGDUMP(blob, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 4457 SPDK_LOGDUMP(blob, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 4458 bs_load_ctx_fail(ctx, -ENXIO); 4459 return; 4460 } 4461 4462 if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) { 4463 SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 
"\n", 4464 ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size); 4465 bs_load_ctx_fail(ctx, -EILSEQ); 4466 return; 4467 } 4468 4469 rc = bs_parse_super(ctx); 4470 if (rc < 0) { 4471 bs_load_ctx_fail(ctx, rc); 4472 return; 4473 } 4474 4475 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4476 bs_recover(ctx); 4477 } else { 4478 bs_load_read_used_pages(ctx); 4479 } 4480 } 4481 4482 static inline int 4483 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4484 { 4485 4486 if (!src->opts_size) { 4487 SPDK_ERRLOG("opts_size should not be zero value\n"); 4488 return -1; 4489 } 4490 4491 #define FIELD_OK(field) \ 4492 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4493 4494 #define SET_FIELD(field) \ 4495 if (FIELD_OK(field)) { \ 4496 dst->field = src->field; \ 4497 } \ 4498 4499 SET_FIELD(cluster_sz); 4500 SET_FIELD(num_md_pages); 4501 SET_FIELD(max_md_ops); 4502 SET_FIELD(max_channel_ops); 4503 SET_FIELD(clear_method); 4504 4505 if (FIELD_OK(bstype)) { 4506 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 4507 } 4508 SET_FIELD(iter_cb_fn); 4509 SET_FIELD(iter_cb_arg); 4510 SET_FIELD(force_recover); 4511 4512 dst->opts_size = src->opts_size; 4513 4514 /* You should not remove this statement, but need to update the assert statement 4515 * if you add a new field, and also add a corresponding SET_FIELD statement */ 4516 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 72, "Incorrect size"); 4517 4518 #undef FIELD_OK 4519 #undef SET_FIELD 4520 4521 return 0; 4522 } 4523 4524 void 4525 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4526 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4527 { 4528 struct spdk_blob_store *bs; 4529 struct spdk_bs_cpl cpl; 4530 struct spdk_bs_load_ctx *ctx; 4531 struct spdk_bs_opts opts = {}; 4532 int err; 4533 4534 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 4535 4536 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 4537 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 4538 dev->destroy(dev); 4539 cb_fn(cb_arg, NULL, -EINVAL); 4540 return; 4541 } 4542 4543 spdk_bs_opts_init(&opts, sizeof(opts)); 4544 if (o) { 4545 if (bs_opts_copy(o, &opts)) { 4546 return; 4547 } 4548 } 4549 4550 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 4551 dev->destroy(dev); 4552 cb_fn(cb_arg, NULL, -EINVAL); 4553 return; 4554 } 4555 4556 err = bs_alloc(dev, &opts, &bs, &ctx); 4557 if (err) { 4558 dev->destroy(dev); 4559 cb_fn(cb_arg, NULL, err); 4560 return; 4561 } 4562 4563 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 4564 cpl.u.bs_handle.cb_fn = cb_fn; 4565 cpl.u.bs_handle.cb_arg = cb_arg; 4566 cpl.u.bs_handle.bs = bs; 4567 4568 ctx->seq = bs_sequence_start(bs->md_channel, &cpl); 4569 if (!ctx->seq) { 4570 spdk_free(ctx->super); 4571 free(ctx); 4572 bs_free(bs); 4573 cb_fn(cb_arg, NULL, -ENOMEM); 4574 return; 4575 } 4576 4577 /* Read the super block */ 4578 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 4579 bs_byte_to_lba(bs, sizeof(*ctx->super)), 4580 bs_load_super_cpl, ctx); 4581 } 4582 4583 /* END spdk_bs_load */ 4584 4585 /* START spdk_bs_dump */ 4586 4587 static void 4588 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 4589 { 4590 spdk_free(ctx->super); 4591 4592 /* 4593 * We need to defer calling bs_call_cpl() until after 4594 * dev destruction, so tuck these away for later use. 
4595 */ 4596 ctx->bs->unload_err = bserrno; 4597 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 4598 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 4599 4600 bs_sequence_finish(seq, 0); 4601 bs_free(ctx->bs); 4602 free(ctx); 4603 } 4604 4605 static void 4606 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4607 { 4608 struct spdk_blob_md_descriptor_xattr *desc_xattr; 4609 uint32_t i; 4610 const char *type; 4611 4612 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 4613 4614 if (desc_xattr->length != 4615 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 4616 desc_xattr->name_length + desc_xattr->value_length) { 4617 } 4618 4619 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 4620 ctx->xattr_name[desc_xattr->name_length] = '\0'; 4621 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4622 type = "XATTR"; 4623 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4624 type = "XATTR_INTERNAL"; 4625 } else { 4626 assert(false); 4627 type = "XATTR_?"; 4628 } 4629 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name); 4630 fprintf(ctx->fp, " value = \""); 4631 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 4632 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 4633 desc_xattr->value_length); 4634 fprintf(ctx->fp, "\"\n"); 4635 for (i = 0; i < desc_xattr->value_length; i++) { 4636 if (i % 16 == 0) { 4637 fprintf(ctx->fp, " "); 4638 } 4639 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 4640 if ((i + 1) % 16 == 0) { 4641 fprintf(ctx->fp, "\n"); 4642 } 4643 } 4644 if (i % 16 != 0) { 4645 fprintf(ctx->fp, "\n"); 4646 } 4647 } 4648 4649 struct type_flag_desc { 4650 uint64_t mask; 4651 uint64_t val; 4652 const char *name; 4653 }; 4654 4655 static void 4656 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags, 4657 struct type_flag_desc *desc, size_t numflags) 4658 { 4659 uint64_t covered = 0; 4660 size_t i; 4661 4662 for (i = 0; i < numflags; i++) { 4663 if ((desc[i].mask & flags) != desc[i].val) { 4664 continue; 4665 } 4666 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name); 4667 if (desc[i].mask != desc[i].val) { 4668 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")", 4669 desc[i].mask, desc[i].val); 4670 } 4671 fprintf(ctx->fp, "\n"); 4672 covered |= desc[i].mask; 4673 } 4674 if ((flags & ~covered) != 0) { 4675 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered); 4676 } 4677 } 4678 4679 static void 4680 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4681 { 4682 struct spdk_blob_md_descriptor_flags *type_desc; 4683 #define ADD_FLAG(f) { f, f, #f } 4684 #define ADD_MASK_VAL(m, v) { m, v, #v } 4685 static struct type_flag_desc invalid[] = { 4686 ADD_FLAG(SPDK_BLOB_THIN_PROV), 4687 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR), 4688 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE), 4689 }; 4690 static struct type_flag_desc data_ro[] = { 4691 ADD_FLAG(SPDK_BLOB_READ_ONLY), 4692 }; 4693 static struct type_flag_desc md_ro[] = { 4694 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT), 4695 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE), 4696 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP), 4697 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES), 4698 }; 4699 #undef ADD_FLAG 4700 #undef ADD_MASK_VAL 4701 4702 type_desc = (struct 
spdk_blob_md_descriptor_flags *)desc; 4703 fprintf(ctx->fp, "Flags:\n"); 4704 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 4705 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 4706 SPDK_COUNTOF(invalid)); 4707 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 4708 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 4709 SPDK_COUNTOF(data_ro)); 4710 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 4711 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 4712 SPDK_COUNTOF(md_ro)); 4713 } 4714 4715 static void 4716 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4717 { 4718 struct spdk_blob_md_descriptor_extent_table *et_desc; 4719 uint64_t num_extent_pages; 4720 uint32_t et_idx; 4721 4722 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 4723 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 4724 sizeof(et_desc->extent_page[0]); 4725 4726 fprintf(ctx->fp, "Extent table:\n"); 4727 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 4728 if (et_desc->extent_page[et_idx].page_idx == 0) { 4729 /* Zeroes represent unallocated extent pages. */ 4730 continue; 4731 } 4732 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 4733 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 4734 et_desc->extent_page[et_idx].num_pages, 4735 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 4736 } 4737 } 4738 4739 static void 4740 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 4741 { 4742 uint32_t page_idx = ctx->cur_page; 4743 struct spdk_blob_md_page *page = ctx->page; 4744 struct spdk_blob_md_descriptor *desc; 4745 size_t cur_desc = 0; 4746 uint32_t crc; 4747 4748 fprintf(ctx->fp, "=========\n"); 4749 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 4750 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 4751 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 4752 if (page->next == SPDK_INVALID_MD_PAGE) { 4753 fprintf(ctx->fp, "Next: None\n"); 4754 } else { 4755 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 4756 } 4757 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 4758 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 4759 fprintf(ctx->fp, " md"); 4760 } 4761 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 4762 fprintf(ctx->fp, " blob"); 4763 } 4764 fprintf(ctx->fp, "\n"); 4765 4766 crc = blob_md_page_calc_crc(page); 4767 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 4768 4769 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4770 while (cur_desc < sizeof(page->descriptors)) { 4771 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4772 if (desc->length == 0) { 4773 /* If padding and length are 0, this terminates the page */ 4774 break; 4775 } 4776 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4777 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4778 unsigned int i; 4779 4780 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4781 4782 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4783 if (desc_extent_rle->extents[i].cluster_idx != 0) { 4784 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 4785 desc_extent_rle->extents[i].cluster_idx); 4786 } else { 4787 fprintf(ctx->fp, "Unallocated Extent - "); 4788 } 4789 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 4790 fprintf(ctx->fp, "\n"); 4791 } 4792 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4793 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4794 unsigned int i; 4795 4796 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4797 4798 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 4799 if (desc_extent->cluster_idx[i] != 0) { 4800 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 4801 desc_extent->cluster_idx[i]); 4802 } else { 4803 fprintf(ctx->fp, "Unallocated Extent"); 4804 } 4805 fprintf(ctx->fp, "\n"); 4806 } 4807 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4808 bs_dump_print_xattr(ctx, desc); 4809 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4810 bs_dump_print_xattr(ctx, desc); 4811 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4812 bs_dump_print_type_flags(ctx, desc); 4813 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4814 bs_dump_print_extent_table(ctx, desc); 4815 } else { 4816 /* Error */ 4817 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 4818 } 4819 /* Advance to the next descriptor */ 4820 cur_desc += sizeof(*desc) + desc->length; 4821 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4822 break; 4823 } 4824 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4825 } 4826 } 4827 4828 static void 4829 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4830 { 4831 struct spdk_bs_load_ctx *ctx = cb_arg; 4832 4833 if (bserrno != 0) { 4834 bs_dump_finish(seq, ctx, bserrno); 4835 return; 4836 } 4837 4838 if (ctx->page->id != 0) { 4839 bs_dump_print_md_page(ctx); 4840 } 4841 4842 ctx->cur_page++; 4843 4844 if (ctx->cur_page < ctx->super->md_len) { 4845 bs_dump_read_md_page(seq, ctx); 4846 } else { 4847 spdk_free(ctx->page); 4848 bs_dump_finish(seq, ctx, 0); 4849 } 4850 } 4851 4852 static void 4853 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 4854 { 4855 struct spdk_bs_load_ctx *ctx = cb_arg; 4856 uint64_t lba; 4857 4858 assert(ctx->cur_page < ctx->super->md_len); 4859 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 4860 bs_sequence_read_dev(seq, ctx->page, lba, 4861 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4862 bs_dump_read_md_page_cpl, ctx); 4863 } 4864 4865 static void 4866 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4867 { 4868 struct spdk_bs_load_ctx *ctx = cb_arg; 4869 int rc; 4870 4871 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 4872 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 4873 sizeof(ctx->super->signature)) != 0) { 4874 fprintf(ctx->fp, "(Mismatch)\n"); 4875 bs_dump_finish(seq, ctx, bserrno); 4876 return; 4877 } else { 4878 fprintf(ctx->fp, "(OK)\n"); 4879 } 4880 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 4881 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 4882 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 4883 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 4884 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 4885 fprintf(ctx->fp, "Super Blob ID: "); 4886 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 4887 fprintf(ctx->fp, "(None)\n"); 4888 } else { 4889 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 4890 } 4891 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 4892 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 4893 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 4894 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 4895 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 4896 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 4897 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 4898 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 4899 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 4900 4901 ctx->cur_page = 0; 4902 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4903 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4904 if (!ctx->page) { 4905 bs_dump_finish(seq, ctx, -ENOMEM); 4906 return; 4907 } 4908 4909 rc = bs_parse_super(ctx); 4910 if (rc < 0) { 4911 bs_load_ctx_fail(ctx, rc); 4912 return; 4913 } 4914 4915 bs_load_read_used_pages(ctx); 4916 } 4917 4918 void 4919 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 4920 spdk_bs_op_complete cb_fn, void *cb_arg) 4921 { 4922 struct spdk_blob_store *bs; 4923 struct spdk_bs_cpl cpl; 4924 struct spdk_bs_load_ctx *ctx; 4925 struct spdk_bs_opts opts = {}; 4926 int err; 4927 4928 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 4929 4930 spdk_bs_opts_init(&opts, sizeof(opts)); 4931 4932 err = bs_alloc(dev, &opts, &bs, &ctx); 4933 if (err) { 4934 dev->destroy(dev); 4935 cb_fn(cb_arg, err); 4936 return; 4937 } 4938 4939 ctx->dumping = true; 4940 ctx->fp = fp; 4941 ctx->print_xattr_fn = print_xattr_fn; 4942 4943 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 4944 cpl.u.bs_basic.cb_fn = cb_fn; 4945 cpl.u.bs_basic.cb_arg = cb_arg; 4946 4947 ctx->seq = bs_sequence_start(bs->md_channel, &cpl); 4948 if (!ctx->seq) { 4949 spdk_free(ctx->super); 4950 free(ctx); 4951 bs_free(bs); 4952 cb_fn(cb_arg, -ENOMEM); 4953 return; 4954 } 4955 4956 /* Read the super block */ 4957 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 4958 bs_byte_to_lba(bs, sizeof(*ctx->super)), 4959 bs_dump_super_cpl, ctx); 4960 } 4961 4962 /* END spdk_bs_dump */ 4963 4964 /* START spdk_bs_init */ 4965 4966 static void 4967 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4968 { 4969 struct spdk_bs_load_ctx *ctx = cb_arg; 4970 4971 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 4972 spdk_free(ctx->super); 4973 free(ctx); 4974 4975 bs_sequence_finish(seq, bserrno); 4976 } 4977 4978 static void 4979 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4980 { 4981 struct spdk_bs_load_ctx *ctx = cb_arg; 4982 4983 /* Write super block */ 4984 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 4985 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 4986 bs_init_persist_super_cpl, ctx); 4987 } 4988 4989 void 4990 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4991 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4992 { 4993 struct spdk_bs_load_ctx *ctx; 4994 struct spdk_blob_store *bs; 4995 struct spdk_bs_cpl cpl; 4996 spdk_bs_sequence_t *seq; 4997 spdk_bs_batch_t *batch; 4998 uint64_t num_md_lba; 4999 uint64_t num_md_pages; 5000 uint64_t num_md_clusters; 5001 uint32_t i; 5002 struct spdk_bs_opts opts = {}; 5003 int rc; 5004 uint64_t lba, lba_count; 5005 5006 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5007 5008 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5009 SPDK_ERRLOG("unsupported dev block length of %d\n", 5010 dev->blocklen); 5011 dev->destroy(dev); 5012 cb_fn(cb_arg, NULL, -EINVAL); 5013 return; 5014 } 5015 5016 spdk_bs_opts_init(&opts, sizeof(opts)); 5017 if (o) { 5018 if (bs_opts_copy(o, &opts)) { 5019 return; 5020 } 5021 } 5022 5023 if (bs_opts_verify(&opts) != 0) { 5024 dev->destroy(dev); 5025 cb_fn(cb_arg, NULL, -EINVAL); 5026 return; 5027 } 5028 5029 rc = bs_alloc(dev, &opts, &bs, &ctx); 5030 if (rc) { 5031 dev->destroy(dev); 5032 cb_fn(cb_arg, NULL, rc); 5033 return; 5034 } 5035 5036 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5037 /* By default, allocate 1 page per cluster. 5038 * Technically, this over-allocates metadata 5039 * because more metadata will reduce the number 5040 * of usable clusters. This can be addressed with 5041 * more complex math in the future. 5042 */ 5043 bs->md_len = bs->total_clusters; 5044 } else { 5045 bs->md_len = opts.num_md_pages; 5046 } 5047 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5048 if (rc < 0) { 5049 spdk_free(ctx->super); 5050 free(ctx); 5051 bs_free(bs); 5052 cb_fn(cb_arg, NULL, -ENOMEM); 5053 return; 5054 } 5055 5056 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5057 if (rc < 0) { 5058 spdk_free(ctx->super); 5059 free(ctx); 5060 bs_free(bs); 5061 cb_fn(cb_arg, NULL, -ENOMEM); 5062 return; 5063 } 5064 5065 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5066 if (rc < 0) { 5067 spdk_free(ctx->super); 5068 free(ctx); 5069 bs_free(bs); 5070 cb_fn(cb_arg, NULL, -ENOMEM); 5071 return; 5072 } 5073 5074 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5075 sizeof(ctx->super->signature)); 5076 ctx->super->version = SPDK_BS_VERSION; 5077 ctx->super->length = sizeof(*ctx->super); 5078 ctx->super->super_blob = bs->super_blob; 5079 ctx->super->clean = 0; 5080 ctx->super->cluster_size = bs->cluster_sz; 5081 ctx->super->io_unit_size = bs->io_unit_size; 5082 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5083 5084 /* Calculate how many pages the metadata consumes at the front 5085 * of the disk. 5086 */ 5087 5088 /* The super block uses 1 page */ 5089 num_md_pages = 1; 5090 5091 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5092 * up to the nearest page, plus a header. 
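	 * For example, with 4 KiB pages and md_len = 8192 metadata pages, the
	 * mask needs 8192 bits = 1 KiB, which together with the small
	 * spdk_bs_md_mask header still rounds up to a single page.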
5093 */ 5094 ctx->super->used_page_mask_start = num_md_pages; 5095 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5096 spdk_divide_round_up(bs->md_len, 8), 5097 SPDK_BS_PAGE_SIZE); 5098 num_md_pages += ctx->super->used_page_mask_len; 5099 5100 /* The used_clusters mask requires 1 bit per cluster, rounded 5101 * up to the nearest page, plus a header. 5102 */ 5103 ctx->super->used_cluster_mask_start = num_md_pages; 5104 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5105 spdk_divide_round_up(bs->total_clusters, 8), 5106 SPDK_BS_PAGE_SIZE); 5107 num_md_pages += ctx->super->used_cluster_mask_len; 5108 5109 /* The used_blobids mask requires 1 bit per metadata page, rounded 5110 * up to the nearest page, plus a header. 5111 */ 5112 ctx->super->used_blobid_mask_start = num_md_pages; 5113 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5114 spdk_divide_round_up(bs->md_len, 8), 5115 SPDK_BS_PAGE_SIZE); 5116 num_md_pages += ctx->super->used_blobid_mask_len; 5117 5118 /* The metadata region size was chosen above */ 5119 ctx->super->md_start = bs->md_start = num_md_pages; 5120 ctx->super->md_len = bs->md_len; 5121 num_md_pages += bs->md_len; 5122 5123 num_md_lba = bs_page_to_lba(bs, num_md_pages); 5124 5125 ctx->super->size = dev->blockcnt * dev->blocklen; 5126 5127 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 5128 5129 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 5130 if (num_md_clusters > bs->total_clusters) { 5131 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 5132 "please decrease number of pages reserved for metadata " 5133 "or increase cluster size.\n"); 5134 spdk_free(ctx->super); 5135 spdk_bit_array_free(&ctx->used_clusters); 5136 free(ctx); 5137 bs_free(bs); 5138 cb_fn(cb_arg, NULL, -ENOMEM); 5139 return; 5140 } 5141 /* Claim all of the clusters used by the metadata */ 5142 for (i = 0; i < num_md_clusters; i++) { 5143 spdk_bit_array_set(ctx->used_clusters, i); 5144 } 5145 5146 bs->num_free_clusters -= num_md_clusters; 5147 bs->total_data_clusters = bs->num_free_clusters; 5148 5149 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5150 cpl.u.bs_handle.cb_fn = cb_fn; 5151 cpl.u.bs_handle.cb_arg = cb_arg; 5152 cpl.u.bs_handle.bs = bs; 5153 5154 seq = bs_sequence_start(bs->md_channel, &cpl); 5155 if (!seq) { 5156 spdk_free(ctx->super); 5157 free(ctx); 5158 bs_free(bs); 5159 cb_fn(cb_arg, NULL, -ENOMEM); 5160 return; 5161 } 5162 5163 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx); 5164 5165 /* Clear metadata space */ 5166 bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 5167 5168 lba = num_md_lba; 5169 lba_count = ctx->bs->dev->blockcnt - lba; 5170 switch (opts.clear_method) { 5171 case BS_CLEAR_WITH_UNMAP: 5172 /* Trim data clusters */ 5173 bs_batch_unmap_dev(batch, lba, lba_count); 5174 break; 5175 case BS_CLEAR_WITH_WRITE_ZEROES: 5176 /* Write_zeroes to data clusters */ 5177 bs_batch_write_zeroes_dev(batch, lba, lba_count); 5178 break; 5179 case BS_CLEAR_WITH_NONE: 5180 default: 5181 break; 5182 } 5183 5184 bs_batch_close(batch); 5185 } 5186 5187 /* END spdk_bs_init */ 5188 5189 /* START spdk_bs_destroy */ 5190 5191 static void 5192 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5193 { 5194 struct spdk_bs_load_ctx *ctx = cb_arg; 5195 struct spdk_blob_store *bs = ctx->bs; 5196 5197 /* 5198 * We need to defer calling bs_call_cpl() until after 5199 * dev destruction, so tuck these 
away for later use. 5200 */ 5201 bs->unload_err = bserrno; 5202 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5203 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5204 5205 bs_sequence_finish(seq, bserrno); 5206 5207 bs_free(bs); 5208 free(ctx); 5209 } 5210 5211 void 5212 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5213 void *cb_arg) 5214 { 5215 struct spdk_bs_cpl cpl; 5216 spdk_bs_sequence_t *seq; 5217 struct spdk_bs_load_ctx *ctx; 5218 5219 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5220 5221 if (!RB_EMPTY(&bs->open_blobs)) { 5222 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5223 cb_fn(cb_arg, -EBUSY); 5224 return; 5225 } 5226 5227 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5228 cpl.u.bs_basic.cb_fn = cb_fn; 5229 cpl.u.bs_basic.cb_arg = cb_arg; 5230 5231 ctx = calloc(1, sizeof(*ctx)); 5232 if (!ctx) { 5233 cb_fn(cb_arg, -ENOMEM); 5234 return; 5235 } 5236 5237 ctx->bs = bs; 5238 5239 seq = bs_sequence_start(bs->md_channel, &cpl); 5240 if (!seq) { 5241 free(ctx); 5242 cb_fn(cb_arg, -ENOMEM); 5243 return; 5244 } 5245 5246 /* Write zeroes to the super block */ 5247 bs_sequence_write_zeroes_dev(seq, 5248 bs_page_to_lba(bs, 0), 5249 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5250 bs_destroy_trim_cpl, ctx); 5251 } 5252 5253 /* END spdk_bs_destroy */ 5254 5255 /* START spdk_bs_unload */ 5256 5257 static void 5258 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5259 { 5260 spdk_bs_sequence_t *seq = ctx->seq; 5261 5262 spdk_free(ctx->super); 5263 5264 /* 5265 * We need to defer calling bs_call_cpl() until after 5266 * dev destruction, so tuck these away for later use. 5267 */ 5268 ctx->bs->unload_err = bserrno; 5269 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5270 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5271 5272 bs_sequence_finish(seq, bserrno); 5273 5274 bs_free(ctx->bs); 5275 free(ctx); 5276 } 5277 5278 static void 5279 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5280 { 5281 struct spdk_bs_load_ctx *ctx = cb_arg; 5282 5283 bs_unload_finish(ctx, bserrno); 5284 } 5285 5286 static void 5287 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5288 { 5289 struct spdk_bs_load_ctx *ctx = cb_arg; 5290 5291 spdk_free(ctx->mask); 5292 5293 if (bserrno != 0) { 5294 bs_unload_finish(ctx, bserrno); 5295 return; 5296 } 5297 5298 ctx->super->clean = 1; 5299 5300 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5301 } 5302 5303 static void 5304 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5305 { 5306 struct spdk_bs_load_ctx *ctx = cb_arg; 5307 5308 spdk_free(ctx->mask); 5309 ctx->mask = NULL; 5310 5311 if (bserrno != 0) { 5312 bs_unload_finish(ctx, bserrno); 5313 return; 5314 } 5315 5316 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5317 } 5318 5319 static void 5320 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5321 { 5322 struct spdk_bs_load_ctx *ctx = cb_arg; 5323 5324 spdk_free(ctx->mask); 5325 ctx->mask = NULL; 5326 5327 if (bserrno != 0) { 5328 bs_unload_finish(ctx, bserrno); 5329 return; 5330 } 5331 5332 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5333 } 5334 5335 static void 5336 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5337 { 5338 struct spdk_bs_load_ctx *ctx = cb_arg; 5339 5340 if (bserrno != 0) { 5341 bs_unload_finish(ctx, bserrno); 5342 return; 5343 } 5344 5345 
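	/* Persist the in-memory masks in order: used md pages, then used blobids,
	 * then used clusters. The final completion rewrites the super block with
	 * clean set to 1. */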
bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5346 } 5347 5348 void 5349 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5350 { 5351 struct spdk_bs_cpl cpl; 5352 struct spdk_bs_load_ctx *ctx; 5353 5354 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5355 5356 if (!RB_EMPTY(&bs->open_blobs)) { 5357 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5358 cb_fn(cb_arg, -EBUSY); 5359 return; 5360 } 5361 5362 ctx = calloc(1, sizeof(*ctx)); 5363 if (!ctx) { 5364 cb_fn(cb_arg, -ENOMEM); 5365 return; 5366 } 5367 5368 ctx->bs = bs; 5369 5370 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5371 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5372 if (!ctx->super) { 5373 free(ctx); 5374 cb_fn(cb_arg, -ENOMEM); 5375 return; 5376 } 5377 5378 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5379 cpl.u.bs_basic.cb_fn = cb_fn; 5380 cpl.u.bs_basic.cb_arg = cb_arg; 5381 5382 ctx->seq = bs_sequence_start(bs->md_channel, &cpl); 5383 if (!ctx->seq) { 5384 spdk_free(ctx->super); 5385 free(ctx); 5386 cb_fn(cb_arg, -ENOMEM); 5387 return; 5388 } 5389 5390 /* Read super block */ 5391 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5392 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5393 bs_unload_read_super_cpl, ctx); 5394 } 5395 5396 /* END spdk_bs_unload */ 5397 5398 /* START spdk_bs_set_super */ 5399 5400 struct spdk_bs_set_super_ctx { 5401 struct spdk_blob_store *bs; 5402 struct spdk_bs_super_block *super; 5403 }; 5404 5405 static void 5406 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5407 { 5408 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5409 5410 if (bserrno != 0) { 5411 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5412 } 5413 5414 spdk_free(ctx->super); 5415 5416 bs_sequence_finish(seq, bserrno); 5417 5418 free(ctx); 5419 } 5420 5421 static void 5422 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5423 { 5424 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5425 5426 if (bserrno != 0) { 5427 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5428 spdk_free(ctx->super); 5429 bs_sequence_finish(seq, bserrno); 5430 free(ctx); 5431 return; 5432 } 5433 5434 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 5435 } 5436 5437 void 5438 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 5439 spdk_bs_op_complete cb_fn, void *cb_arg) 5440 { 5441 struct spdk_bs_cpl cpl; 5442 spdk_bs_sequence_t *seq; 5443 struct spdk_bs_set_super_ctx *ctx; 5444 5445 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 5446 5447 ctx = calloc(1, sizeof(*ctx)); 5448 if (!ctx) { 5449 cb_fn(cb_arg, -ENOMEM); 5450 return; 5451 } 5452 5453 ctx->bs = bs; 5454 5455 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5456 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5457 if (!ctx->super) { 5458 free(ctx); 5459 cb_fn(cb_arg, -ENOMEM); 5460 return; 5461 } 5462 5463 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5464 cpl.u.bs_basic.cb_fn = cb_fn; 5465 cpl.u.bs_basic.cb_arg = cb_arg; 5466 5467 seq = bs_sequence_start(bs->md_channel, &cpl); 5468 if (!seq) { 5469 spdk_free(ctx->super); 5470 free(ctx); 5471 cb_fn(cb_arg, -ENOMEM); 5472 return; 5473 } 5474 5475 bs->super_blob = blobid; 5476 5477 /* Read super block */ 5478 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 5479 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5480 bs_set_super_read_cpl, ctx); 5481 } 5482 5483 /* END spdk_bs_set_super */ 5484 5485 void 5486 spdk_bs_get_super(struct spdk_blob_store *bs, 5487 
spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5488 { 5489 if (bs->super_blob == SPDK_BLOBID_INVALID) { 5490 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 5491 } else { 5492 cb_fn(cb_arg, bs->super_blob, 0); 5493 } 5494 } 5495 5496 uint64_t 5497 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 5498 { 5499 return bs->cluster_sz; 5500 } 5501 5502 uint64_t 5503 spdk_bs_get_page_size(struct spdk_blob_store *bs) 5504 { 5505 return SPDK_BS_PAGE_SIZE; 5506 } 5507 5508 uint64_t 5509 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 5510 { 5511 return bs->io_unit_size; 5512 } 5513 5514 uint64_t 5515 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 5516 { 5517 return bs->num_free_clusters; 5518 } 5519 5520 uint64_t 5521 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 5522 { 5523 return bs->total_data_clusters; 5524 } 5525 5526 static int 5527 bs_register_md_thread(struct spdk_blob_store *bs) 5528 { 5529 bs->md_channel = spdk_get_io_channel(bs); 5530 if (!bs->md_channel) { 5531 SPDK_ERRLOG("Failed to get IO channel.\n"); 5532 return -1; 5533 } 5534 5535 return 0; 5536 } 5537 5538 static int 5539 bs_unregister_md_thread(struct spdk_blob_store *bs) 5540 { 5541 spdk_put_io_channel(bs->md_channel); 5542 5543 return 0; 5544 } 5545 5546 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob) 5547 { 5548 assert(blob != NULL); 5549 5550 return blob->id; 5551 } 5552 5553 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob) 5554 { 5555 assert(blob != NULL); 5556 5557 return bs_cluster_to_page(blob->bs, blob->active.num_clusters); 5558 } 5559 5560 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob) 5561 { 5562 assert(blob != NULL); 5563 5564 return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs); 5565 } 5566 5567 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob) 5568 { 5569 assert(blob != NULL); 5570 5571 return blob->active.num_clusters; 5572 } 5573 5574 /* START spdk_bs_create_blob */ 5575 5576 static void 5577 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5578 { 5579 struct spdk_blob *blob = cb_arg; 5580 uint32_t page_idx = bs_blobid_to_page(blob->id); 5581 5582 if (bserrno != 0) { 5583 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 5584 bs_release_md_page(blob->bs, page_idx); 5585 } 5586 5587 blob_free(blob); 5588 5589 bs_sequence_finish(seq, bserrno); 5590 } 5591 5592 static int 5593 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 5594 bool internal) 5595 { 5596 uint64_t i; 5597 size_t value_len = 0; 5598 int rc; 5599 const void *value = NULL; 5600 if (xattrs->count > 0 && xattrs->get_value == NULL) { 5601 return -EINVAL; 5602 } 5603 for (i = 0; i < xattrs->count; i++) { 5604 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 5605 if (value == NULL || value_len == 0) { 5606 return -EINVAL; 5607 } 5608 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 5609 if (rc < 0) { 5610 return rc; 5611 } 5612 } 5613 return 0; 5614 } 5615 5616 static void 5617 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 5618 { 5619 #define FIELD_OK(field) \ 5620 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 5621 5622 #define SET_FIELD(field) \ 5623 if (FIELD_OK(field)) { \ 5624 dst->field = src->field; \ 5625 } \ 5626 5627 SET_FIELD(num_clusters); 5628 SET_FIELD(thin_provision); 5629 SET_FIELD(clear_method); 5630 5631 if (FIELD_OK(xattrs)) { 5632 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 
5633 } 5634 5635 SET_FIELD(use_extent_table); 5636 5637 dst->opts_size = src->opts_size; 5638 5639 /* You should not remove this statement, but need to update the assert statement 5640 * if you add a new field, and also add a corresponding SET_FIELD statement */ 5641 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 64, "Incorrect size"); 5642 5643 #undef FIELD_OK 5644 #undef SET_FIELD 5645 } 5646 5647 static void 5648 bs_create_blob(struct spdk_blob_store *bs, 5649 const struct spdk_blob_opts *opts, 5650 const struct spdk_blob_xattr_opts *internal_xattrs, 5651 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5652 { 5653 struct spdk_blob *blob; 5654 uint32_t page_idx; 5655 struct spdk_bs_cpl cpl; 5656 struct spdk_blob_opts opts_local; 5657 struct spdk_blob_xattr_opts internal_xattrs_default; 5658 spdk_bs_sequence_t *seq; 5659 spdk_blob_id id; 5660 int rc; 5661 5662 assert(spdk_get_thread() == bs->md_thread); 5663 5664 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 5665 if (page_idx == UINT32_MAX) { 5666 cb_fn(cb_arg, 0, -ENOMEM); 5667 return; 5668 } 5669 spdk_bit_array_set(bs->used_blobids, page_idx); 5670 bs_claim_md_page(bs, page_idx); 5671 5672 id = bs_page_to_blobid(page_idx); 5673 5674 SPDK_DEBUGLOG(blob, "Creating blob with id %" PRIu64 " at page %u\n", id, page_idx); 5675 5676 blob = blob_alloc(bs, id); 5677 if (!blob) { 5678 spdk_bit_array_clear(bs->used_blobids, page_idx); 5679 bs_release_md_page(bs, page_idx); 5680 cb_fn(cb_arg, 0, -ENOMEM); 5681 return; 5682 } 5683 5684 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 5685 if (opts) { 5686 blob_opts_copy(opts, &opts_local); 5687 } 5688 5689 blob->use_extent_table = opts_local.use_extent_table; 5690 if (blob->use_extent_table) { 5691 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 5692 } 5693 5694 if (!internal_xattrs) { 5695 blob_xattrs_init(&internal_xattrs_default); 5696 internal_xattrs = &internal_xattrs_default; 5697 } 5698 5699 rc = blob_set_xattrs(blob, &opts_local.xattrs, false); 5700 if (rc < 0) { 5701 blob_free(blob); 5702 spdk_bit_array_clear(bs->used_blobids, page_idx); 5703 bs_release_md_page(bs, page_idx); 5704 cb_fn(cb_arg, 0, rc); 5705 return; 5706 } 5707 5708 rc = blob_set_xattrs(blob, internal_xattrs, true); 5709 if (rc < 0) { 5710 blob_free(blob); 5711 spdk_bit_array_clear(bs->used_blobids, page_idx); 5712 bs_release_md_page(bs, page_idx); 5713 cb_fn(cb_arg, 0, rc); 5714 return; 5715 } 5716 5717 if (opts_local.thin_provision) { 5718 blob_set_thin_provision(blob); 5719 } 5720 5721 blob_set_clear_method(blob, opts_local.clear_method); 5722 5723 rc = blob_resize(blob, opts_local.num_clusters); 5724 if (rc < 0) { 5725 blob_free(blob); 5726 spdk_bit_array_clear(bs->used_blobids, page_idx); 5727 bs_release_md_page(bs, page_idx); 5728 cb_fn(cb_arg, 0, rc); 5729 return; 5730 } 5731 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 5732 cpl.u.blobid.cb_fn = cb_fn; 5733 cpl.u.blobid.cb_arg = cb_arg; 5734 cpl.u.blobid.blobid = blob->id; 5735 5736 seq = bs_sequence_start(bs->md_channel, &cpl); 5737 if (!seq) { 5738 blob_free(blob); 5739 spdk_bit_array_clear(bs->used_blobids, page_idx); 5740 bs_release_md_page(bs, page_idx); 5741 cb_fn(cb_arg, 0, -ENOMEM); 5742 return; 5743 } 5744 5745 blob_persist(seq, blob, bs_create_blob_cpl, blob); 5746 } 5747 5748 void spdk_bs_create_blob(struct spdk_blob_store *bs, 5749 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5750 { 5751 bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 5752 } 5753 5754 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct 
spdk_blob_opts *opts,
                             spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
        bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
}

/* END spdk_bs_create_blob */

/* START blob_cleanup */

struct spdk_clone_snapshot_ctx {
        struct spdk_bs_cpl cpl;
        int bserrno;
        bool frozen;

        struct spdk_io_channel *channel;

        /* Current cluster for inflate operation */
        uint64_t cluster;

        /* For inflation, force allocation of all unallocated clusters and remove
         * thin-provisioning. Otherwise, only decouple the parent and keep the clone thin. */
        bool allocate_all;

        struct {
                spdk_blob_id id;
                struct spdk_blob *blob;
                bool md_ro;
        } original;
        struct {
                spdk_blob_id id;
                struct spdk_blob *blob;
        } new;

        /* xattrs specified for snapshots/clones only. They have no impact on
         * the original blob's xattrs. */
        const struct spdk_blob_xattr_opts *xattrs;
};

static void
bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
{
        struct spdk_clone_snapshot_ctx *ctx = cb_arg;
        struct spdk_bs_cpl *cpl = &ctx->cpl;

        if (bserrno != 0) {
                if (ctx->bserrno != 0) {
                        SPDK_ERRLOG("Cleanup error %d\n", bserrno);
                } else {
                        ctx->bserrno = bserrno;
                }
        }

        switch (cpl->type) {
        case SPDK_BS_CPL_TYPE_BLOBID:
                cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
                break;
        case SPDK_BS_CPL_TYPE_BLOB_BASIC:
                cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
                break;
        default:
                SPDK_UNREACHABLE();
                break;
        }

        free(ctx);
}

static void
bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
{
        struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
        struct spdk_blob *origblob = ctx->original.blob;

        if (bserrno != 0) {
                if (ctx->bserrno != 0) {
                        SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
                } else {
                        ctx->bserrno = bserrno;
                }
        }

        ctx->original.id = origblob->id;
        origblob->locked_operation_in_progress = false;

        /* Revert md_ro to original state */
        origblob->md_ro = ctx->original.md_ro;

        spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx);
}

static void
bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
{
        struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
        struct spdk_blob *origblob = ctx->original.blob;

        if (bserrno != 0) {
                if (ctx->bserrno != 0) {
                        SPDK_ERRLOG("Cleanup error %d\n", bserrno);
                } else {
                        ctx->bserrno = bserrno;
                }
        }

        if (ctx->frozen) {
                /* Unfreeze any outstanding I/O */
                blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx);
        } else {
                bs_snapshot_unfreeze_cpl(ctx, 0);
        }
}

static void
bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno)
{
        struct spdk_blob *newblob = ctx->new.blob;

        if (bserrno != 0) {
                if (ctx->bserrno != 0) {
                        SPDK_ERRLOG("Cleanup error %d\n", bserrno);
                } else {
                        ctx->bserrno = bserrno;
                }
        }

        ctx->new.id = newblob->id;
        spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
}

/* END blob_cleanup */

/* START spdk_bs_create_snapshot */

static void
bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
        uint64_t *cluster_temp;
        uint32_t *extent_page_temp;

        cluster_temp = blob1->active.clusters;
        blob1->active.clusters = blob2->active.clusters;
        blob2->active.clusters = cluster_temp;

        extent_page_temp = blob1->active.extent_pages;
        blob1->active.extent_pages = blob2->active.extent_pages;
        blob2->active.extent_pages = extent_page_temp;
}

static void
bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
{
        struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
        struct spdk_blob *origblob = ctx->original.blob;
        struct spdk_blob *newblob = ctx->new.blob;

        if (bserrno != 0) {
                bs_snapshot_swap_cluster_maps(newblob, origblob);
                bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
                return;
        }

        /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
        bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
        if (bserrno != 0) {
                bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
                return;
        }

        bs_blob_list_add(ctx->original.blob);

        spdk_blob_set_read_only(newblob);

        /* sync snapshot metadata */
        spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
{
        struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
        struct spdk_blob *origblob = ctx->original.blob;
        struct spdk_blob *newblob = ctx->new.blob;

        if (bserrno != 0) {
                /* return cluster map back to original */
                bs_snapshot_swap_cluster_maps(newblob, origblob);

                /* Newblob md sync failed. Valid clusters are only present in origblob.
                 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred.
                 * Newblob needs to be reverted to the thin_provisioned state it had at creation to properly close.
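                 * (The swap above undoes the exchange performed in bs_snapshot_freeze_cpl,
                 * so every valid cluster is owned by origblob again before newblob is
                 * closed and cleaned up.)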
*/ 5946 blob_set_thin_provision(newblob); 5947 assert(spdk_mem_all_zero(newblob->active.clusters, 5948 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 5949 assert(spdk_mem_all_zero(newblob->active.extent_pages, 5950 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 5951 5952 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 5953 return; 5954 } 5955 5956 /* Set internal xattr for snapshot id */ 5957 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 5958 if (bserrno != 0) { 5959 /* return cluster map back to original */ 5960 bs_snapshot_swap_cluster_maps(newblob, origblob); 5961 blob_set_thin_provision(newblob); 5962 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 5963 return; 5964 } 5965 5966 /* Create new back_bs_dev for snapshot */ 5967 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 5968 if (origblob->back_bs_dev == NULL) { 5969 /* return cluster map back to original */ 5970 bs_snapshot_swap_cluster_maps(newblob, origblob); 5971 blob_set_thin_provision(newblob); 5972 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 5973 return; 5974 } 5975 5976 bs_blob_list_remove(origblob); 5977 origblob->parent_id = newblob->id; 5978 /* set clone blob as thin provisioned */ 5979 blob_set_thin_provision(origblob); 5980 5981 bs_blob_list_add(newblob); 5982 5983 /* sync clone metadata */ 5984 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 5985 } 5986 5987 static void 5988 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 5989 { 5990 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5991 struct spdk_blob *origblob = ctx->original.blob; 5992 struct spdk_blob *newblob = ctx->new.blob; 5993 int bserrno; 5994 5995 if (rc != 0) { 5996 bs_clone_snapshot_newblob_cleanup(ctx, rc); 5997 return; 5998 } 5999 6000 ctx->frozen = true; 6001 6002 /* set new back_bs_dev for snapshot */ 6003 newblob->back_bs_dev = origblob->back_bs_dev; 6004 /* Set invalid flags from origblob */ 6005 newblob->invalid_flags = origblob->invalid_flags; 6006 6007 /* inherit parent from original blob if set */ 6008 newblob->parent_id = origblob->parent_id; 6009 if (origblob->parent_id != SPDK_BLOBID_INVALID) { 6010 /* Set internal xattr for snapshot id */ 6011 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6012 &origblob->parent_id, sizeof(spdk_blob_id), true); 6013 if (bserrno != 0) { 6014 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6015 return; 6016 } 6017 } 6018 6019 /* swap cluster maps */ 6020 bs_snapshot_swap_cluster_maps(newblob, origblob); 6021 6022 /* Set the clear method on the new blob to match the original. 
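         * (clear_method selects how blob data is cleared, e.g. on delete. A caller
         * would normally pick it at creation time; an illustrative sketch, assuming
         * the enum values from spdk/blob.h:
         *
         *     spdk_blob_opts_init(&opts, sizeof(opts));
         *     opts.clear_method = BLOB_CLEAR_WITH_UNMAP;
         *
         * BLOB_CLEAR_WITH_DEFAULT, the initialized value, defers to whatever is
         * recorded in the blob metadata, as blob_update_clear_method() later in
         * this file describes.)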
*/ 6023 blob_set_clear_method(newblob, origblob->clear_method); 6024 6025 /* sync snapshot metadata */ 6026 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6027 } 6028 6029 static void 6030 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6031 { 6032 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6033 struct spdk_blob *origblob = ctx->original.blob; 6034 struct spdk_blob *newblob = _blob; 6035 6036 if (bserrno != 0) { 6037 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6038 return; 6039 } 6040 6041 ctx->new.blob = newblob; 6042 assert(spdk_blob_is_thin_provisioned(newblob)); 6043 assert(spdk_mem_all_zero(newblob->active.clusters, 6044 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6045 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6046 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6047 6048 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6049 } 6050 6051 static void 6052 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6053 { 6054 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6055 struct spdk_blob *origblob = ctx->original.blob; 6056 6057 if (bserrno != 0) { 6058 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6059 return; 6060 } 6061 6062 ctx->new.id = blobid; 6063 ctx->cpl.u.blobid.blobid = blobid; 6064 6065 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6066 } 6067 6068 6069 static void 6070 bs_xattr_snapshot(void *arg, const char *name, 6071 const void **value, size_t *value_len) 6072 { 6073 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6074 6075 struct spdk_blob *blob = (struct spdk_blob *)arg; 6076 *value = &blob->id; 6077 *value_len = sizeof(blob->id); 6078 } 6079 6080 static void 6081 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6082 { 6083 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6084 struct spdk_blob_opts opts; 6085 struct spdk_blob_xattr_opts internal_xattrs; 6086 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6087 6088 if (bserrno != 0) { 6089 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6090 return; 6091 } 6092 6093 ctx->original.blob = _blob; 6094 6095 if (_blob->data_ro || _blob->md_ro) { 6096 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id %" PRIu64 "\n", 6097 _blob->id); 6098 ctx->bserrno = -EINVAL; 6099 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6100 return; 6101 } 6102 6103 if (_blob->locked_operation_in_progress) { 6104 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6105 ctx->bserrno = -EBUSY; 6106 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6107 return; 6108 } 6109 6110 _blob->locked_operation_in_progress = true; 6111 6112 spdk_blob_opts_init(&opts, sizeof(opts)); 6113 blob_xattrs_init(&internal_xattrs); 6114 6115 /* Change the size of new blob to the same as in original blob, 6116 * but do not allocate clusters */ 6117 opts.thin_provision = true; 6118 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6119 opts.use_extent_table = _blob->use_extent_table; 6120 6121 /* If there are any xattrs specified for snapshot, set them now */ 6122 if (ctx->xattrs) { 6123 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6124 } 6125 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6126 internal_xattrs.count = 1; 6127 
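        /* (SNAPSHOT_IN_PROGRESS records the originating blob's id while the
         * snapshot is being built. bs_snapshot_origblob_sync_cpl() removes it
         * again once the swapped metadata has been persisted, so finding it at
         * load time indicates a snapshot that never completed.) */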
internal_xattrs.ctx = _blob;
        internal_xattrs.names = xattrs_names;
        internal_xattrs.get_value = bs_xattr_snapshot;

        bs_create_blob(_blob->bs, &opts, &internal_xattrs,
                       bs_snapshot_newblob_create_cpl, ctx);
}

void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
                             const struct spdk_blob_xattr_opts *snapshot_xattrs,
                             spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
        struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

        if (!ctx) {
                cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
                return;
        }
        ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
        ctx->cpl.u.blobid.cb_fn = cb_fn;
        ctx->cpl.u.blobid.cb_arg = cb_arg;
        ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
        ctx->bserrno = 0;
        ctx->frozen = false;
        ctx->original.id = blobid;
        ctx->xattrs = snapshot_xattrs;

        spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx);
}
/* END spdk_bs_create_snapshot */

/* START spdk_bs_create_clone */

static void
bs_xattr_clone(void *arg, const char *name,
               const void **value, size_t *value_len)
{
        struct spdk_blob *blob = (struct spdk_blob *)arg;

        assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);

        *value = &blob->id;
        *value_len = sizeof(blob->id);
}

static void
bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
        struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
        struct spdk_blob *clone = _blob;

        if (bserrno != 0) {
                /* Opening the freshly created clone failed; ctx->new.blob was never
                 * set, so clean up starting from the original blob. */
                bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
                return;
        }

        ctx->new.blob = clone;
        bs_blob_list_add(clone);

        spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
        struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;

        if (bserrno != 0) {
                /* Blob creation failed; there is no new blob to clean up. */
                bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
                return;
        }

        ctx->cpl.u.blobid.blobid = blobid;
        spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx);
}

static void
bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
        struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
        struct spdk_blob_opts opts;
        struct spdk_blob_xattr_opts internal_xattrs;
        char *xattr_names[] = { BLOB_SNAPSHOT };

        if (bserrno != 0) {
                bs_clone_snapshot_cleanup_finish(ctx, bserrno);
                return;
        }

        ctx->original.blob = _blob;
        ctx->original.md_ro = _blob->md_ro;

        if (!_blob->data_ro || !_blob->md_ro) {
                SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n");
                ctx->bserrno = -EINVAL;
                spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
                return;
        }

        if (_blob->locked_operation_in_progress) {
                SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n");
                ctx->bserrno = -EBUSY;
                spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
                return;
        }

        _blob->locked_operation_in_progress = true;

        spdk_blob_opts_init(&opts, sizeof(opts));
        blob_xattrs_init(&internal_xattrs);

        opts.thin_provision = true;
        opts.num_clusters = spdk_blob_get_num_clusters(_blob);
        opts.use_extent_table = _blob->use_extent_table;
        if (ctx->xattrs) {
                memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
        }

        /* Set internal xattr BLOB_SNAPSHOT */
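        /* (BLOB_SNAPSHOT on a clone stores the id of its parent snapshot; the
         * blobstore uses this internal xattr to rebuild the parent/clone
         * relationship when the blob is loaded.) */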
internal_xattrs.count = 1; 6236 internal_xattrs.ctx = _blob; 6237 internal_xattrs.names = xattr_names; 6238 internal_xattrs.get_value = bs_xattr_clone; 6239 6240 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6241 bs_clone_newblob_create_cpl, ctx); 6242 } 6243 6244 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6245 const struct spdk_blob_xattr_opts *clone_xattrs, 6246 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6247 { 6248 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6249 6250 if (!ctx) { 6251 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6252 return; 6253 } 6254 6255 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6256 ctx->cpl.u.blobid.cb_fn = cb_fn; 6257 ctx->cpl.u.blobid.cb_arg = cb_arg; 6258 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6259 ctx->bserrno = 0; 6260 ctx->xattrs = clone_xattrs; 6261 ctx->original.id = blobid; 6262 6263 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6264 } 6265 6266 /* END spdk_bs_create_clone */ 6267 6268 /* START spdk_bs_inflate_blob */ 6269 6270 static void 6271 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6272 { 6273 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6274 struct spdk_blob *_blob = ctx->original.blob; 6275 6276 if (bserrno != 0) { 6277 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6278 return; 6279 } 6280 6281 /* Temporarily override md_ro flag for MD modification */ 6282 _blob->md_ro = false; 6283 6284 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6285 if (bserrno != 0) { 6286 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6287 return; 6288 } 6289 6290 assert(_parent != NULL); 6291 6292 bs_blob_list_remove(_blob); 6293 _blob->parent_id = _parent->id; 6294 6295 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 6296 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6297 bs_blob_list_add(_blob); 6298 6299 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6300 } 6301 6302 static void 6303 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 6304 { 6305 struct spdk_blob *_blob = ctx->original.blob; 6306 struct spdk_blob *_parent; 6307 6308 if (ctx->allocate_all) { 6309 /* remove thin provisioning */ 6310 bs_blob_list_remove(_blob); 6311 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 6312 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV; 6313 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 6314 _blob->back_bs_dev = NULL; 6315 _blob->parent_id = SPDK_BLOBID_INVALID; 6316 } else { 6317 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob; 6318 if (_parent->parent_id != SPDK_BLOBID_INVALID) { 6319 /* We must change the parent of the inflated blob */ 6320 spdk_bs_open_blob(_blob->bs, _parent->parent_id, 6321 bs_inflate_blob_set_parent_cpl, ctx); 6322 return; 6323 } 6324 6325 bs_blob_list_remove(_blob); 6326 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 6327 _blob->parent_id = SPDK_BLOBID_INVALID; 6328 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 6329 _blob->back_bs_dev = bs_create_zeroes_dev(); 6330 } 6331 6332 /* Temporarily override md_ro flag for MD modification */ 6333 _blob->md_ro = false; 6334 _blob->state = SPDK_BLOB_STATE_DIRTY; 6335 6336 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6337 } 6338 6339 /* Check if cluster needs allocation */ 6340 static inline bool 6341 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all) 6342 
{
        struct spdk_blob_bs_dev *b;

        assert(blob != NULL);

        if (blob->active.clusters[cluster] != 0) {
                /* Cluster is already allocated */
                return false;
        }

        if (blob->parent_id == SPDK_BLOBID_INVALID) {
                /* Blob has no parent blob */
                return allocate_all;
        }

        b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
        return (allocate_all || b->blob->active.clusters[cluster] != 0);
}

static void
bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
{
        struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
        struct spdk_blob *_blob = ctx->original.blob;
        struct spdk_bs_cpl cpl;
        spdk_bs_user_op_t *op;
        uint64_t offset;

        if (bserrno != 0) {
                bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
                return;
        }

        for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
                if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
                        break;
                }
        }

        if (ctx->cluster < _blob->active.num_clusters) {
                offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);

                /* We may safely increment the cluster index before copying */
                ctx->cluster++;

                /* Use a dummy 0B read as a context for cluster copy */
                cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
                cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
                cpl.u.blob_basic.cb_arg = ctx;

                op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
                                      NULL, 0, offset, 0);
                if (!op) {
                        bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
                        return;
                }

                bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
        } else {
                bs_inflate_blob_done(ctx);
        }
}

static void
bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
        struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
        uint64_t clusters_needed;
        uint64_t i;

        if (bserrno != 0) {
                bs_clone_snapshot_cleanup_finish(ctx, bserrno);
                return;
        }

        ctx->original.blob = _blob;
        ctx->original.md_ro = _blob->md_ro;

        if (_blob->locked_operation_in_progress) {
                SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
                ctx->bserrno = -EBUSY;
                spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
                return;
        }

        _blob->locked_operation_in_progress = true;

        if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) {
                /* This blob has no parent, so we cannot decouple it. */
                SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
                bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
                return;
        }

        if (spdk_blob_is_thin_provisioned(_blob) == false) {
                /* This is not a thin-provisioned blob. No need to inflate. */
                bs_clone_snapshot_origblob_cleanup(ctx, 0);
                return;
        }

        /* Do two passes - one to verify that we can obtain enough clusters
         * and another to actually claim them.
         */
        clusters_needed = 0;
        for (i = 0; i < _blob->active.num_clusters; i++) {
                if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
                        clusters_needed++;
                }
        }

        if (clusters_needed > _blob->bs->num_free_clusters) {
                /* Not enough free clusters. Cannot satisfy the request.
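                 * (The counting pass above is what makes this failure clean:
                 * nothing has been copied yet, so the blob is left exactly as
                 * it was.)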
*/ 6454 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 6455 return; 6456 } 6457 6458 ctx->cluster = 0; 6459 bs_inflate_blob_touch_next(ctx, 0); 6460 } 6461 6462 static void 6463 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6464 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 6465 { 6466 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6467 6468 if (!ctx) { 6469 cb_fn(cb_arg, -ENOMEM); 6470 return; 6471 } 6472 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 6473 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 6474 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 6475 ctx->bserrno = 0; 6476 ctx->original.id = blobid; 6477 ctx->channel = channel; 6478 ctx->allocate_all = allocate_all; 6479 6480 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 6481 } 6482 6483 void 6484 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6485 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 6486 { 6487 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 6488 } 6489 6490 void 6491 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6492 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 6493 { 6494 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 6495 } 6496 /* END spdk_bs_inflate_blob */ 6497 6498 /* START spdk_blob_resize */ 6499 struct spdk_bs_resize_ctx { 6500 spdk_blob_op_complete cb_fn; 6501 void *cb_arg; 6502 struct spdk_blob *blob; 6503 uint64_t sz; 6504 int rc; 6505 }; 6506 6507 static void 6508 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 6509 { 6510 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 6511 6512 if (rc != 0) { 6513 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 6514 } 6515 6516 if (ctx->rc != 0) { 6517 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 6518 rc = ctx->rc; 6519 } 6520 6521 ctx->blob->locked_operation_in_progress = false; 6522 6523 ctx->cb_fn(ctx->cb_arg, rc); 6524 free(ctx); 6525 } 6526 6527 static void 6528 bs_resize_freeze_cpl(void *cb_arg, int rc) 6529 { 6530 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 6531 6532 if (rc != 0) { 6533 ctx->blob->locked_operation_in_progress = false; 6534 ctx->cb_fn(ctx->cb_arg, rc); 6535 free(ctx); 6536 return; 6537 } 6538 6539 ctx->rc = blob_resize(ctx->blob, ctx->sz); 6540 6541 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 6542 } 6543 6544 void 6545 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 6546 { 6547 struct spdk_bs_resize_ctx *ctx; 6548 6549 blob_verify_md_op(blob); 6550 6551 SPDK_DEBUGLOG(blob, "Resizing blob %" PRIu64 " to %" PRIu64 " clusters\n", blob->id, sz); 6552 6553 if (blob->md_ro) { 6554 cb_fn(cb_arg, -EPERM); 6555 return; 6556 } 6557 6558 if (sz == blob->active.num_clusters) { 6559 cb_fn(cb_arg, 0); 6560 return; 6561 } 6562 6563 if (blob->locked_operation_in_progress) { 6564 cb_fn(cb_arg, -EBUSY); 6565 return; 6566 } 6567 6568 ctx = calloc(1, sizeof(*ctx)); 6569 if (!ctx) { 6570 cb_fn(cb_arg, -ENOMEM); 6571 return; 6572 } 6573 6574 blob->locked_operation_in_progress = true; 6575 ctx->cb_fn = cb_fn; 6576 ctx->cb_arg = cb_arg; 6577 ctx->blob = blob; 6578 ctx->sz = sz; 6579 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 6580 } 6581 6582 /* END spdk_blob_resize */ 6583 6584 6585 /* START spdk_bs_delete_blob */ 6586 6587 static void 6588 bs_delete_close_cpl(void *cb_arg, int bserrno) 6589 { 6590 spdk_bs_sequence_t *seq = 
cb_arg; 6591 6592 bs_sequence_finish(seq, bserrno); 6593 } 6594 6595 static void 6596 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6597 { 6598 struct spdk_blob *blob = cb_arg; 6599 6600 if (bserrno != 0) { 6601 /* 6602 * We already removed this blob from the blobstore tailq, so 6603 * we need to free it here since this is the last reference 6604 * to it. 6605 */ 6606 blob_free(blob); 6607 bs_delete_close_cpl(seq, bserrno); 6608 return; 6609 } 6610 6611 /* 6612 * This will immediately decrement the ref_count and call 6613 * the completion routine since the metadata state is clean. 6614 * By calling spdk_blob_close, we reduce the number of call 6615 * points into code that touches the blob->open_ref count 6616 * and the blobstore's blob list. 6617 */ 6618 spdk_blob_close(blob, bs_delete_close_cpl, seq); 6619 } 6620 6621 struct delete_snapshot_ctx { 6622 struct spdk_blob_list *parent_snapshot_entry; 6623 struct spdk_blob *snapshot; 6624 bool snapshot_md_ro; 6625 struct spdk_blob *clone; 6626 bool clone_md_ro; 6627 spdk_blob_op_with_handle_complete cb_fn; 6628 void *cb_arg; 6629 int bserrno; 6630 uint32_t next_extent_page; 6631 }; 6632 6633 static void 6634 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 6635 { 6636 struct delete_snapshot_ctx *ctx = cb_arg; 6637 6638 if (bserrno != 0) { 6639 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 6640 } 6641 6642 assert(ctx != NULL); 6643 6644 if (bserrno != 0 && ctx->bserrno == 0) { 6645 ctx->bserrno = bserrno; 6646 } 6647 6648 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 6649 free(ctx); 6650 } 6651 6652 static void 6653 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 6654 { 6655 struct delete_snapshot_ctx *ctx = cb_arg; 6656 6657 if (bserrno != 0) { 6658 ctx->bserrno = bserrno; 6659 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 6660 } 6661 6662 if (ctx->bserrno != 0) { 6663 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 6664 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 6665 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 6666 } 6667 6668 ctx->snapshot->locked_operation_in_progress = false; 6669 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 6670 6671 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 6672 } 6673 6674 static void 6675 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 6676 { 6677 struct delete_snapshot_ctx *ctx = cb_arg; 6678 6679 ctx->clone->locked_operation_in_progress = false; 6680 ctx->clone->md_ro = ctx->clone_md_ro; 6681 6682 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 6683 } 6684 6685 static void 6686 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6687 { 6688 struct delete_snapshot_ctx *ctx = cb_arg; 6689 6690 if (bserrno) { 6691 ctx->bserrno = bserrno; 6692 delete_snapshot_cleanup_clone(ctx, 0); 6693 return; 6694 } 6695 6696 ctx->clone->locked_operation_in_progress = false; 6697 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 6698 } 6699 6700 static void 6701 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 6702 { 6703 struct delete_snapshot_ctx *ctx = cb_arg; 6704 struct spdk_blob_list *parent_snapshot_entry = NULL; 6705 struct spdk_blob_list *snapshot_entry = NULL; 6706 struct spdk_blob_list *clone_entry = NULL; 6707 struct spdk_blob_list *snapshot_clone_entry = NULL; 6708 6709 if (bserrno) { 6710 SPDK_ERRLOG("Failed to sync MD on blob\n"); 6711 ctx->bserrno = bserrno; 6712 delete_snapshot_cleanup_clone(ctx, 0); 
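                /* (The sync error is already recorded in ctx->bserrno above; 0 is
                 * passed here so the cleanup chain does not treat its own steps as
                 * the primary failure.) */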
6713 return; 6714 } 6715 6716 /* Get snapshot entry for the snapshot we want to remove */ 6717 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 6718 6719 assert(snapshot_entry != NULL); 6720 6721 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 6722 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 6723 assert(clone_entry != NULL); 6724 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 6725 snapshot_entry->clone_count--; 6726 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 6727 6728 if (ctx->snapshot->parent_id != SPDK_BLOBID_INVALID) { 6729 /* This snapshot is at the same time a clone of another snapshot - we need to 6730 * update parent snapshot (remove current clone, add new one inherited from 6731 * the snapshot that is being removed) */ 6732 6733 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 6734 * snapshot that we are removing */ 6735 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 6736 &snapshot_clone_entry); 6737 6738 /* Switch clone entry in parent snapshot */ 6739 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 6740 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 6741 free(snapshot_clone_entry); 6742 } else { 6743 /* No parent snapshot - just remove clone entry */ 6744 free(clone_entry); 6745 } 6746 6747 /* Restore md_ro flags */ 6748 ctx->clone->md_ro = ctx->clone_md_ro; 6749 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 6750 6751 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 6752 } 6753 6754 static void 6755 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 6756 { 6757 struct delete_snapshot_ctx *ctx = cb_arg; 6758 uint64_t i; 6759 6760 ctx->snapshot->md_ro = false; 6761 6762 if (bserrno) { 6763 SPDK_ERRLOG("Failed to sync MD on clone\n"); 6764 ctx->bserrno = bserrno; 6765 6766 /* Restore snapshot to previous state */ 6767 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 6768 if (bserrno != 0) { 6769 delete_snapshot_cleanup_clone(ctx, bserrno); 6770 return; 6771 } 6772 6773 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 6774 return; 6775 } 6776 6777 /* Clear cluster map entries for snapshot */ 6778 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 6779 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 6780 ctx->snapshot->active.clusters[i] = 0; 6781 } 6782 } 6783 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 6784 i < ctx->clone->active.num_extent_pages; i++) { 6785 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 6786 ctx->snapshot->active.extent_pages[i] = 0; 6787 } 6788 } 6789 6790 blob_set_thin_provision(ctx->snapshot); 6791 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 6792 6793 if (ctx->parent_snapshot_entry != NULL) { 6794 ctx->snapshot->back_bs_dev = NULL; 6795 } 6796 6797 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 6798 } 6799 6800 static void 6801 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 6802 { 6803 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 6804 ctx->clone->back_bs_dev->destroy(ctx->clone->back_bs_dev); 6805 6806 /* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... 
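         * (If the deleted snapshot itself has a parent, the clone is re-attached
         * to that parent; otherwise the clone becomes a regular thin-provisioned
         * blob backed by the zeroes device.)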
*/ 6807 if (ctx->parent_snapshot_entry != NULL) { 6808 /* ...to parent snapshot */ 6809 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 6810 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 6811 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 6812 sizeof(spdk_blob_id), 6813 true); 6814 } else { 6815 /* ...to blobid invalid and zeroes dev */ 6816 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 6817 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 6818 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 6819 } 6820 6821 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 6822 } 6823 6824 static void 6825 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 6826 { 6827 struct delete_snapshot_ctx *ctx = cb_arg; 6828 uint32_t *extent_page; 6829 uint64_t i; 6830 6831 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 6832 i < ctx->clone->active.num_extent_pages; i++) { 6833 if (ctx->snapshot->active.extent_pages[i] == 0) { 6834 /* No extent page to use from snapshot */ 6835 continue; 6836 } 6837 6838 extent_page = &ctx->clone->active.extent_pages[i]; 6839 if (*extent_page == 0) { 6840 /* Copy extent page from snapshot when clone did not have a matching one */ 6841 *extent_page = ctx->snapshot->active.extent_pages[i]; 6842 continue; 6843 } 6844 6845 /* Clone and snapshot both contain partially filled matching extent pages. 6846 * Update the clone extent page in place with cluster map containing the mix of both. */ 6847 ctx->next_extent_page = i + 1; 6848 6849 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, 6850 delete_snapshot_update_extent_pages, ctx); 6851 return; 6852 } 6853 delete_snapshot_update_extent_pages_cpl(ctx); 6854 } 6855 6856 static void 6857 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 6858 { 6859 struct delete_snapshot_ctx *ctx = cb_arg; 6860 uint64_t i; 6861 6862 /* Temporarily override md_ro flag for clone for MD modification */ 6863 ctx->clone_md_ro = ctx->clone->md_ro; 6864 ctx->clone->md_ro = false; 6865 6866 if (bserrno) { 6867 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 6868 ctx->bserrno = bserrno; 6869 delete_snapshot_cleanup_clone(ctx, 0); 6870 return; 6871 } 6872 6873 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 6874 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 6875 if (ctx->clone->active.clusters[i] == 0) { 6876 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 6877 } 6878 } 6879 ctx->next_extent_page = 0; 6880 delete_snapshot_update_extent_pages(ctx, 0); 6881 } 6882 6883 static void 6884 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 6885 { 6886 struct delete_snapshot_ctx *ctx = cb_arg; 6887 6888 if (bserrno) { 6889 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 6890 ctx->bserrno = bserrno; 6891 delete_snapshot_cleanup_clone(ctx, 0); 6892 return; 6893 } 6894 6895 /* Temporarily override md_ro flag for snapshot for MD modification */ 6896 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 6897 ctx->snapshot->md_ro = false; 6898 6899 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 6900 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 6901 sizeof(spdk_blob_id), true); 6902 if (ctx->bserrno != 0) { 6903 delete_snapshot_cleanup_clone(ctx, 0); 6904 return; 6905 } 6906 6907 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, 
ctx); 6908 } 6909 6910 static void 6911 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 6912 { 6913 struct delete_snapshot_ctx *ctx = cb_arg; 6914 6915 if (bserrno) { 6916 SPDK_ERRLOG("Failed to open clone\n"); 6917 ctx->bserrno = bserrno; 6918 delete_snapshot_cleanup_snapshot(ctx, 0); 6919 return; 6920 } 6921 6922 ctx->clone = clone; 6923 6924 if (clone->locked_operation_in_progress) { 6925 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 6926 ctx->bserrno = -EBUSY; 6927 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 6928 return; 6929 } 6930 6931 clone->locked_operation_in_progress = true; 6932 6933 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 6934 } 6935 6936 static void 6937 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 6938 { 6939 struct spdk_blob_list *snapshot_entry = NULL; 6940 struct spdk_blob_list *clone_entry = NULL; 6941 struct spdk_blob_list *snapshot_clone_entry = NULL; 6942 6943 /* Get snapshot entry for the snapshot we want to remove */ 6944 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 6945 6946 assert(snapshot_entry != NULL); 6947 6948 /* Get clone of the snapshot (at this point there can be only one clone) */ 6949 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 6950 assert(snapshot_entry->clone_count == 1); 6951 assert(clone_entry != NULL); 6952 6953 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 6954 * snapshot that we are removing */ 6955 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 6956 &snapshot_clone_entry); 6957 6958 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 6959 } 6960 6961 static void 6962 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 6963 { 6964 spdk_bs_sequence_t *seq = cb_arg; 6965 struct spdk_blob_list *snapshot_entry = NULL; 6966 uint32_t page_num; 6967 6968 if (bserrno) { 6969 SPDK_ERRLOG("Failed to remove blob\n"); 6970 bs_sequence_finish(seq, bserrno); 6971 return; 6972 } 6973 6974 /* Remove snapshot from the list */ 6975 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 6976 if (snapshot_entry != NULL) { 6977 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 6978 free(snapshot_entry); 6979 } 6980 6981 page_num = bs_blobid_to_page(blob->id); 6982 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 6983 blob->state = SPDK_BLOB_STATE_DIRTY; 6984 blob->active.num_pages = 0; 6985 blob_resize(blob, 0); 6986 6987 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 6988 } 6989 6990 static int 6991 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 6992 { 6993 struct spdk_blob_list *snapshot_entry = NULL; 6994 struct spdk_blob_list *clone_entry = NULL; 6995 struct spdk_blob *clone = NULL; 6996 bool has_one_clone = false; 6997 6998 /* Check if this is a snapshot with clones */ 6999 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 7000 if (snapshot_entry != NULL) { 7001 if (snapshot_entry->clone_count > 1) { 7002 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 7003 return -EBUSY; 7004 } else if (snapshot_entry->clone_count == 1) { 7005 has_one_clone = true; 7006 } 7007 } 7008 7009 /* Check if someone has this blob open (besides this delete context): 7010 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 7011 * - open_ref <= 2 && has_one_clone = true - clone is 
holding snapshot 7012 * and that is ok, because we will update it accordingly */ 7013 if (blob->open_ref <= 2 && has_one_clone) { 7014 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7015 assert(clone_entry != NULL); 7016 clone = blob_lookup(blob->bs, clone_entry->id); 7017 7018 if (blob->open_ref == 2 && clone == NULL) { 7019 /* Clone is closed and someone else opened this blob */ 7020 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7021 return -EBUSY; 7022 } 7023 7024 *update_clone = true; 7025 return 0; 7026 } 7027 7028 if (blob->open_ref > 1) { 7029 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7030 return -EBUSY; 7031 } 7032 7033 assert(has_one_clone == false); 7034 *update_clone = false; 7035 return 0; 7036 } 7037 7038 static void 7039 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 7040 { 7041 spdk_bs_sequence_t *seq = cb_arg; 7042 7043 bs_sequence_finish(seq, -ENOMEM); 7044 } 7045 7046 static void 7047 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7048 { 7049 spdk_bs_sequence_t *seq = cb_arg; 7050 struct delete_snapshot_ctx *ctx; 7051 bool update_clone = false; 7052 7053 if (bserrno != 0) { 7054 bs_sequence_finish(seq, bserrno); 7055 return; 7056 } 7057 7058 blob_verify_md_op(blob); 7059 7060 ctx = calloc(1, sizeof(*ctx)); 7061 if (ctx == NULL) { 7062 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 7063 return; 7064 } 7065 7066 ctx->snapshot = blob; 7067 ctx->cb_fn = bs_delete_blob_finish; 7068 ctx->cb_arg = seq; 7069 7070 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 7071 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 7072 if (ctx->bserrno) { 7073 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7074 return; 7075 } 7076 7077 if (blob->locked_operation_in_progress) { 7078 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 7079 ctx->bserrno = -EBUSY; 7080 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7081 return; 7082 } 7083 7084 blob->locked_operation_in_progress = true; 7085 7086 /* 7087 * Remove the blob from the blob_store list now, to ensure it does not 7088 * get returned after this point by blob_lookup(). 
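         * If the deletion fails later on, delete_snapshot_cleanup_snapshot()
         * re-inserts the blob into open_blobs and restores its open_blobids bit.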
         */
        spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
        RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);

        if (update_clone) {
                /* This blob is a snapshot with active clone - update clone first */
                update_clone_on_snapshot_deletion(blob, ctx);
        } else {
                /* This blob does not have any clones - just remove it */
                bs_blob_list_remove(blob);
                bs_delete_blob_finish(seq, blob, 0);
                free(ctx);
        }
}

void
spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
                    spdk_blob_op_complete cb_fn, void *cb_arg)
{
        struct spdk_bs_cpl cpl;
        spdk_bs_sequence_t *seq;

        SPDK_DEBUGLOG(blob, "Deleting blob %" PRIu64 "\n", blobid);

        assert(spdk_get_thread() == bs->md_thread);

        cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
        cpl.u.blob_basic.cb_fn = cb_fn;
        cpl.u.blob_basic.cb_arg = cb_arg;

        seq = bs_sequence_start(bs->md_channel, &cpl);
        if (!seq) {
                cb_fn(cb_arg, -ENOMEM);
                return;
        }

        spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq);
}

/* END spdk_bs_delete_blob */

/* START spdk_bs_open_blob */

static void
bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
        struct spdk_blob *blob = cb_arg;
        struct spdk_blob *existing;

        if (bserrno != 0) {
                blob_free(blob);
                seq->cpl.u.blob_handle.blob = NULL;
                bs_sequence_finish(seq, bserrno);
                return;
        }

        existing = blob_lookup(blob->bs, blob->id);
        if (existing) {
                blob_free(blob);
                existing->open_ref++;
                seq->cpl.u.blob_handle.blob = existing;
                bs_sequence_finish(seq, 0);
                return;
        }

        blob->open_ref++;

        spdk_bit_array_set(blob->bs->open_blobids, blob->id);
        RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob);

        bs_sequence_finish(seq, bserrno);
}

static inline void
blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst)
{
#define FIELD_OK(field) \
        offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
        if (FIELD_OK(field)) { \
                dst->field = src->field; \
        } \

        SET_FIELD(clear_method);

        dst->opts_size = src->opts_size;

        /* You should not remove this statement, but need to update the assert statement
         * if you add a new field, and also add a corresponding SET_FIELD statement */
        SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 16, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}

static void
bs_open_blob(struct spdk_blob_store *bs,
             spdk_blob_id blobid,
             struct spdk_blob_open_opts *opts,
             spdk_blob_op_with_handle_complete cb_fn,
             void *cb_arg)
{
        struct spdk_blob *blob;
        struct spdk_bs_cpl cpl;
        struct spdk_blob_open_opts opts_local;
        spdk_bs_sequence_t *seq;
        uint32_t page_num;

        SPDK_DEBUGLOG(blob, "Opening blob %" PRIu64 "\n", blobid);
        assert(spdk_get_thread() == bs->md_thread);

        page_num = bs_blobid_to_page(blobid);
        if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
                /* Invalid blobid */
                cb_fn(cb_arg, NULL, -ENOENT);
                return;
        }

        blob = blob_lookup(bs, blobid);
        if (blob) {
                blob->open_ref++;
                cb_fn(cb_arg, blob, 0);
                return;
        }

        blob = blob_alloc(bs, blobid);
        if
(!blob) { 7217 cb_fn(cb_arg, NULL, -ENOMEM); 7218 return; 7219 } 7220 7221 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 7222 if (opts) { 7223 blob_open_opts_copy(opts, &opts_local); 7224 } 7225 7226 blob->clear_method = opts_local.clear_method; 7227 7228 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 7229 cpl.u.blob_handle.cb_fn = cb_fn; 7230 cpl.u.blob_handle.cb_arg = cb_arg; 7231 cpl.u.blob_handle.blob = blob; 7232 7233 seq = bs_sequence_start(bs->md_channel, &cpl); 7234 if (!seq) { 7235 blob_free(blob); 7236 cb_fn(cb_arg, NULL, -ENOMEM); 7237 return; 7238 } 7239 7240 blob_load(seq, blob, bs_open_blob_cpl, blob); 7241 } 7242 7243 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 7244 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7245 { 7246 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 7247 } 7248 7249 void spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 7250 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7251 { 7252 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 7253 } 7254 7255 /* END spdk_bs_open_blob */ 7256 7257 /* START spdk_blob_set_read_only */ 7258 int spdk_blob_set_read_only(struct spdk_blob *blob) 7259 { 7260 blob_verify_md_op(blob); 7261 7262 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 7263 7264 blob->state = SPDK_BLOB_STATE_DIRTY; 7265 return 0; 7266 } 7267 /* END spdk_blob_set_read_only */ 7268 7269 /* START spdk_blob_sync_md */ 7270 7271 static void 7272 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7273 { 7274 struct spdk_blob *blob = cb_arg; 7275 7276 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 7277 blob->data_ro = true; 7278 blob->md_ro = true; 7279 } 7280 7281 bs_sequence_finish(seq, bserrno); 7282 } 7283 7284 static void 7285 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7286 { 7287 struct spdk_bs_cpl cpl; 7288 spdk_bs_sequence_t *seq; 7289 7290 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7291 cpl.u.blob_basic.cb_fn = cb_fn; 7292 cpl.u.blob_basic.cb_arg = cb_arg; 7293 7294 seq = bs_sequence_start(blob->bs->md_channel, &cpl); 7295 if (!seq) { 7296 cb_fn(cb_arg, -ENOMEM); 7297 return; 7298 } 7299 7300 blob_persist(seq, blob, blob_sync_md_cpl, blob); 7301 } 7302 7303 void 7304 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7305 { 7306 blob_verify_md_op(blob); 7307 7308 SPDK_DEBUGLOG(blob, "Syncing blob %" PRIu64 "\n", blob->id); 7309 7310 if (blob->md_ro) { 7311 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 7312 cb_fn(cb_arg, 0); 7313 return; 7314 } 7315 7316 blob_sync_md(blob, cb_fn, cb_arg); 7317 } 7318 7319 /* END spdk_blob_sync_md */ 7320 7321 struct spdk_blob_insert_cluster_ctx { 7322 struct spdk_thread *thread; 7323 struct spdk_blob *blob; 7324 uint32_t cluster_num; /* cluster index in blob */ 7325 uint32_t cluster; /* cluster on disk */ 7326 uint32_t extent_page; /* extent page on disk */ 7327 int rc; 7328 spdk_blob_op_complete cb_fn; 7329 void *cb_arg; 7330 }; 7331 7332 static void 7333 blob_insert_cluster_msg_cpl(void *arg) 7334 { 7335 struct spdk_blob_insert_cluster_ctx *ctx = arg; 7336 7337 ctx->cb_fn(ctx->cb_arg, ctx->rc); 7338 free(ctx); 7339 } 7340 7341 static void 7342 blob_insert_cluster_msg_cb(void *arg, int bserrno) 7343 { 7344 struct spdk_blob_insert_cluster_ctx *ctx = arg; 7345 7346 ctx->rc = bserrno; 7347 spdk_thread_send_msg(ctx->thread, blob_insert_cluster_msg_cpl, ctx); 7348 } 7349 7350 static void 7351 blob_insert_new_ep_cb(void 
*arg, int bserrno)
{
        struct spdk_blob_insert_cluster_ctx *ctx = arg;
        uint32_t *extent_page;

        extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
        *extent_page = ctx->extent_page;
        ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
        blob_sync_md(ctx->blob, blob_insert_cluster_msg_cb, ctx);
}

static void
blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
        struct spdk_blob_md_page *page = cb_arg;

        bs_sequence_finish(seq, bserrno);
        spdk_free(page);
}

static void
blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
                       spdk_blob_op_complete cb_fn, void *cb_arg)
{
        spdk_bs_sequence_t *seq;
        struct spdk_bs_cpl cpl;
        struct spdk_blob_md_page *page = NULL;
        uint32_t page_count = 0;
        int rc;

        cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
        cpl.u.blob_basic.cb_fn = cb_fn;
        cpl.u.blob_basic.cb_arg = cb_arg;

        seq = bs_sequence_start(blob->bs->md_channel, &cpl);
        if (!seq) {
                cb_fn(cb_arg, -ENOMEM);
                return;
        }
        rc = blob_serialize_add_page(blob, &page, &page_count, &page);
        if (rc < 0) {
                bs_sequence_finish(seq, rc);
                return;
        }

        blob_serialize_extent_page(blob, cluster_num, page);

        page->crc = blob_md_page_calc_crc(page);

        assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);

        bs_sequence_write_dev(seq, page, bs_md_page_to_lba(blob->bs, extent),
                              bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
                              blob_persist_extent_page_cpl, page);
}

static void
blob_insert_cluster_msg(void *arg)
{
        struct spdk_blob_insert_cluster_ctx *ctx = arg;
        uint32_t *extent_page;

        ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
        if (ctx->rc != 0) {
                spdk_thread_send_msg(ctx->thread, blob_insert_cluster_msg_cpl, ctx);
                return;
        }

        if (ctx->blob->use_extent_table == false) {
                /* Extent table is not used, proceed with sync of md that will only use extents_rle. */
                ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
                blob_sync_md(ctx->blob, blob_insert_cluster_msg_cb, ctx);
                return;
        }

        extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
        if (*extent_page == 0) {
                /* Extent page requires allocation.
                 * It was already claimed in the used_md_pages map and placed in ctx. */
                assert(ctx->extent_page != 0);
                assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
                blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num,
                                       blob_insert_new_ep_cb, ctx);
        } else {
                /* It is possible for the original thread to have allocated an extent
                 * page for a different cluster in the same extent page. In that case,
                 * proceed with updating the existing extent page, but release the
                 * additional one. */
                if (ctx->extent_page != 0) {
                        assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
                        bs_release_md_page(ctx->blob->bs, ctx->extent_page);
                        ctx->extent_page = 0;
                }
                /* Extent page already allocated.
                 * Every cluster allocation requires just an update of a single extent page.
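                 * (This message runs on the metadata thread, see
                 * blob_insert_cluster_on_md_thread(), so updating the shared
                 * extent page in place is safe.)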
*/ 7445 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, 7446 blob_insert_cluster_msg_cb, ctx); 7447 } 7448 } 7449 7450 static void 7451 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 7452 uint64_t cluster, uint32_t extent_page, spdk_blob_op_complete cb_fn, void *cb_arg) 7453 { 7454 struct spdk_blob_insert_cluster_ctx *ctx; 7455 7456 ctx = calloc(1, sizeof(*ctx)); 7457 if (ctx == NULL) { 7458 cb_fn(cb_arg, -ENOMEM); 7459 return; 7460 } 7461 7462 ctx->thread = spdk_get_thread(); 7463 ctx->blob = blob; 7464 ctx->cluster_num = cluster_num; 7465 ctx->cluster = cluster; 7466 ctx->extent_page = extent_page; 7467 ctx->cb_fn = cb_fn; 7468 ctx->cb_arg = cb_arg; 7469 7470 spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx); 7471 } 7472 7473 /* START spdk_blob_close */ 7474 7475 static void 7476 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7477 { 7478 struct spdk_blob *blob = cb_arg; 7479 7480 if (bserrno == 0) { 7481 blob->open_ref--; 7482 if (blob->open_ref == 0) { 7483 /* 7484 * Blobs with active.num_pages == 0 are deleted blobs. 7485 * these blobs are removed from the blob_store list 7486 * when the deletion process starts - so don't try to 7487 * remove them again. 7488 */ 7489 if (blob->active.num_pages > 0) { 7490 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 7491 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 7492 } 7493 blob_free(blob); 7494 } 7495 } 7496 7497 bs_sequence_finish(seq, bserrno); 7498 } 7499 7500 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7501 { 7502 struct spdk_bs_cpl cpl; 7503 spdk_bs_sequence_t *seq; 7504 7505 blob_verify_md_op(blob); 7506 7507 SPDK_DEBUGLOG(blob, "Closing blob %" PRIu64 "\n", blob->id); 7508 7509 if (blob->open_ref == 0) { 7510 cb_fn(cb_arg, -EBADF); 7511 return; 7512 } 7513 7514 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7515 cpl.u.blob_basic.cb_fn = cb_fn; 7516 cpl.u.blob_basic.cb_arg = cb_arg; 7517 7518 seq = bs_sequence_start(blob->bs->md_channel, &cpl); 7519 if (!seq) { 7520 cb_fn(cb_arg, -ENOMEM); 7521 return; 7522 } 7523 7524 /* Sync metadata */ 7525 blob_persist(seq, blob, blob_close_cpl, blob); 7526 } 7527 7528 /* END spdk_blob_close */ 7529 7530 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 7531 { 7532 return spdk_get_io_channel(bs); 7533 } 7534 7535 void spdk_bs_free_io_channel(struct spdk_io_channel *channel) 7536 { 7537 spdk_put_io_channel(channel); 7538 } 7539 7540 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 7541 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 7542 { 7543 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 7544 SPDK_BLOB_UNMAP); 7545 } 7546 7547 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 7548 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 7549 { 7550 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 7551 SPDK_BLOB_WRITE_ZEROES); 7552 } 7553 7554 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 7555 void *payload, uint64_t offset, uint64_t length, 7556 spdk_blob_op_complete cb_fn, void *cb_arg) 7557 { 7558 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 7559 SPDK_BLOB_WRITE); 7560 } 7561 7562 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 7563 void *payload, uint64_t 
offset, uint64_t length,
                        spdk_blob_op_complete cb_fn, void *cb_arg)
{
        blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
                               SPDK_BLOB_READ);
}

void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
                         struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
                         spdk_blob_op_complete cb_fn, void *cb_arg)
{
        blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
}

void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
                        struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
                        spdk_blob_op_complete cb_fn, void *cb_arg)
{
        blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
}

struct spdk_bs_iter_ctx {
        int64_t page_num;
        struct spdk_blob_store *bs;

        spdk_blob_op_with_handle_complete cb_fn;
        void *cb_arg;
};

static void
bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
        struct spdk_bs_iter_ctx *ctx = cb_arg;
        struct spdk_blob_store *bs = ctx->bs;
        spdk_blob_id id;

        if (bserrno == 0) {
                ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
                free(ctx);
                return;
        }

        ctx->page_num++;
        ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
        if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
                ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
                free(ctx);
                return;
        }

        id = bs_page_to_blobid(ctx->page_num);

        spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx);
}

void
spdk_bs_iter_first(struct spdk_blob_store *bs,
                   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
        struct spdk_bs_iter_ctx *ctx;

        ctx = calloc(1, sizeof(*ctx));
        if (!ctx) {
                cb_fn(cb_arg, NULL, -ENOMEM);
                return;
        }

        ctx->page_num = -1;
        ctx->bs = bs;
        ctx->cb_fn = cb_fn;
        ctx->cb_arg = cb_arg;

        bs_iter_cpl(ctx, NULL, -1);
}

static void
bs_iter_close_cpl(void *cb_arg, int bserrno)
{
        struct spdk_bs_iter_ctx *ctx = cb_arg;

        bs_iter_cpl(ctx, NULL, -1);
}

void
spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
                  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
        struct spdk_bs_iter_ctx *ctx;

        assert(blob != NULL);

        ctx = calloc(1, sizeof(*ctx));
        if (!ctx) {
                cb_fn(cb_arg, NULL, -ENOMEM);
                return;
        }

        ctx->page_num = bs_blobid_to_page(blob->id);
        ctx->bs = bs;
        ctx->cb_fn = cb_fn;
        ctx->cb_arg = cb_arg;

        /* Close the existing blob */
        spdk_blob_close(blob, bs_iter_close_cpl, ctx);
}

static int
blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
               uint16_t value_len, bool internal)
{
        struct spdk_xattr_tailq *xattrs;
        struct spdk_xattr *xattr;
        size_t desc_size;
        void *tmp;

        blob_verify_md_op(blob);

        if (blob->md_ro) {
                return -EPERM;
        }

        desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
        if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
                SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page; max descriptor size is %zu\n",
                              name, desc_size, SPDK_BS_MAX_DESC_SIZE);
                return -ENOMEM;
        }

        if (internal) {
                xattrs =
static int
blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
	       uint16_t value_len, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;
	size_t desc_size;
	void *tmp;

	blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}

	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
		SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into single page %zu\n", name,
			      desc_size, SPDK_BS_MAX_DESC_SIZE);
		return -ENOMEM;
	}

	if (internal) {
		xattrs = &blob->xattrs_internal;
		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
	} else {
		xattrs = &blob->xattrs;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			tmp = malloc(value_len);
			if (!tmp) {
				return -ENOMEM;
			}

			free(xattr->value);
			xattr->value_len = value_len;
			xattr->value = tmp;
			memcpy(xattr->value, value, value_len);

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}

	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
	TAILQ_INSERT_TAIL(xattrs, xattr, link);

	blob->state = SPDK_BLOB_STATE_DIRTY;

	return 0;
}

int
spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		    uint16_t value_len)
{
	return blob_set_xattr(blob, name, value, value_len, false);
}

static int
blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;

	blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}
	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			TAILQ_REMOVE(xattrs, xattr, link);
			free(xattr->value);
			free(xattr->name);
			free(xattr);

			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
			}
			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	return -ENOENT;
}

int
spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
{
	return blob_remove_xattr(blob, name, false);
}
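/*
 * Xattr sketch (illustrative only): setting or removing an xattr only marks
 * the blob dirty; the change becomes persistent once spdk_blob_sync_md() or
 * spdk_blob_close() writes the metadata out. Assuming a hypothetical
 * completion callback `sync_cpl` and a caller-defined "version" attribute:
 *
 *	uint32_t version = 1;
 *
 *	if (spdk_blob_set_xattr(blob, "version", &version, sizeof(version)) == 0) {
 *		spdk_blob_sync_md(blob, sync_cpl, NULL);
 *	}
 */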
static int
blob_get_xattr_value(struct spdk_blob *blob, const char *name,
		     const void **value, size_t *value_len, bool internal)
{
	struct spdk_xattr *xattr;
	struct spdk_xattr_tailq *xattrs;

	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			*value = xattr->value;
			*value_len = xattr->value_len;
			return 0;
		}
	}
	return -ENOENT;
}

int
spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
			  const void **value, size_t *value_len)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_value(blob, name, value, value_len, false);
}

struct spdk_xattr_names {
	uint32_t count;
	const char *names[0];
};

static int
blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
{
	struct spdk_xattr *xattr;
	int count = 0;

	TAILQ_FOREACH(xattr, xattrs, link) {
		count++;
	}

	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
	if (*names == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		(*names)->names[(*names)->count++] = xattr->name;
	}

	return 0;
}

int
spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_names(&blob->xattrs, names);
}

uint32_t
spdk_xattr_names_get_count(struct spdk_xattr_names *names)
{
	assert(names != NULL);

	return names->count;
}

const char *
spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
{
	if (index >= names->count) {
		return NULL;
	}

	return names->names[index];
}

void
spdk_xattr_names_free(struct spdk_xattr_names *names)
{
	free(names);
}

struct spdk_bs_type
spdk_bs_get_bstype(struct spdk_blob_store *bs)
{
	return bs->bstype;
}

void
spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
{
	memcpy(&bs->bstype, &bstype, sizeof(bstype));
}

bool
spdk_blob_is_read_only(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return (blob->data_ro || blob->md_ro);
}

bool
spdk_blob_is_snapshot(struct spdk_blob *blob)
{
	struct spdk_blob_list *snapshot_entry;

	assert(blob != NULL);

	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry == NULL) {
		return false;
	}

	return true;
}

bool
spdk_blob_is_clone(struct spdk_blob *blob)
{
	assert(blob != NULL);

	if (blob->parent_id != SPDK_BLOBID_INVALID) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return true;
	}

	return false;
}

bool
spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
}
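/*
 * Classification sketch (illustrative only, assuming the blob store handle
 * `bs` is available to the caller): a clone always carries SPDK_BLOB_THIN_PROV,
 * so spdk_blob_is_clone() implies spdk_blob_is_thin_provisioned().
 *
 *	spdk_blob_id id = spdk_blob_get_id(blob);
 *
 *	if (spdk_blob_is_snapshot(blob)) {
 *		// read-only origin registered in the snapshot list
 *	} else if (spdk_blob_is_clone(blob)) {
 *		spdk_blob_id parent = spdk_blob_get_parent_snapshot(bs, id);
 *	}
 */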
static void
blob_update_clear_method(struct spdk_blob *blob)
{
	enum blob_clear_method stored_cm;

	assert(blob != NULL);

	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
	 * in metadata previously. If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
	 */
	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);

	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
		blob->clear_method = stored_cm;
	} else if (blob->clear_method != stored_cm) {
		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
			     blob->clear_method, stored_cm);
	}
}

spdk_blob_id
spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
			if (clone_entry->id == blob_id) {
				return snapshot_entry->id;
			}
		}
	}

	return SPDK_BLOBID_INVALID;
}

int
spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
		     size_t *count)
{
	struct spdk_blob_list *snapshot_entry, *clone_entry;
	size_t n;

	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
	if (snapshot_entry == NULL) {
		*count = 0;
		return 0;
	}

	if (ids == NULL || *count < snapshot_entry->clone_count) {
		*count = snapshot_entry->clone_count;
		return -ENOMEM;
	}
	*count = snapshot_entry->clone_count;

	n = 0;
	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
		ids[n++] = clone_entry->id;
	}

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT(blob)
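/*
 * Enumeration sketch (illustrative only): spdk_blob_get_clones() above follows
 * the common two-call sizing pattern. Call it once with ids == NULL to learn
 * the required count, then again with a buffer of that size:
 *
 *	size_t count = 0;
 *	spdk_blob_id *ids;
 *
 *	if (spdk_blob_get_clones(bs, snapshot_id, NULL, &count) == -ENOMEM) {
 *		ids = calloc(count, sizeof(*ids));
 *		if (ids != NULL &&
 *		    spdk_blob_get_clones(bs, snapshot_id, ids, &count) == 0) {
 *			// ids[0] .. ids[count - 1] hold the clone IDs
 *		}
 *		free(ids);
 *	}
 */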